Initial commit
This commit is contained in:
959
migration_scripts/discovery/service_inventory_collector.sh
Executable file
959
migration_scripts/discovery/service_inventory_collector.sh
Executable file
@@ -0,0 +1,959 @@
|
||||
#!/bin/bash
set -euo pipefail

# Service Inventory Collector
# Comprehensive discovery of all running services, containers, and configurations
# Part of the Current State Discovery Framework

# Directory containing this script, resolved to an absolute path.
SCRIPT_DIR=$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" && pwd)
# All discovery output lands beside the script, under results/.
DISCOVERY_DIR="${SCRIPT_DIR}/results"
# Timestamp baked into the report filename so repeated runs never collide.
TIMESTAMP=$(date '+%Y%m%d_%H%M%S')
REPORT_FILE="${DISCOVERY_DIR}/service_inventory_${TIMESTAMP}.json"

# Make sure the output location exists before any collector writes to it.
mkdir -p -- "$DISCOVERY_DIR"
|
||||
|
||||
# Entry point: seed $REPORT_FILE with an empty report skeleton, then run
# each collector in turn. Every collector rewrites exactly one top-level
# key of the JSON report in place (read -> jq -> tmp -> mv), so the order
# below only affects progress output, not the final document shape.
#
# NOTE(review): collect_configuration_files, collect_custom_applications
# and generate_summary are presumably defined later in this file (not
# visible in this chunk) — verify before reusing main() elsewhere.
main() {
    echo "🔍 Starting service inventory collection..."

    # Initialize JSON report. Quoted 'EOF' keeps the template literal
    # (no parameter expansion inside the heredoc).
    cat > "$REPORT_FILE" << 'EOF'
{
  "discovery_metadata": {
    "timestamp": "",
    "hostname": "",
    "discovery_version": "1.0"
  },
  "docker_services": {},
  "system_services": {},
  "web_services": {},
  "databases": {},
  "media_services": {},
  "monitoring_services": {},
  "configuration_files": {},
  "custom_applications": {}
}
EOF

    collect_metadata
    collect_docker_services
    collect_system_services
    collect_web_services
    collect_databases
    collect_media_services
    collect_monitoring_services
    collect_configuration_files
    collect_custom_applications

    echo "✅ Service inventory complete: $REPORT_FILE"
    generate_summary
}
|
||||
|
||||
# Stamp the report with collection time (ISO-8601) and host identity.
# Rewrites the global $REPORT_FILE in place through a temp file so a
# failed jq invocation never truncates the report.
collect_metadata() {
    echo "📋 Collecting metadata..."

    local tmp_file="${REPORT_FILE}.tmp"
    jq \
        --arg timestamp "$(date -Iseconds)" \
        --arg hostname "$(hostname)" \
        '.discovery_metadata.timestamp = $timestamp | .discovery_metadata.hostname = $hostname' \
        "$REPORT_FILE" > "$tmp_file" && mv "$tmp_file" "$REPORT_FILE"
}
|
||||
|
||||
# Probe the local Docker installation and record containers, images,
# networks, volumes, compose files, and daemon info under the
# "docker_services" key of $REPORT_FILE.
collect_docker_services() {
    echo "🐳 Collecting Docker services..."

    # Section skeleton — written as-is when docker is not installed, so
    # the key always exists with a predictable shape.
    local docker_services=$(cat << 'EOF'
{
  "containers": [],
  "images": [],
  "networks": [],
  "volumes": [],
  "compose_files": [],
  "docker_info": {}
}
EOF
)

    # No docker binary: store the empty skeleton and bail out early.
    if ! command -v docker &>/dev/null; then
        jq --argjson docker_services "$docker_services" '.docker_services = $docker_services' "$REPORT_FILE" > "${REPORT_FILE}.tmp" && mv "${REPORT_FILE}.tmp" "$REPORT_FILE"
        return
    fi

    # --- Containers (running and stopped) ---------------------------------
    # Each comma-joined `docker ps` line is re-split by jq into an object.
    # NOTE(review): the Ports column can itself contain commas
    # ("0.0.0.0:80->80/tcp, :::80->80/tcp"), which would shift the
    # created field after split(",") — confirm on multi-port hosts.
    local containers='[]'
    if docker info &>/dev/null; then
        while IFS= read -r line; do
            if [[ -n "$line" ]]; then
                local container_data=$(echo "$line" | jq -R 'split(",") | {id: .[0], name: .[1], image: .[2], status: .[3], ports: .[4], created: .[5]}')
                containers=$(echo "$containers" | jq ". + [$container_data]")
            fi
        done < <(docker ps -a --format "{{.ID}},{{.Names}},{{.Image}},{{.Status}},{{.Ports}},{{.CreatedAt}}" 2>/dev/null || echo "")
    fi

    # --- Images -----------------------------------------------------------
    local images='[]'
    if docker info &>/dev/null; then
        while IFS= read -r line; do
            if [[ -n "$line" ]]; then
                local image_data=$(echo "$line" | jq -R 'split(",") | {repository: .[0], tag: .[1], id: .[2], created: .[3], size: .[4]}')
                images=$(echo "$images" | jq ". + [$image_data]")
            fi
        done < <(docker images --format "{{.Repository}},{{.Tag}},{{.ID}},{{.CreatedAt}},{{.Size}}" 2>/dev/null || echo "")
    fi

    # --- Networks ---------------------------------------------------------
    # Each network is inspected individually; subnet/gateway fall back to
    # "" when the IPAM config is empty (e.g. the host/none networks).
    local networks='[]'
    if docker info &>/dev/null; then
        while IFS= read -r line; do
            if [[ -n "$line" ]]; then
                local network_name=$(echo "$line" | awk '{print $1}')
                local network_inspect=$(docker network inspect "$network_name" 2>/dev/null | jq '.[0] | {name: .Name, driver: .Driver, scope: .Scope, subnet: (.IPAM.Config[0].Subnet // ""), gateway: (.IPAM.Config[0].Gateway // "")}')
                networks=$(echo "$networks" | jq ". + [$network_inspect]")
            fi
        done < <(docker network ls --format "{{.Name}}" 2>/dev/null | grep -v "^$" || echo "")
    fi

    # --- Volumes ----------------------------------------------------------
    local volumes='[]'
    if docker info &>/dev/null; then
        while IFS= read -r line; do
            if [[ -n "$line" ]]; then
                local volume_name=$(echo "$line" | awk '{print $1}')
                local volume_inspect=$(docker volume inspect "$volume_name" 2>/dev/null | jq '.[0] | {name: .Name, driver: .Driver, mountpoint: .Mountpoint}')

                # Disk usage of the mountpoint; "unknown" when unreadable
                # (du on /var/lib/docker typically needs root).
                local mountpoint=$(echo "$volume_inspect" | jq -r '.mountpoint')
                local volume_size="unknown"
                if [[ -d "$mountpoint" ]]; then
                    volume_size=$(du -sh "$mountpoint" 2>/dev/null | awk '{print $1}' || echo "unknown")
                fi

                volume_inspect=$(echo "$volume_inspect" | jq --arg size "$volume_size" '. + {size: $size}')
                volumes=$(echo "$volumes" | jq ". + [$volume_inspect]")
            fi
        done < <(docker volume ls --format "{{.Name}}" 2>/dev/null | grep -v "^$" || echo "")
    fi

    # --- Compose files ----------------------------------------------------
    # Scan common deployment roots, capped at 20 hits per root.
    local compose_files='[]'
    local compose_locations=(
        "/opt"
        "/home"
        "/var/lib"
        "$HOME"
        "$(pwd)"
    )

    for location in "${compose_locations[@]}"; do
        if [[ -d "$location" ]]; then
            while IFS= read -r compose_file; do
                if [[ -f "$compose_file" ]]; then
                    local compose_info=$(jq -n --arg path "$compose_file" --arg size "$(wc -l < "$compose_file" 2>/dev/null || echo 0)" \
                        '{path: $path, lines: ($size | tonumber)}')
                    compose_files=$(echo "$compose_files" | jq ". + [$compose_info]")
                fi
            done < <(find "$location" -name "docker-compose*.yml" -o -name "compose*.yml" 2>/dev/null | head -20)
        fi
    done

    # --- Daemon info -------------------------------------------------------
    local docker_info='{}'
    if docker info &>/dev/null; then
        local docker_version=$(docker version --format '{{.Server.Version}}' 2>/dev/null || echo "unknown")
        local storage_driver=$(docker info --format '{{.Driver}}' 2>/dev/null || echo "unknown")
        local total_containers=$(docker info --format '{{.Containers}}' 2>/dev/null || echo "0")
        local running_containers=$(docker info --format '{{.ContainersRunning}}' 2>/dev/null || echo "0")

        docker_info=$(jq -n --arg version "$docker_version" \
            --arg driver "$storage_driver" \
            --arg total "$total_containers" \
            --arg running "$running_containers" \
            '{version: $version, storage_driver: $driver, total_containers: ($total | tonumber), running_containers: ($running | tonumber)}')
    fi

    # Merge all sub-sections into the skeleton, then into the report file.
    docker_services=$(echo "$docker_services" | jq --argjson containers "$containers" \
        --argjson images "$images" \
        --argjson networks "$networks" \
        --argjson volumes "$volumes" \
        --argjson compose_files "$compose_files" \
        --argjson docker_info "$docker_info" \
        '.containers = $containers | .images = $images | .networks = $networks | .volumes = $volumes | .compose_files = $compose_files | .docker_info = $docker_info')

    jq --argjson docker_services "$docker_services" '.docker_services = $docker_services' "$REPORT_FILE" > "${REPORT_FILE}.tmp" && mv "${REPORT_FILE}.tmp" "$REPORT_FILE"
}
|
||||
|
||||
# Discover host-level (non-container) services: active systemd units,
# system and user cron jobs, startup scripts, and a sample of running
# processes. Results are merged into the "system_services" key of
# $REPORT_FILE.
collect_system_services() {
    echo "⚙️ Collecting system services..."

    # Section skeleton so every key exists even when a probe finds nothing.
    local system_services=$(cat << 'EOF'
{
  "systemd_services": [],
  "cron_jobs": [],
  "startup_scripts": [],
  "background_processes": []
}
EOF
)

    # --- Systemd services -------------------------------------------------
    # FIX: `awk '{$1=$1; print}'` collapses systemctl's column-aligned
    # padding to single spaces so the jq split(" ") below yields one
    # value per column (with multi-space padding, most fields came back
    # as empty strings).
    local systemd_services='[]'
    if command -v systemctl &>/dev/null; then
        while IFS= read -r line; do
            if [[ -n "$line" ]]; then
                local service_data=$(echo "$line" | jq -R 'split(" ") | {name: .[0], load: .[1], active: .[2], sub: .[3], description: (.[4:] | join(" "))}')
                systemd_services=$(echo "$systemd_services" | jq ". + [$service_data]")
            fi
        done < <(systemctl list-units --type=service --no-pager --no-legend --state=active | awk '{$1=$1; print}' | head -50)
    fi

    # --- Cron jobs --------------------------------------------------------
    local cron_jobs='[]'

    # System cron jobs: every non-comment, non-empty line of /etc/cron.d/*.
    if [[ -d /etc/cron.d ]]; then
        for cron_file in /etc/cron.d/*; do
            if [[ -f "$cron_file" ]]; then
                while IFS= read -r line; do
                    if [[ "$line" =~ ^[^#] && -n "$line" ]]; then
                        local job_info=$(jq -n --arg file "$(basename "$cron_file")" --arg job "$line" \
                            '{source: $file, type: "system", job: $job}')
                        cron_jobs=$(echo "$cron_jobs" | jq ". + [$job_info]")
                    fi
                done < "$cron_file"
            fi
        done
    fi

    # User cron jobs for the invoking user only.
    if command -v crontab &>/dev/null; then
        local user_cron=$(crontab -l 2>/dev/null || echo "")
        if [[ -n "$user_cron" ]]; then
            while IFS= read -r line; do
                if [[ "$line" =~ ^[^#] && -n "$line" ]]; then
                    local job_info=$(jq -n --arg user "$(whoami)" --arg job "$line" \
                        '{source: $user, type: "user", job: $job}')
                    cron_jobs=$(echo "$cron_jobs" | jq ". + [$job_info]")
                fi
            done <<< "$user_cron"
        fi
    fi

    # --- Startup scripts ---------------------------------------------------
    # Executable regular files directly under each location. The pattern is
    # expanded unquoted on purpose so /home/* globs per user.
    local startup_scripts='[]'
    local startup_locations=("/etc/init.d" "/etc/systemd/system" "/home/*/.*profile" "/etc/profile.d")

    for location_pattern in "${startup_locations[@]}"; do
        for location in $location_pattern; do
            if [[ -d "$location" ]]; then
                while IFS= read -r script_file; do
                    if [[ -f "$script_file" && -x "$script_file" ]]; then
                        local script_info=$(jq -n --arg path "$script_file" --arg name "$(basename "$script_file")" \
                            '{path: $path, name: $name}')
                        startup_scripts=$(echo "$startup_scripts" | jq ". + [$script_info]")
                    fi
                done < <(find "$location" -maxdepth 1 -type f 2>/dev/null | head -20)
            fi
        done
    done

    # --- Background processes (excluding kernel threads) --------------------
    # FIX: kernel threads appear as "[name]" in the COMMAND column (field
    # 11 of `ps aux`), never at the start of the line, so the old
    # `grep -v "^\["` filtered nothing; filter on $11 instead. `$1=$1`
    # also squeezes column padding so split(" ") parses cleanly.
    # FIX: field mapping now matches the ps aux column order
    # (USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND); the old
    # code put USER into .pid and leaked VSZ..TIME into .command.
    local background_processes='[]'
    while IFS= read -r line; do
        if [[ -n "$line" ]]; then
            local process_data=$(echo "$line" | jq -R 'split(" ") | {pid: .[1], user: .[0], cpu: .[2], mem: .[3], command: (.[10:] | join(" "))}')
            background_processes=$(echo "$background_processes" | jq ". + [$process_data]")
        fi
    done < <(ps aux --no-headers | awk '$11 !~ /^\[/ {$1=$1; print}' | head -30)

    # Merge the four sub-sections into the skeleton, then into the report.
    system_services=$(echo "$system_services" | jq --argjson systemd_services "$systemd_services" \
        --argjson cron_jobs "$cron_jobs" \
        --argjson startup_scripts "$startup_scripts" \
        --argjson background_processes "$background_processes" \
        '.systemd_services = $systemd_services | .cron_jobs = $cron_jobs | .startup_scripts = $startup_scripts | .background_processes = $background_processes')

    jq --argjson system_services "$system_services" '.system_services = $system_services' "$REPORT_FILE" > "${REPORT_FILE}.tmp" && mv "${REPORT_FILE}.tmp" "$REPORT_FILE"
}
|
||||
|
||||
# Detect web-facing software: nginx/apache binaries, Traefik containers,
# SSL certificates on disk, and web-app directories classified by marker
# files. Results land under the "web_services" key of $REPORT_FILE.
collect_web_services() {
    echo "🌐 Collecting web services..."

    local web_services=$(cat << 'EOF'
{
  "web_servers": [],
  "reverse_proxies": [],
  "ssl_certificates": [],
  "web_applications": []
}
EOF
)

    # Detect web servers
    local web_servers='[]'

    # Check for Nginx. `nginx -v` prints to stderr, hence 2>&1.
    # NOTE(review): `nginx -T` dumps the full config and usually needs
    # root; only the first 20 lines are kept, JSON-encoded via jq -R.
    if command -v nginx &>/dev/null; then
        local nginx_version=$(nginx -v 2>&1 | cut -d: -f2 | tr -d ' ')
        local nginx_config=$(nginx -T 2>/dev/null | head -20 | jq -R . | jq -s 'join("\n")')
        local nginx_info=$(jq -n --arg version "$nginx_version" --argjson config "$nginx_config" \
            '{name: "nginx", version: $version, config_sample: $config}')
        web_servers=$(echo "$web_servers" | jq ". + [$nginx_info]")
    fi

    # Check for Apache (Debian names it apache2, RHEL names it httpd).
    if command -v apache2 &>/dev/null || command -v httpd &>/dev/null; then
        local apache_cmd="apache2"
        command -v httpd &>/dev/null && apache_cmd="httpd"
        local apache_version=$($apache_cmd -v 2>/dev/null | head -1 | cut -d: -f2 | tr -d ' ')
        local apache_info=$(jq -n --arg version "$apache_version" \
            '{name: "apache", version: $version}')
        web_servers=$(echo "$web_servers" | jq ". + [$apache_info]")
    fi

    # Check for Traefik (in containers, matched by container name).
    local traefik_containers=$(docker ps --format "{{.Names}}" 2>/dev/null | grep -i traefik || echo "")
    if [[ -n "$traefik_containers" ]]; then
        while IFS= read -r container; do
            if [[ -n "$container" ]]; then
                local traefik_info=$(jq -n --arg container "$container" \
                    '{name: "traefik", type: "container", container_name: $container}')
                web_servers=$(echo "$web_servers" | jq ". + [$traefik_info]")
            fi
        done <<< "$traefik_containers"
    fi

    # Detect reverse proxies — placeholder; stays an empty array for now.
    local reverse_proxies='[]'
    # This would be detected above in web servers, but we can add specific proxy detection

    # SSL certificates: parse subject/issuer/expiry out of each cert found.
    # Patterns with * are expanded unquoted on purpose (per-user/per-app dirs).
    local ssl_certificates='[]'
    local cert_locations=("/etc/ssl/certs" "/etc/letsencrypt/live" "/opt/*/ssl" "/home/*/ssl")

    for location_pattern in "${cert_locations[@]}"; do
        for location in $location_pattern; do
            if [[ -d "$location" ]]; then
                while IFS= read -r cert_file; do
                    if [[ -f "$cert_file" ]]; then
                        local cert_info=$(openssl x509 -in "$cert_file" -text -noout 2>/dev/null | head -20 || echo "")
                        local subject=$(echo "$cert_info" | grep "Subject:" | head -1 | cut -d: -f2-)
                        local issuer=$(echo "$cert_info" | grep "Issuer:" | head -1 | cut -d: -f2-)
                        local not_after=$(echo "$cert_info" | grep "Not After" | head -1 | cut -d: -f2-)

                        # Only keep files openssl could actually parse.
                        if [[ -n "$subject" ]]; then
                            local cert_data=$(jq -n --arg path "$cert_file" --arg subject "$subject" --arg issuer "$issuer" --arg expires "$not_after" \
                                '{path: $path, subject: $subject, issuer: $issuer, expires: $expires}')
                            ssl_certificates=$(echo "$ssl_certificates" | jq ". + [$cert_data]")
                        fi
                    fi
                done < <(find "$location" -name "*.crt" -o -name "*.pem" -o -name "cert.pem" 2>/dev/null | head -10)
            fi
        done
    done

    # Web applications (detect common patterns)
    local web_applications='[]'

    # Look for common web app directories; each directory is classified by
    # the first marker file found (php > nodejs > python > static).
    local webapp_locations=("/var/www" "/opt" "/home/*/www" "/srv")
    for location_pattern in "${webapp_locations[@]}"; do
        for location in $location_pattern; do
            if [[ -d "$location" ]]; then
                while IFS= read -r app_dir; do
                    if [[ -d "$app_dir" ]]; then
                        local app_name=$(basename "$app_dir")
                        local app_type="unknown"

                        # Detect application type
                        if [[ -f "$app_dir/index.php" ]]; then
                            app_type="php"
                        elif [[ -f "$app_dir/package.json" ]]; then
                            app_type="nodejs"
                        elif [[ -f "$app_dir/requirements.txt" ]]; then
                            app_type="python"
                        elif [[ -f "$app_dir/index.html" ]]; then
                            app_type="static"
                        fi

                        local app_info=$(jq -n --arg name "$app_name" --arg path "$app_dir" --arg type "$app_type" \
                            '{name: $name, path: $path, type: $type}')
                        web_applications=$(echo "$web_applications" | jq ". + [$app_info]")
                    fi
                done < <(find "$location" -maxdepth 2 -type d 2>/dev/null | head -10)
            fi
        done
    done

    # Merge all sub-sections into the skeleton, then into the report file.
    web_services=$(echo "$web_services" | jq --argjson web_servers "$web_servers" \
        --argjson reverse_proxies "$reverse_proxies" \
        --argjson ssl_certificates "$ssl_certificates" \
        --argjson web_applications "$web_applications" \
        '.web_servers = $web_servers | .reverse_proxies = $reverse_proxies | .ssl_certificates = $ssl_certificates | .web_applications = $web_applications')

    jq --argjson web_services "$web_services" '.web_services = $web_services' "$REPORT_FILE" > "${REPORT_FILE}.tmp" && mv "${REPORT_FILE}.tmp" "$REPORT_FILE"
}
|
||||
|
||||
# Find database engines, both containerized (matched by container name)
# and installed on the host (matched by client binary), plus SQLite files
# on disk. Results land under the "databases" key of $REPORT_FILE.
#
# NOTE(review): for postgres/mysql containers the full .Config.Env is
# recorded — that typically includes credentials (e.g. *_PASSWORD vars),
# so the report file itself must be treated as sensitive.
collect_databases() {
    echo "🗃️ Collecting database services..."

    local databases=$(cat << 'EOF'
{
  "postgresql": [],
  "mysql": [],
  "redis": [],
  "influxdb": [],
  "sqlite": [],
  "other": []
}
EOF
)

    # PostgreSQL
    local postgresql='[]'

    # Check for PostgreSQL containers
    local postgres_containers=$(docker ps --format "{{.Names}}" 2>/dev/null | grep -E "(postgres|postgresql)" || echo "")
    if [[ -n "$postgres_containers" ]]; then
        while IFS= read -r container; do
            if [[ -n "$container" ]]; then
                local pg_info=$(docker inspect "$container" 2>/dev/null | jq '.[0] | {
                    container_name: .Name,
                    image: .Config.Image,
                    env_vars: .Config.Env,
                    mounts: [.Mounts[] | {source: .Source, destination: .Destination, type: .Type}]
                }' || echo '{}')
                postgresql=$(echo "$postgresql" | jq ". + [$pg_info]")
            fi
        done <<< "$postgres_containers"
    fi

    # Check for system PostgreSQL (client binary implies an install).
    if command -v psql &>/dev/null; then
        local pg_version=$(psql --version 2>/dev/null | head -1 || echo "unknown")
        local pg_system_info=$(jq -n --arg version "$pg_version" --arg type "system" \
            '{type: $type, version: $version}')
        postgresql=$(echo "$postgresql" | jq ". + [$pg_system_info]")
    fi

    # MySQL/MariaDB
    local mysql='[]'

    # Check for MySQL containers
    local mysql_containers=$(docker ps --format "{{.Names}}" 2>/dev/null | grep -E "(mysql|mariadb)" || echo "")
    if [[ -n "$mysql_containers" ]]; then
        while IFS= read -r container; do
            if [[ -n "$container" ]]; then
                local mysql_info=$(docker inspect "$container" 2>/dev/null | jq '.[0] | {
                    container_name: .Name,
                    image: .Config.Image,
                    env_vars: .Config.Env,
                    mounts: [.Mounts[] | {source: .Source, destination: .Destination, type: .Type}]
                }' || echo '{}')
                mysql=$(echo "$mysql" | jq ". + [$mysql_info]")
            fi
        done <<< "$mysql_containers"
    fi

    # Check for system MySQL
    if command -v mysql &>/dev/null; then
        local mysql_version=$(mysql --version 2>/dev/null | head -1 || echo "unknown")
        local mysql_system_info=$(jq -n --arg version "$mysql_version" --arg type "system" \
            '{type: $type, version: $version}')
        mysql=$(echo "$mysql" | jq ". + [$mysql_system_info]")
    fi

    # Redis
    local redis='[]'

    # Check for Redis containers
    local redis_containers=$(docker ps --format "{{.Names}}" 2>/dev/null | grep -i redis || echo "")
    if [[ -n "$redis_containers" ]]; then
        while IFS= read -r container; do
            if [[ -n "$container" ]]; then
                local redis_info=$(docker inspect "$container" 2>/dev/null | jq '.[0] | {
                    container_name: .Name,
                    image: .Config.Image,
                    mounts: [.Mounts[] | {source: .Source, destination: .Destination, type: .Type}]
                }' || echo '{}')
                redis=$(echo "$redis" | jq ". + [$redis_info]")
            fi
        done <<< "$redis_containers"
    fi

    # Check for system Redis
    if command -v redis-server &>/dev/null; then
        local redis_version=$(redis-server --version 2>/dev/null | head -1 || echo "unknown")
        local redis_system_info=$(jq -n --arg version "$redis_version" --arg type "system" \
            '{type: $type, version: $version}')
        redis=$(echo "$redis" | jq ". + [$redis_system_info]")
    fi

    # InfluxDB (container detection only; no host binary check).
    local influxdb='[]'

    # Check for InfluxDB containers
    local influx_containers=$(docker ps --format "{{.Names}}" 2>/dev/null | grep -i influx || echo "")
    if [[ -n "$influx_containers" ]]; then
        while IFS= read -r container; do
            if [[ -n "$container" ]]; then
                local influx_info=$(docker inspect "$container" 2>/dev/null | jq '.[0] | {
                    container_name: .Name,
                    image: .Config.Image,
                    mounts: [.Mounts[] | {source: .Source, destination: .Destination, type: .Type}]
                }' || echo '{}')
                influxdb=$(echo "$influxdb" | jq ". + [$influx_info]")
            fi
        done <<< "$influx_containers"
    fi

    # SQLite databases: *.db / *.sqlite / *.sqlite3 files under common data
    # roots, capped at 10 hits per root.
    local sqlite='[]'
    local sqlite_locations=("/var/lib" "/opt" "/home" "/data")

    for location in "${sqlite_locations[@]}"; do
        if [[ -d "$location" ]]; then
            while IFS= read -r sqlite_file; do
                if [[ -f "$sqlite_file" ]]; then
                    local sqlite_size=$(du -h "$sqlite_file" 2>/dev/null | awk '{print $1}' || echo "unknown")
                    local sqlite_info=$(jq -n --arg path "$sqlite_file" --arg size "$sqlite_size" \
                        '{path: $path, size: $size}')
                    sqlite=$(echo "$sqlite" | jq ". + [$sqlite_info]")
                fi
            done < <(find "$location" -name "*.db" -o -name "*.sqlite" -o -name "*.sqlite3" 2>/dev/null | head -10)
        fi
    done

    # Other databases (mongo, cassandra, elasticsearch, neo4j containers).
    local other='[]'
    local other_db_containers=$(docker ps --format "{{.Names}}" 2>/dev/null | grep -E "(mongo|cassandra|elasticsearch|neo4j)" || echo "")
    if [[ -n "$other_db_containers" ]]; then
        while IFS= read -r container; do
            if [[ -n "$container" ]]; then
                local other_info=$(docker inspect "$container" 2>/dev/null | jq '.[0] | {
                    container_name: .Name,
                    image: .Config.Image
                }' || echo '{}')
                other=$(echo "$other" | jq ". + [$other_info]")
            fi
        done <<< "$other_db_containers"
    fi

    # Merge all sub-sections into the skeleton, then into the report file.
    databases=$(echo "$databases" | jq --argjson postgresql "$postgresql" \
        --argjson mysql "$mysql" \
        --argjson redis "$redis" \
        --argjson influxdb "$influxdb" \
        --argjson sqlite "$sqlite" \
        --argjson other "$other" \
        '.postgresql = $postgresql | .mysql = $mysql | .redis = $redis | .influxdb = $influxdb | .sqlite = $sqlite | .other = $other')

    jq --argjson databases "$databases" '.databases = $databases' "$REPORT_FILE" > "${REPORT_FILE}.tmp" && mv "${REPORT_FILE}.tmp" "$REPORT_FILE"
}
|
||||
|
||||
# Inventory media-related software: Jellyfin/Plex/Immich/Nextcloud
# containers (matched by container name), media library directories with
# size/file counts, and the *arr/download-client ecosystem. Results land
# under the "media_services" key of $REPORT_FILE.
collect_media_services() {
    echo "📺 Collecting media services..."

    local media_services=$(cat << 'EOF'
{
  "jellyfin": [],
  "plex": [],
  "immich": [],
  "nextcloud": [],
  "media_libraries": [],
  "other_media": []
}
EOF
)

    # Jellyfin
    local jellyfin='[]'
    local jellyfin_containers=$(docker ps --format "{{.Names}}" 2>/dev/null | grep -i jellyfin || echo "")
    if [[ -n "$jellyfin_containers" ]]; then
        while IFS= read -r container; do
            if [[ -n "$container" ]]; then
                local jellyfin_info=$(docker inspect "$container" 2>/dev/null | jq '.[0] | {
                    container_name: .Name,
                    image: .Config.Image,
                    ports: [.NetworkSettings.Ports | to_entries[] | {port: .key, bindings: .value}],
                    mounts: [.Mounts[] | {source: .Source, destination: .Destination, type: .Type}]
                }' || echo '{}')
                jellyfin=$(echo "$jellyfin" | jq ". + [$jellyfin_info]")
            fi
        done <<< "$jellyfin_containers"
    fi

    # Plex
    local plex='[]'
    local plex_containers=$(docker ps --format "{{.Names}}" 2>/dev/null | grep -i plex || echo "")
    if [[ -n "$plex_containers" ]]; then
        while IFS= read -r container; do
            if [[ -n "$container" ]]; then
                local plex_info=$(docker inspect "$container" 2>/dev/null | jq '.[0] | {
                    container_name: .Name,
                    image: .Config.Image,
                    ports: [.NetworkSettings.Ports | to_entries[] | {port: .key, bindings: .value}],
                    mounts: [.Mounts[] | {source: .Source, destination: .Destination, type: .Type}]
                }' || echo '{}')
                plex=$(echo "$plex" | jq ". + [$plex_info]")
            fi
        done <<< "$plex_containers"
    fi

    # Immich
    local immich='[]'
    local immich_containers=$(docker ps --format "{{.Names}}" 2>/dev/null | grep -i immich || echo "")
    if [[ -n "$immich_containers" ]]; then
        while IFS= read -r container; do
            if [[ -n "$container" ]]; then
                local immich_info=$(docker inspect "$container" 2>/dev/null | jq '.[0] | {
                    container_name: .Name,
                    image: .Config.Image,
                    ports: [.NetworkSettings.Ports | to_entries[] | {port: .key, bindings: .value}],
                    mounts: [.Mounts[] | {source: .Source, destination: .Destination, type: .Type}]
                }' || echo '{}')
                immich=$(echo "$immich" | jq ". + [$immich_info]")
            fi
        done <<< "$immich_containers"
    fi

    # Nextcloud
    local nextcloud='[]'
    local nextcloud_containers=$(docker ps --format "{{.Names}}" 2>/dev/null | grep -i nextcloud || echo "")
    if [[ -n "$nextcloud_containers" ]]; then
        while IFS= read -r container; do
            if [[ -n "$container" ]]; then
                local nextcloud_info=$(docker inspect "$container" 2>/dev/null | jq '.[0] | {
                    container_name: .Name,
                    image: .Config.Image,
                    ports: [.NetworkSettings.Ports | to_entries[] | {port: .key, bindings: .value}],
                    mounts: [.Mounts[] | {source: .Source, destination: .Destination, type: .Type}]
                }' || echo '{}')
                nextcloud=$(echo "$nextcloud" | jq ". + [$nextcloud_info]")
            fi
        done <<< "$nextcloud_containers"
    fi

    # Media libraries: size + file count of common media roots. Patterns
    # with * are expanded unquoted on purpose (per-user Media dirs).
    # NOTE(review): `du -sh` + full `find` on large libraries can be slow.
    local media_libraries='[]'
    local media_locations=("/media" "/mnt" "/data" "/home/*/Media" "/opt/media")

    for location_pattern in "${media_locations[@]}"; do
        for location in $location_pattern; do
            if [[ -d "$location" ]]; then
                local media_size=$(du -sh "$location" 2>/dev/null | awk '{print $1}' || echo "unknown")
                local media_count=$(find "$location" -type f 2>/dev/null | wc -l || echo "0")
                local media_info=$(jq -n --arg path "$location" --arg size "$media_size" --arg files "$media_count" \
                    '{path: $path, size: $size, file_count: ($files | tonumber)}')
                media_libraries=$(echo "$media_libraries" | jq ". + [$media_info]")
            fi
        done
    done

    # Other media services (*arr stack and torrent clients).
    local other_media='[]'
    local other_media_containers=$(docker ps --format "{{.Names}}" 2>/dev/null | grep -E "(sonarr|radarr|bazarr|lidarr|prowlarr|transmission|deluge)" || echo "")
    if [[ -n "$other_media_containers" ]]; then
        while IFS= read -r container; do
            if [[ -n "$container" ]]; then
                local other_info=$(docker inspect "$container" 2>/dev/null | jq '.[0] | {
                    container_name: .Name,
                    image: .Config.Image,
                    service_type: (.Config.Image | split("/")[1] // .Config.Image | split(":")[0])
                }' || echo '{}')
                other_media=$(echo "$other_media" | jq ". + [$other_info]")
            fi
        done <<< "$other_media_containers"
    fi

    # Merge all sub-sections into the skeleton, then into the report file.
    media_services=$(echo "$media_services" | jq --argjson jellyfin "$jellyfin" \
        --argjson plex "$plex" \
        --argjson immich "$immich" \
        --argjson nextcloud "$nextcloud" \
        --argjson media_libraries "$media_libraries" \
        --argjson other_media "$other_media" \
        '.jellyfin = $jellyfin | .plex = $plex | .immich = $immich | .nextcloud = $nextcloud | .media_libraries = $media_libraries | .other_media = $other_media')

    jq --argjson media_services "$media_services" '.media_services = $media_services' "$REPORT_FILE" > "${REPORT_FILE}.tmp" && mv "${REPORT_FILE}.tmp" "$REPORT_FILE"
}
|
||||
|
||||
# Inventory observability tooling running in Docker: Prometheus, Grafana,
# log pipelines (elastic/kibana/logstash/fluentd/loki) and assorted agents
# (portainer/watchtower/node-exporter/cadvisor). Results land under the
# "monitoring_services" key of $REPORT_FILE.
#
# NOTE(review): the "influxdb" and "uptime_monitoring" keys are seeded
# empty and never populated here — that matches the original behavior.
collect_monitoring_services() {
    echo "📊 Collecting monitoring services..."

    # Section skeleton — every key exists even when nothing is found.
    local monitoring_services=$(cat << 'EOF'
{
  "prometheus": [],
  "grafana": [],
  "influxdb": [],
  "log_management": [],
  "uptime_monitoring": [],
  "other_monitoring": []
}
EOF
)

    local cname

    # Prometheus containers, matched by name (case-insensitive).
    local prom_entries='[]'
    local prom_names=$(docker ps --format "{{.Names}}" 2>/dev/null | grep -i prometheus || echo "")
    while IFS= read -r cname; do
        [[ -n "$cname" ]] || continue
        local prom_entry=$(docker inspect "$cname" 2>/dev/null | jq '.[0] | {
            container_name: .Name,
            image: .Config.Image,
            ports: [.NetworkSettings.Ports | to_entries[] | {port: .key, bindings: .value}]
        }' || echo '{}')
        prom_entries=$(echo "$prom_entries" | jq ". + [$prom_entry]")
    done <<< "$prom_names"

    # Grafana containers.
    local graf_entries='[]'
    local graf_names=$(docker ps --format "{{.Names}}" 2>/dev/null | grep -i grafana || echo "")
    while IFS= read -r cname; do
        [[ -n "$cname" ]] || continue
        local graf_entry=$(docker inspect "$cname" 2>/dev/null | jq '.[0] | {
            container_name: .Name,
            image: .Config.Image,
            ports: [.NetworkSettings.Ports | to_entries[] | {port: .key, bindings: .value}]
        }' || echo '{}')
        graf_entries=$(echo "$graf_entries" | jq ". + [$graf_entry]")
    done <<< "$graf_names"

    # Log-management stack containers; service_type is derived from the image name.
    local log_entries='[]'
    local log_names=$(docker ps --format "{{.Names}}" 2>/dev/null | grep -E "(elastic|kibana|logstash|fluentd|loki)" || echo "")
    while IFS= read -r cname; do
        [[ -n "$cname" ]] || continue
        local log_entry=$(docker inspect "$cname" 2>/dev/null | jq '.[0] | {
            container_name: .Name,
            image: .Config.Image,
            service_type: (.Config.Image | split("/")[1] // .Config.Image | split(":")[0])
        }' || echo '{}')
        log_entries=$(echo "$log_entries" | jq ". + [$log_entry]")
    done <<< "$log_names"

    # Remaining monitoring helpers and agents.
    local misc_entries='[]'
    local misc_names=$(docker ps --format "{{.Names}}" 2>/dev/null | grep -E "(portainer|watchtower|node-exporter|cadvisor)" || echo "")
    while IFS= read -r cname; do
        [[ -n "$cname" ]] || continue
        local misc_entry=$(docker inspect "$cname" 2>/dev/null | jq '.[0] | {
            container_name: .Name,
            image: .Config.Image,
            service_type: (.Config.Image | split("/")[1] // .Config.Image | split(":")[0])
        }' || echo '{}')
        misc_entries=$(echo "$misc_entries" | jq ". + [$misc_entry]")
    done <<< "$misc_names"

    # Merge the collected arrays into the skeleton, then into the report.
    monitoring_services=$(echo "$monitoring_services" | jq \
        --argjson prometheus "$prom_entries" \
        --argjson grafana "$graf_entries" \
        --argjson log_management "$log_entries" \
        --argjson other_monitoring "$misc_entries" \
        '.prometheus = $prometheus | .grafana = $grafana | .log_management = $log_management | .other_monitoring = $other_monitoring')

    jq --argjson monitoring_services "$monitoring_services" '.monitoring_services = $monitoring_services' "$REPORT_FILE" > "${REPORT_FILE}.tmp" && mv "${REPORT_FILE}.tmp" "$REPORT_FILE"
}
|
||||
|
||||
# Count "KEY=VALUE"-style lines in a file, printing a single "0" when there
# are none. The previous `grep -c ... || echo "0"` emitted "0\n0" on no match
# (grep -c prints 0 AND exits non-zero), which broke jq's `tonumber`.
_count_assignments() {
    local count
    count=$(grep -c "=" -- "$1" 2>/dev/null || :)
    printf '%s\n' "${count:-0}"
}

# Discover configuration artifacts (docker-compose files, env files, config
# directories) and merge them into the JSON report.
# Globals: REPORT_FILE (read and rewritten atomically via a .tmp file)
collect_configuration_files() {
    echo "📝 Collecting configuration files..."

    # Skeleton object; the sections gathered below are merged in at the end.
    # ssl_certificates / backup_configurations are reserved for future use.
    local configuration_files
    configuration_files=$(cat << 'EOF'
{
  "docker_compose_files": [],
  "env_files": [],
  "config_directories": [],
  "ssl_certificates": [],
  "backup_configurations": []
}
EOF
)

    # Docker Compose files (capped at 20 per location to bound runtime)
    local docker_compose_files='[]'
    local compose_locations=("/opt" "/home" "/var/lib" "$(pwd)" "/docker" "/containers")

    local location
    for location in "${compose_locations[@]}"; do
        if [[ -d "$location" ]]; then
            while IFS= read -r compose_file; do
                if [[ -f "$compose_file" ]]; then
                    # Best-effort YAML scraping: compose services/networks are
                    # conventionally indented two spaces under their section.
                    local compose_services compose_networks compose_info
                    compose_services=$(grep -E "^  [a-zA-Z]" "$compose_file" | awk -F: '{print $1}' | tr -d ' ' | jq -R . | jq -s . 2>/dev/null || echo '[]')
                    compose_networks=$(grep -A 10 "^networks:" "$compose_file" | grep -E "^  [a-zA-Z]" | awk -F: '{print $1}' | tr -d ' ' | jq -R . | jq -s . 2>/dev/null || echo '[]')

                    compose_info=$(jq -n --arg path "$compose_file" \
                        --arg size "$(wc -l < "$compose_file" 2>/dev/null || echo 0)" \
                        --argjson services "$compose_services" \
                        --argjson networks "$compose_networks" \
                        '{path: $path, lines: ($size | tonumber), services: $services, networks: $networks}')
                    docker_compose_files=$(echo "$docker_compose_files" | jq ". + [$compose_info]")
                fi
            done < <(find "$location" \( -name "docker-compose*.yml" -o -name "compose*.yml" \) 2>/dev/null | head -20)
        fi
    done

    # Environment files
    local env_files='[]'
    for location in "${compose_locations[@]}"; do
        if [[ -d "$location" ]]; then
            while IFS= read -r env_file; do
                if [[ -f "$env_file" ]]; then
                    local env_vars_count env_info
                    env_vars_count=$(_count_assignments "$env_file")
                    env_info=$(jq -n --arg path "$env_file" --arg vars "$env_vars_count" \
                        '{path: $path, variable_count: ($vars | tonumber)}')
                    env_files=$(echo "$env_files" | jq ". + [$env_info]")
                fi
            done < <(find "$location" \( -name ".env*" -o -name "*.env" \) 2>/dev/null | head -20)
        fi
    done

    # Configuration directories
    local config_directories='[]'
    local config_locations=("/etc" "/opt/*/config" "/home/*/config" "/var/lib/*/config")

    local location_pattern
    for location_pattern in "${config_locations[@]}"; do
        # shellcheck disable=SC2086 — unquoted on purpose so globs expand
        for location in $location_pattern; do
            if [[ -d "$location" ]]; then
                local config_size config_files config_info
                config_size=$(du -sh "$location" 2>/dev/null | awk '{print $1}' || echo "unknown")
                config_files=$(find "$location" -type f 2>/dev/null | wc -l || echo "0")
                config_info=$(jq -n --arg path "$location" --arg size "$config_size" --arg files "$config_files" \
                    '{path: $path, size: $size, file_count: ($files | tonumber)}')
                config_directories=$(echo "$config_directories" | jq ". + [$config_info]")
            fi
        done
    done

    configuration_files=$(echo "$configuration_files" | jq --argjson docker_compose_files "$docker_compose_files" \
        --argjson env_files "$env_files" \
        --argjson config_directories "$config_directories" \
        '.docker_compose_files = $docker_compose_files | .env_files = $env_files | .config_directories = $config_directories')

    jq --argjson configuration_files "$configuration_files" '.configuration_files = $configuration_files' "$REPORT_FILE" > "${REPORT_FILE}.tmp" && mv "${REPORT_FILE}.tmp" "$REPORT_FILE"
}
# Classify a script by extension first, then by shebang.
# Arguments: $1 - path (or filename) of the script
# Outputs:   one of python|bash|javascript|unknown on stdout
_detect_script_type() {
    local path=$1
    case "$path" in
        *.py) printf 'python\n'; return ;;
        *.sh) printf 'bash\n'; return ;;
        *.js) printf 'javascript\n'; return ;;
    esac

    local shebang
    shebang=$(head -n 1 -- "$path" 2>/dev/null || :)
    if [[ "$shebang" =~ python ]]; then
        printf 'python\n'
    elif [[ "$shebang" =~ bash ]]; then
        printf 'bash\n'
    else
        printf 'unknown\n'
    fi
}

# Discover custom scripts and Python/Node.js application roots and merge them
# into the JSON report.
# Globals: REPORT_FILE (read and rewritten atomically via a .tmp file)
collect_custom_applications() {
    echo "🔧 Collecting custom applications..."

    # Skeleton object; automation_tools / development_tools are reserved.
    local custom_applications
    custom_applications=$(cat << 'EOF'
{
  "custom_scripts": [],
  "python_applications": [],
  "nodejs_applications": [],
  "automation_tools": [],
  "development_tools": []
}
EOF
)

    # Custom executable scripts (capped at 20 per location)
    local custom_scripts='[]'
    local script_locations=("/opt" "/home/*/scripts" "/usr/local/bin" "$(pwd)")

    local location_pattern location
    for location_pattern in "${script_locations[@]}"; do
        # shellcheck disable=SC2086 — unquoted so "/home/*/scripts" expands;
        # quoting it (as before) made that entry match nothing.
        for location in $location_pattern; do
            if [[ -d "$location" ]]; then
                while IFS= read -r script_file; do
                    if [[ -f "$script_file" && -x "$script_file" ]]; then
                        local script_lines script_type script_info
                        script_lines=$(wc -l < "$script_file" 2>/dev/null || echo "0")
                        script_type=$(_detect_script_type "$script_file")

                        script_info=$(jq -n --arg path "$script_file" --arg type "$script_type" --arg lines "$script_lines" \
                            '{path: $path, type: $type, lines: ($lines | tonumber)}')
                        custom_scripts=$(echo "$custom_scripts" | jq ". + [$script_info]")
                    fi
                # Parentheses are required: previously "-type f" bound only to
                # the first -name, so directories matching *.sh/*.js slipped in.
                done < <(find "$location" -type f \( -name "*.py" -o -name "*.sh" -o -name "*.js" \) 2>/dev/null | head -20)
            fi
        done
    done

    # Python applications (directories carrying packaging metadata)
    local python_applications='[]'
    local app_locations=("/opt" "/home" "/var/lib")

    for location in "${app_locations[@]}"; do
        if [[ -d "$location" ]]; then
            while IFS= read -r python_app; do
                if [[ -f "$python_app/requirements.txt" || -f "$python_app/setup.py" || -f "$python_app/pyproject.toml" ]]; then
                    local app_name has_requirements has_venv app_info
                    app_name=$(basename "$python_app")
                    has_requirements=$([[ -f "$python_app/requirements.txt" ]] && echo "true" || echo "false")
                    has_venv=$([[ -d "$python_app/venv" || -d "$python_app/.venv" ]] && echo "true" || echo "false")

                    app_info=$(jq -n --arg name "$app_name" --arg path "$python_app" --arg requirements "$has_requirements" --arg venv "$has_venv" \
                        '{name: $name, path: $path, has_requirements: ($requirements == "true"), has_virtualenv: ($venv == "true")}')
                    python_applications=$(echo "$python_applications" | jq ". + [$app_info]")
                fi
            # -maxdepth must precede -type (GNU find warns otherwise)
            done < <(find "$location" -maxdepth 3 -type d 2>/dev/null)
        fi
    done

    # Node.js applications (directories with a package.json, capped at 10)
    local nodejs_applications='[]'

    for location in "${app_locations[@]}"; do
        if [[ -d "$location" ]]; then
            while IFS= read -r package_file; do
                local nodejs_app app_name has_node_modules app_info
                nodejs_app=$(dirname "$package_file")
                app_name=$(basename "$nodejs_app")
                has_node_modules=$([[ -d "$nodejs_app/node_modules" ]] && echo "true" || echo "false")

                app_info=$(jq -n --arg name "$app_name" --arg path "$nodejs_app" --arg modules "$has_node_modules" \
                    '{name: $name, path: $path, has_node_modules: ($modules == "true")}')
                nodejs_applications=$(echo "$nodejs_applications" | jq ". + [$app_info]")
            # dirname is applied per line; the previous `| xargs dirname`
            # broke on paths with spaces and errored when find matched nothing.
            done < <(find "$location" -name "package.json" 2>/dev/null | head -10)
        fi
    done

    custom_applications=$(echo "$custom_applications" | jq --argjson custom_scripts "$custom_scripts" \
        --argjson python_applications "$python_applications" \
        --argjson nodejs_applications "$nodejs_applications" \
        '.custom_scripts = $custom_scripts | .python_applications = $python_applications | .nodejs_applications = $nodejs_applications')

    jq --argjson custom_applications "$custom_applications" '.custom_applications = $custom_applications' "$REPORT_FILE" > "${REPORT_FILE}.tmp" && mv "${REPORT_FILE}.tmp" "$REPORT_FILE"
}
# Print a human-readable summary of the collected inventory to stdout.
# Globals: REPORT_FILE (read)
generate_summary() {
    printf '\n'
    printf '%s\n' "📋 SERVICE INVENTORY SUMMARY"
    printf '%s\n' "=========================="

    # Pull headline counts back out of the assembled JSON report.
    local n_containers n_images n_compose n_databases n_web n_media
    n_containers=$(jq '.docker_services.containers | length' "$REPORT_FILE")
    n_images=$(jq '.docker_services.images | length' "$REPORT_FILE")
    n_compose=$(jq '.configuration_files.docker_compose_files | length' "$REPORT_FILE")
    n_databases=$(jq '[.databases.postgresql, .databases.mysql, .databases.redis, .databases.influxdb] | add | length' "$REPORT_FILE")
    n_web=$(jq '.web_services.web_servers | length' "$REPORT_FILE")
    n_media=$(jq '[.media_services.jellyfin, .media_services.plex, .media_services.immich, .media_services.nextcloud] | add | length' "$REPORT_FILE")

    printf 'Docker Containers: %s\n' "$n_containers"
    printf 'Docker Images: %s\n' "$n_images"
    printf 'Compose Files: %s\n' "$n_compose"
    printf 'Databases: %s\n' "$n_databases"
    printf 'Web Servers: %s\n' "$n_web"
    printf 'Media Services: %s\n' "$n_media"
    printf '\n'
    printf 'Full report: %s\n' "$REPORT_FILE"
    printf '%s\n' "Next: Run data_layout_mapper.sh"
}
# Execute main function, forwarding any command-line arguments.
main "$@"