Initial commit

This commit is contained in:
admin
2025-08-24 11:13:39 -04:00
commit fb869f1131
168 changed files with 47986 additions and 0 deletions

View File

@@ -0,0 +1,162 @@
#!/bin/bash
#
# Comprehensive State Discovery Script
# Gathers all necessary information for a zero-downtime migration.
#
# Each section writes plain-text snapshots under a timestamped directory
# in /tmp; main() packages the whole tree as a tarball at the end.
set -euo pipefail
# --- Configuration ---
# Timestamp + FQDN make the output directory unique per host and per run.
TIMESTAMP=$(date +%Y%m%d_%H%M%S)
HOSTNAME=$(hostname -f)
OUTPUT_BASE_DIR="/tmp/system_audit_${HOSTNAME}_${TIMESTAMP}"
DISCOVERY_DIR="${OUTPUT_BASE_DIR}/discovery"
mkdir -p "$DISCOVERY_DIR"
LOG_FILE="${OUTPUT_BASE_DIR}/discovery.log"
# --- Logging ---
# Mirror all stdout/stderr into the log file while still showing it live.
exec > >(tee -a "$LOG_FILE") 2>&1
echo "Starting Comprehensive State Discovery on ${HOSTNAME} at $(date)"
echo "Output will be saved in ${OUTPUT_BASE_DIR}"
echo "-----------------------------------------------------"
# --- Helper Functions ---
print_header() {
  # Emit a blank line plus a framed section banner for the log output.
  local rule="====================================================="
  printf '\n%s\n>= %s\n%s\n' "$rule" "$1" "$rule"
}
run_command() {
  # Evaluate a command string and capture its stdout into a file.
  #   $1 - human-readable title, $2 - command string (goes through eval,
  #   so only pass trusted, script-defined commands), $3 - output file.
  # On failure the output file is overwritten with a warning marker so a
  # single failed probe never aborts the run under 'set -e'.
  local title=$1 cmd=$2 dest=$3
  print_header "$title"
  echo "Running command: $cmd"
  echo "Outputting to: $dest"
  if eval "$cmd" > "$dest"; then
    echo "Successfully captured $title."
  else
    echo "Warning: Command for '$title' failed or produced no output." > "$dest"
  fi
}
# --- 1. Infrastructure Discovery ---
infra_discovery() {
# Snapshot hardware, OS, network, and firewall state into per-topic text
# files under discovery/1_infrastructure. Every probe goes through
# run_command, whose if/else makes a failed or missing tool non-fatal.
local out_dir="${DISCOVERY_DIR}/1_infrastructure"
mkdir -p "$out_dir"
run_command "CPU Information" "lscpu" "${out_dir}/cpu_info.txt"
run_command "Memory Information" "free -h" "${out_dir}/memory_info.txt"
run_command "PCI Devices (including GPU)" "lspci -v" "${out_dir}/pci_devices.txt"
run_command "USB Devices" "lsusb -v" "${out_dir}/usb_devices.txt"
run_command "Block Devices & Storage" "lsblk -o NAME,SIZE,TYPE,FSTYPE,MOUNTPOINT" "${out_dir}/storage_layout.txt"
run_command "Filesystem Usage" "df -hT" "${out_dir}/disk_usage.txt"
# '|| true' inside the command strings keeps optional probes non-fatal.
run_command "RAID Status" "cat /proc/mdstat || true" "${out_dir}/raid_status.txt"
run_command "OS & Kernel Version" "cat /etc/os-release && uname -a" "${out_dir}/os_info.txt"
run_command "Network Interfaces" "ip -br a" "${out_dir}/network_interfaces.txt"
run_command "Routing Table" "ip r" "${out_dir}/routing_table.txt"
run_command "DNS Configuration" "cat /etc/resolv.conf" "${out_dir}/dns_config.txt"
run_command "Firewall Status (UFW)" "sudo ufw status verbose || true" "${out_dir}/firewall_ufw.txt"
run_command "Firewall Status (iptables)" "sudo iptables -L -n -v || true" "${out_dir}/firewall_iptables.txt"
}
# --- 2. Services Inventory ---
services_inventory() {
  # Inventory Docker state (daemon info, containers, images, networks,
  # volumes, per-container inspect JSON, compose files) and systemd units
  # into discovery/2_services. Uses globals DISCOVERY_DIR plus the
  # run_command/print_header helpers.
  local out_dir="${DISCOVERY_DIR}/2_services"
  mkdir -p "$out_dir"
  # Docker
  if command -v docker &> /dev/null; then
    run_command "Docker Info" "docker info" "${out_dir}/docker_info.txt"
    run_command "Docker Running Containers" "docker ps -a" "${out_dir}/docker_ps.txt"
    run_command "Docker Images" "docker images" "${out_dir}/docker_images.txt"
    run_command "Docker Networks" "docker network ls" "${out_dir}/docker_networks.txt"
    run_command "Docker Volumes" "docker volume ls" "${out_dir}/docker_volumes.txt"
    print_header "Docker Container Details"
    local id name
    for id in $(docker ps -q); do
      # Declaration split from command substitution so a failed inspect
      # is not masked by 'local' always returning 0.
      name=$(docker inspect --format '{{.Name}}' "$id" | sed 's,^/,,')
      echo "Inspecting container: $name"
      docker inspect "$id" > "${out_dir}/container_${name}.json"
    done
    print_header "Finding Docker Compose files"
    # BUGFIX: 'find /' exits non-zero on unreadable paths (/proc etc.),
    # which aborted the whole script under 'set -e'. Tolerate that, and
    # group the -name alternatives explicitly for readability.
    sudo find / \( -name "docker-compose.yml" -o -name "docker-compose.yaml" -o -name "compose.yml" \) > "${out_dir}/docker_compose_locations.txt" 2>/dev/null || true
    while IFS= read -r file; do
      # Name each copy after its parent directory; an unreadable or
      # vanished file must not abort discovery.
      sudo cp "$file" "${out_dir}/compose_file_$(basename "$(dirname "$file")").yml" || true
    done < "${out_dir}/docker_compose_locations.txt"
  else
    echo "Docker not found." > "${out_dir}/docker_status.txt"
  fi
  # Systemd Services
  run_command "Systemd Services (Enabled)" "systemctl list-unit-files --state=enabled" "${out_dir}/systemd_enabled_services.txt"
  run_command "Systemd Services (Running)" "systemctl list-units --type=service --state=running" "${out_dir}/systemd_running_services.txt"
}
# --- 3. Data & Storage Discovery ---
data_discovery() {
# Locate shared-storage state and likely database/media data directories
# under discovery/3_data_storage. find errors (permissions) are silenced
# and tolerated so 'set -e' does not abort the run.
local out_dir="${DISCOVERY_DIR}/3_data_storage"
mkdir -p "$out_dir"
run_command "NFS Exports" "showmount -e localhost || true" "${out_dir}/nfs_exports.txt"
run_command "Mounted File Systems" "mount" "${out_dir}/mounts.txt"
print_header "Searching for critical data directories"
# Common database data directories
sudo find / -name "postgresql.conf" > "${out_dir}/postgres_locations.txt" 2>/dev/null || true
sudo find / -name "my.cnf" > "${out_dir}/mysql_locations.txt" 2>/dev/null || true
sudo find /var/lib/ -name "*.db" > "${out_dir}/sqlite_locations.txt" 2>/dev/null || true
# Common media/app data directories
sudo find /srv /mnt /opt -maxdepth 3 > "${out_dir}/common_data_dirs.txt" 2>/dev/null || true
}
# --- 4. Security & Access Discovery ---
security_discovery() {
  # Capture accounts, sudoers, sshd config, login history, listening
  # sockets, and cron jobs into discovery/4_security.
  local out_dir="${DISCOVERY_DIR}/4_security"
  mkdir -p "$out_dir"
  run_command "User Accounts" "cat /etc/passwd" "${out_dir}/users.txt"
  run_command "Sudoers Configuration" "sudo cat /etc/sudoers" "${out_dir}/sudoers.txt"
  run_command "SSH Daemon Configuration" "sudo cat /etc/ssh/sshd_config" "${out_dir}/sshd_config.txt"
  run_command "Last Logins" "last -a" "${out_dir}/last_logins.txt"
  run_command "Open Ports" "sudo ss -tuln" "${out_dir}/open_ports.txt"
  run_command "Cron Jobs (System)" "sudo cat /etc/crontab || true" "${out_dir}/crontab_system.txt"
  # BUGFIX: $(cut ...) and $user were previously expanded while this
  # double-quoted command string was being *built*; under 'set -u' the
  # then-unset $user aborted the script. Escape them so eval expands
  # them at run time (matching the OMV variant of this script).
  run_command "Cron Jobs (User)" "for user in \$(cut -f1 -d: /etc/passwd); do crontab -u \"\$user\" -l 2>/dev/null | sed \"s/^/[user] /\" ; done || true" "${out_dir}/crontab_users.txt"
}
# --- 5. Performance & Usage ---
performance_discovery() {
  # Snapshot runtime load: process table, uptime/load averages, protocol
  # counters, and extended disk I/O stats into discovery/5_performance.
  local out_dir="${DISCOVERY_DIR}/5_performance"
  mkdir -p "$out_dir"
  local spec title cmd outfile
  for spec in \
    'Current Processes|ps aux|processes.txt' \
    'Uptime & Load|uptime|uptime.txt' \
    'Network Stats|netstat -s|netstat.txt' \
    'IO Stats|iostat -x 1 2|iostat.txt'; do
    IFS='|' read -r title cmd outfile <<< "$spec"
    run_command "$title" "$cmd" "${out_dir}/${outfile}"
  done
}
# --- Main Execution ---
main() {
# Run every discovery section in order, then package the results.
infra_discovery
services_inventory
data_discovery
security_discovery
performance_discovery
print_header "Packaging Results"
# -C into the parent so the tarball holds one top-level audit directory.
tar -czf "${OUTPUT_BASE_DIR}.tar.gz" -C "$(dirname "$OUTPUT_BASE_DIR")" "$(basename "$OUTPUT_BASE_DIR")"
echo "-----------------------------------------------------"
echo "Discovery complete."
echo "Results packaged in ${OUTPUT_BASE_DIR}.tar.gz"
}
main

View File

@@ -0,0 +1,242 @@
# Current State Discovery Plan
**Purpose**: Gather all critical information about the existing setup to ensure successful migration and optimization
**Status**: Required before any migration attempt
## 1. INFRASTRUCTURE DISCOVERY
### Hardware & System Information
- [ ] **Server Hardware Details**
- CPU specifications (cores, architecture, capabilities)
- RAM capacity and configuration
- Storage devices (SSDs, HDDs, sizes, mount points)
- GPU hardware (NVIDIA/AMD/Intel for acceleration)
- Network interfaces and configuration
- [ ] **Operating System Details**
- OS version and distribution
- Kernel version
- Installed packages and versions
- System services currently running
- Firewall configuration (ufw, iptables)
### Network Configuration
- [ ] **Current Network Setup**
- IP address ranges and subnets
- Domain name currently in use
- SSL certificates (Let's Encrypt, custom CA)
- DNS configuration (local DNS, external)
- Port mappings and exposed services
- Reverse proxy configuration (if any)
## 2. CURRENT SERVICES INVENTORY
### Docker Services
- [ ] **Container Discovery**
- All running containers (`docker ps -a`)
- Docker images in use (`docker images`)
- Docker networks (`docker network ls`)
- Docker volumes and their contents (`docker volume ls`)
- Docker Compose files location and content
### Service-Specific Details
- [ ] **Database Services**
- PostgreSQL: databases, users, data size, configuration
- Redis: configuration, data persistence, memory usage
- InfluxDB: databases, retention policies, data size
- Any other databases (MySQL, MongoDB, SQLite)
- [ ] **Media Services**
- Jellyfin: media library locations, user accounts, plugins
- Immich: photo storage paths, user accounts, configurations
- Other media services (Plex, Emby, etc.)
- [ ] **Web Services**
- Nextcloud: data directory, database backend, user accounts
- Any web applications and their configurations
- Static websites or custom applications
- [ ] **Monitoring & Management**
- Existing monitoring (Prometheus, Grafana, etc.)
- Log management systems
- Backup systems currently in place
- Management interfaces (Portainer, etc.)
## 3. DATA & STORAGE DISCOVERY
### Storage Layout
- [ ] **Current Storage Structure**
- Mount points and filesystem types
- Data directory locations for each service
- Storage usage and capacity
- Backup locations and schedules
- RAID configuration (if any)
### Data Volumes
- [ ] **Critical Data Identification**
- Database data directories
- Media libraries (movies, TV shows, photos)
- User configuration files
- SSL certificates and keys
- Application data and logs
## 4. SECURITY & ACCESS DISCOVERY
### Authentication
- [ ] **Current Auth Systems**
- User accounts and authentication methods
- LDAP/Active Directory integration
- OAuth providers in use
- API keys and service tokens
### Security Configuration
- [ ] **Current Security Measures**
- Firewall rules and exceptions
- VPN configuration (if any)
- fail2ban or intrusion detection
- SSL/TLS configuration
- Password policies and storage
## 5. INTEGRATION & DEPENDENCIES
### Service Dependencies
- [ ] **Inter-service Communication**
- Which services depend on others
- Database connections and credentials
- Shared storage dependencies
- Network communication requirements
### External Integrations
- [ ] **Third-party Services**
- Cloud storage integrations
- Email services for notifications
- DNS providers
- Content delivery networks
- Backup destinations
## 6. PERFORMANCE & USAGE PATTERNS
### Current Performance
- [ ] **Baseline Metrics**
- CPU, memory, and disk usage patterns
- Network bandwidth utilization
- Service response times
- Peak usage times and loads
### User Access Patterns
- [ ] **Usage Analysis**
- Which services are actively used
- User count per service
- Access patterns (internal vs external)
- Critical vs non-critical services
## 7. BACKUP & DISASTER RECOVERY
### Current Backup Strategy
- [ ] **Existing Backups**
- What is currently backed up
- Backup schedules and retention
- Backup destinations (local, remote)
- Recovery procedures and testing
- RTO/RPO requirements
## 8. CONFIGURATION FILES & CUSTOMIZATIONS
### Service Configurations
- [ ] **Custom Configurations**
- Docker Compose files
- Application configuration files
- Environment variables
- Custom scripts and automation
- Cron jobs and systemd services
---
# DISCOVERY EXECUTION PLAN
## Phase 1: Automated Discovery (1-2 hours)
**Goal**: Gather system and service information automatically
### Script 1: System Discovery
```bash
./discovery_scripts/system_info_collector.sh
```
**Collects**: Hardware, OS, network, storage information
### Script 2: Service Discovery
```bash
./discovery_scripts/service_inventory_collector.sh
```
**Collects**: All running services, containers, configurations
### Script 3: Data Discovery
```bash
./discovery_scripts/data_layout_mapper.sh
```
**Collects**: Storage layout, data locations, usage patterns
## Phase 2: Manual Review (2-3 hours)
**Goal**: Validate automated findings and gather missing details
### Review Tasks:
1. **Validate Service Inventory**
- Confirm all services are identified
- Document any custom configurations
- Identify critical vs non-critical services
2. **Security Configuration Review**
- Document authentication methods
- Review firewall and security settings
- Identify certificates and keys
3. **Integration Mapping**
- Map service dependencies
- Document external integrations
- Identify customizations
## Phase 3: Risk Assessment (1 hour)
**Goal**: Identify migration risks based on current state
### Risk Analysis:
1. **Data Loss Risks**
- Identify critical data that must be preserved
- Assess backup completeness
- Plan data migration strategy
2. **Service Disruption Risks**
- Identify dependencies that could cause failures
- Plan service migration order
- Prepare rollback strategies
3. **Configuration Risks**
- Document configurations that must be preserved
- Identify hard-to-migrate customizations
- Plan configuration migration
---
# DELIVERABLES
After completing discovery, we'll have:
1. **Current State Report** - Complete inventory of existing setup
2. **Migration Gap Analysis** - What's missing from current migration plan
3. **Risk Assessment Matrix** - Specific risks and mitigation strategies
4. **Updated Migration Plan** - Revised plan based on actual current state
5. **Rollback Procedures** - Specific procedures for your environment
---
# CRITICAL QUESTIONS TO ANSWER
Before proceeding, we need answers to these key questions:
1. **What is your actual domain name?** (replaces yourdomain.com placeholders)
2. **What services are you currently running?** (to ensure none are missed)
3. **Where is your critical data stored?** (to ensure no data loss)
4. **What are your uptime requirements?** (to plan maintenance windows)
5. **Do you have a staging environment?** (to test migration safely)
6. **What's your rollback tolerance?** (how quickly can you revert if needed)
**Recommendation**: Execute the discovery plan first, then revise the migration approach based on actual current state rather than assumptions.

View File

@@ -0,0 +1,204 @@
#!/bin/bash
#
# OMV-Optimized Discovery Script
# Optimized for OpenMediaVault systems - skips large data drives during migration
#
set -euo pipefail
# --- Configuration ---
# Timestamp + FQDN make the output directory unique per host and per run.
TIMESTAMP=$(date +%Y%m%d_%H%M%S)
HOSTNAME=$(hostname -f)
OUTPUT_BASE_DIR="/tmp/system_audit_${HOSTNAME}_${TIMESTAMP}"
DISCOVERY_DIR="${OUTPUT_BASE_DIR}/discovery"
mkdir -p "$DISCOVERY_DIR"
LOG_FILE="${OUTPUT_BASE_DIR}/discovery.log"
# OMV-specific exclusions for data drives that stay in place
# NOTE: the patterns are quoted, so the globs stay literal here; the array
# documents which mounts scans deliberately avoid (searches below stick to
# system directories only).
OMV_DATA_PATHS=(
"/srv/dev-disk-by-uuid-*" # OMV data disks
"/srv/mergerfs/*" # MergerFS pools
"/mnt/*" # External mounts
"/media/*" # Media mounts
"/export/*" # NFS exports
)
# --- Logging ---
# Mirror all stdout/stderr into the log file while still showing it live.
exec > >(tee -a "$LOG_FILE") 2>&1
echo "Starting OMV-Optimized State Discovery on ${HOSTNAME} at $(date)"
echo "Output will be saved in ${OUTPUT_BASE_DIR}"
echo "Excluding OMV data paths: ${OMV_DATA_PATHS[*]}"
echo "-----------------------------------------------------"
# --- Helper Functions ---
print_header() {
  # Emit a blank line plus a framed section banner for the log output.
  local frame="====================================================="
  printf '\n%s\n>= %s\n%s\n' "$frame" "$1" "$frame"
}
run_command() {
# Evaluate a command string and capture its stdout into a file.
#   $1 - section title, $2 - command string, $3 - output file path.
# On failure the output file is overwritten with a warning marker, so a
# single failed probe never aborts the run under 'set -e'.
# NOTE: $2 goes through eval; only pass trusted, script-defined commands.
local title="$1"
local command="$2"
local output_file="$3"
print_header "$title"
echo "Running command: $command"
echo "Outputting to: $output_file"
if eval "$command" > "$output_file"; then
echo "Successfully captured $title."
else
echo "Warning: Command for '$title' failed or produced no output." > "$output_file"
fi
}
# --- 1. Infrastructure Discovery ---
infra_discovery() {
# Snapshot hardware, OS, network, firewall, and OMV storage/share config
# into discovery/1_infrastructure. Every probe goes through run_command,
# whose if/else makes a failed or missing tool non-fatal.
local out_dir="${DISCOVERY_DIR}/1_infrastructure"
mkdir -p "$out_dir"
run_command "CPU Information" "lscpu" "${out_dir}/cpu_info.txt"
run_command "Memory Information" "free -h" "${out_dir}/memory_info.txt"
run_command "PCI Devices (including GPU)" "lspci -v" "${out_dir}/pci_devices.txt"
run_command "USB Devices" "lsusb -v" "${out_dir}/usb_devices.txt"
run_command "Block Devices & Storage" "lsblk -o NAME,SIZE,TYPE,FSTYPE,MOUNTPOINT" "${out_dir}/storage_layout.txt"
run_command "Filesystem Usage" "df -hT" "${out_dir}/disk_usage.txt"
# '|| true' inside the command strings keeps optional probes non-fatal.
run_command "RAID Status" "cat /proc/mdstat || true" "${out_dir}/raid_status.txt"
run_command "OS & Kernel Version" "cat /etc/os-release && uname -a" "${out_dir}/os_info.txt"
run_command "Network Interfaces" "ip -br a" "${out_dir}/network_interfaces.txt"
run_command "Routing Table" "ip r" "${out_dir}/routing_table.txt"
run_command "DNS Configuration" "cat /etc/resolv.conf" "${out_dir}/dns_config.txt"
run_command "Firewall Status (UFW)" "sudo ufw status verbose || true" "${out_dir}/firewall_ufw.txt"
run_command "Firewall Status (iptables)" "sudo iptables -L -n -v || true" "${out_dir}/firewall_iptables.txt"
# OMV-specific storage information
run_command "OMV Storage Config" "omv-confdbadm read conf.system.storage.filesystem || true" "${out_dir}/omv_filesystems.txt"
run_command "OMV Shares Config" "omv-confdbadm read conf.system.shares.sharedfolder || true" "${out_dir}/omv_shares.txt"
}
# --- 2. Services Inventory ---
services_inventory() {
  # Inventory Docker and systemd services plus OMV-specific daemons into
  # discovery/2_services. The compose-file search is limited to system
  # paths so large OMV data drives are never scanned.
  local out_dir="${DISCOVERY_DIR}/2_services"
  mkdir -p "$out_dir"
  # Docker
  if command -v docker &> /dev/null; then
    run_command "Docker Info" "docker info" "${out_dir}/docker_info.txt"
    run_command "Docker Running Containers" "docker ps -a" "${out_dir}/docker_ps.txt"
    run_command "Docker Images" "docker images" "${out_dir}/docker_images.txt"
    run_command "Docker Networks" "docker network ls" "${out_dir}/docker_networks.txt"
    run_command "Docker Volumes" "docker volume ls" "${out_dir}/docker_volumes.txt"
    print_header "Docker Container Details"
    local id name
    for id in $(docker ps -q); do
      # Declaration split from command substitution so a failed inspect
      # is not masked by 'local' always returning 0.
      name=$(docker inspect --format '{{.Name}}' "$id" | sed 's,^/,,')
      echo "Inspecting container: $name"
      docker inspect "$id" > "${out_dir}/container_${name}.json"
    done
    # OMV-Optimized Docker Compose Search - Skip data directories
    print_header "Finding Docker Compose files (OMV-optimized)"
    echo "Searching system directories only, excluding data drives..."
    # BUGFIX: the previous version built -prune exclusion arguments from
    # OMV_DATA_PATHS but never passed them to find (dead code). Exclusion
    # is unnecessary anyway: only system paths are searched and none of
    # them overlap the OMV data mounts. The -name alternatives are grouped
    # so -maxdepth unambiguously applies to the whole expression.
    find /opt /home /etc /usr/local -maxdepth 5 \( -name "docker-compose.yml" -o -name "docker-compose.yaml" -o -name "compose.yml" \) > "${out_dir}/docker_compose_locations.txt" 2>/dev/null || true
    echo "Found $(wc -l < "${out_dir}/docker_compose_locations.txt") compose files"
    local file
    while IFS= read -r file; do
      if [ -f "$file" ]; then
        sudo cp "$file" "${out_dir}/compose_file_$(basename "$(dirname "$file")").yml" 2>/dev/null || true
      fi
    done < "${out_dir}/docker_compose_locations.txt"
    echo -e "\nContainer Management Tools:"
    docker ps --format "table {{.Names}}\t{{.Image}}\t{{.Ports}}" | grep -E "(portainer|watchtower|traefik|nginx-proxy|heimdall|dashboard)" > "${out_dir}/management_containers.txt" || echo "No common management tools detected" > "${out_dir}/management_containers.txt"
    echo -e "\nContainer Resource Usage:"
    # BUGFIX: under 'set -o pipefail' a failing 'docker stats' (or head
    # closing the pipe early, SIGPIPE) aborted the script; tolerate it.
    docker stats --no-stream --format "table {{.Container}}\t{{.CPUPerc}}\t{{.MemUsage}}\t{{.NetIO}}" 2>/dev/null | head -20 > "${out_dir}/container_resources.txt" || true
  else
    echo "Docker not found." > "${out_dir}/docker_status.txt"
  fi
  # Systemd Services
  run_command "Systemd Services (Enabled)" "systemctl list-unit-files --state=enabled" "${out_dir}/systemd_enabled_services.txt"
  run_command "Systemd Services (Running)" "systemctl list-units --type=service --state=running" "${out_dir}/systemd_running_services.txt"
  # OMV-specific services
  run_command "OMV Engine Status" "systemctl status openmediavault-engined || true" "${out_dir}/omv_engine_status.txt"
  run_command "OMV Web Interface Status" "systemctl status nginx || systemctl status apache2 || true" "${out_dir}/omv_web_status.txt"
}
# --- 3. Data & Storage Discovery (OMV-optimized) ---
data_discovery() {
  # Capture shared-storage state (NFS, mounts, Samba), the full OMV
  # configuration database, and system config files. Data drives are
  # deliberately not scanned; only /etc, /opt and /usr/local are searched.
  local out_dir="${DISCOVERY_DIR}/3_data_storage"
  mkdir -p "$out_dir"
  run_command "NFS Exports" "showmount -e localhost || true" "${out_dir}/nfs_exports.txt"
  run_command "Mounted File Systems" "mount" "${out_dir}/mounts.txt"
  run_command "Samba Shares" "smbstatus -S || true" "${out_dir}/samba_shares.txt"
  # OMV configuration exports
  print_header "OMV Configuration Backup"
  if command -v omv-confdbadm >/dev/null 2>&1; then
    omv-confdbadm read > "${out_dir}/omv_full_config.json" 2>/dev/null || echo "Could not export OMV config" > "${out_dir}/omv_config_error.txt"
    echo "OMV configuration backed up"
  fi
  print_header "Critical system directories only"
  # Skip data drives - only scan system paths.
  # BUGFIX: the stderr redirect previously applied to 'head', not 'find',
  # so permission errors leaked into the log; the -name alternatives are
  # also grouped explicitly so the OR chain reads unambiguously.
  find /etc /opt /usr/local \( -name "*.conf" -o -name "*.cfg" -o -name "*.ini" \) 2>/dev/null | head -100 > "${out_dir}/system_config_files.txt" || true
}
# --- 4. Security & Access Discovery ---
security_discovery() {
# Capture accounts, sudoers, sshd config, login history, listening
# sockets, and cron jobs into discovery/4_security.
local out_dir="${DISCOVERY_DIR}/4_security"
mkdir -p "$out_dir"
run_command "User Accounts" "cat /etc/passwd" "${out_dir}/users.txt"
run_command "Sudoers Configuration" "sudo cat /etc/sudoers" "${out_dir}/sudoers.txt"
run_command "SSH Daemon Configuration" "sudo cat /etc/ssh/sshd_config" "${out_dir}/sshd_config.txt"
run_command "Last Logins" "last -a" "${out_dir}/last_logins.txt"
run_command "Open Ports" "sudo ss -tuln" "${out_dir}/open_ports.txt"
run_command "Cron Jobs (System)" "sudo cat /etc/crontab || true" "${out_dir}/crontab_system.txt"
# \$(...) and \$user are escaped so eval expands them at run time, not
# while this command string is being built (cf. 'set -u' in the preamble).
run_command "Cron Jobs (User)" "for user in \$(cut -f1 -d: /etc/passwd); do crontab -u \"\$user\" -l 2>/dev/null | sed \"s/^/[user] /\" ; done || true" "${out_dir}/crontab_users.txt"
}
# --- 5. Performance & Usage ---
performance_discovery() {
# Snapshot runtime load: process table, uptime/load averages, protocol
# counters, and extended disk I/O stats into discovery/5_performance.
# netstat/iostat may be absent; run_command tolerates that.
local out_dir="${DISCOVERY_DIR}/5_performance"
mkdir -p "$out_dir"
run_command "Current Processes" "ps aux" "${out_dir}/processes.txt"
run_command "Uptime & Load" "uptime" "${out_dir}/uptime.txt"
run_command "Network Stats" "netstat -s" "${out_dir}/netstat.txt"
run_command "IO Stats" "iostat -x 1 2" "${out_dir}/iostat.txt"
}
# --- Main Execution ---
main() {
# Run every discovery section in order, then package the results.
infra_discovery
services_inventory
data_discovery
security_discovery
performance_discovery
print_header "Packaging Results"
# -C into the parent so the tarball holds one top-level audit directory.
tar -czf "${OUTPUT_BASE_DIR}.tar.gz" -C "$(dirname "$OUTPUT_BASE_DIR")" "$(basename "$OUTPUT_BASE_DIR")"
echo "-----------------------------------------------------"
echo "OMV-Optimized Discovery complete."
echo "Results packaged in ${OUTPUT_BASE_DIR}.tar.gz"
echo "Data drives ($(echo "${OMV_DATA_PATHS[*]}" | tr ' ' ',')) were excluded from filesystem scan"
}
main

View File

@@ -0,0 +1,149 @@
#!/bin/bash
#
# Targeted Discovery Runner
# Executes specific discovery scripts on devices with partial data
#
# Requires bash 4+ (associative arrays). Hosts, addresses, and user names
# below are hard-coded for this specific home environment.
set -euo pipefail
SCRIPT_DIR="$(dirname "$0")"
# Device configurations
# Map device key -> host ("localhost" means run the script locally).
declare -A PARTIAL_DEVICES
PARTIAL_DEVICES["fedora"]="localhost"
PARTIAL_DEVICES["lenovo420"]="100.98.144.95"
PARTIAL_DEVICES["lenovo"]="192.168.50.181"
PARTIAL_DEVICES["surface"]="100.67.40.97"
# Map device key -> ssh/scp user for that host.
declare -A DEVICE_USERS
DEVICE_USERS["fedora"]="jonathan"
DEVICE_USERS["lenovo420"]="jon"
DEVICE_USERS["lenovo"]="jonathan"
DEVICE_USERS["surface"]="jon"
# Targeted scripts to run
SCRIPTS=(
"targeted_security_discovery.sh"
"targeted_data_discovery.sh"
"targeted_performance_discovery.sh"
)
echo "=== Targeted Discovery Runner ==="
echo "Running missing discovery categories on partial devices"
echo "Devices: ${!PARTIAL_DEVICES[@]}"
echo "Scripts: ${SCRIPTS[@]}"
echo "======================================="
run_script_on_device() {
  # Execute one discovery script on a device: locally when the mapped host
  # is "localhost", otherwise by copying it over scp and invoking via ssh.
  #   $1 - device key into PARTIAL_DEVICES/DEVICE_USERS
  #   $2 - script file name relative to SCRIPT_DIR
  local device=$1 script=$2
  local host=${PARTIAL_DEVICES[$device]}
  local user=${DEVICE_USERS[$device]}
  echo "[$device] Running $script"
  if [ "$host" != "localhost" ]; then
    # Remote execution: stage the script in /tmp, then run it with sudo.
    scp "$SCRIPT_DIR/$script" "$user@$host:/tmp/"
    ssh "$user@$host" "chmod +x /tmp/$script && sudo /tmp/$script"
  else
    # Local execution
    chmod +x "$SCRIPT_DIR/$script"
    sudo "$SCRIPT_DIR/$script"
  fi
  echo "[$device] $script completed"
}
collect_results() {
  # Gather discovery output directories produced on a device into the
  # local targeted_discovery_results folder.
  #   $1 - device key into PARTIAL_DEVICES/DEVICE_USERS
  local device=$1
  local host=${PARTIAL_DEVICES[$device]}
  local user=${DEVICE_USERS[$device]}
  local results_dir="/home/jonathan/Coding/HomeAudit/targeted_discovery_results"
  mkdir -p "$results_dir"
  echo "[$device] Collecting results..."
  if [ "$host" = "localhost" ]; then
    # Local collection: copy /tmp result dirs newer than this script.
    # BUGFIX: was '-newer "$SCRIPT_DIR/$0"', which doubles the directory
    # component whenever the runner is invoked by path (e.g. /abs/run.sh
    # became /abs//abs/run.sh) and made find fail. "$0" already names
    # this script file.
    find /tmp -name "*_discovery_*_*" -type d -newer "$0" -exec cp -r {} "$results_dir/" \;
  else
    # Remote collection: tar the result dirs on the remote side, then
    # pull the archives over scp; both steps are best-effort.
    ssh "$user@$host" "find /tmp -name '*_discovery_*' -type d -newer /tmp/targeted_*_discovery.sh -exec tar -czf {}.tar.gz {} \;" 2>/dev/null || true
    scp "$user@$host:/tmp/*_discovery_*.tar.gz" "$results_dir/" 2>/dev/null || echo "[$device] No results to collect"
  fi
}
main() {
# Parse --device/--script filters, validate them against the configured
# maps, then run each selected script on each selected device and pull
# the results back. A failing script is logged and skipped, not fatal.
local target_device=""
local target_script=""
# Parse command line arguments
while [[ $# -gt 0 ]]; do
case $1 in
--device)
target_device="$2"
shift 2
;;
--script)
target_script="$2"
shift 2
;;
--help)
echo "Usage: $0 [--device DEVICE] [--script SCRIPT]"
echo "Devices: ${!PARTIAL_DEVICES[@]}"
echo "Scripts: ${SCRIPTS[@]}"
exit 0
;;
*)
echo "Unknown option: $1"
exit 1
;;
esac
done
# Run on specific device or all devices
if [ -n "$target_device" ]; then
# Membership test via space-padded substring match; works here because
# device keys contain no spaces, but is fragile — a loop would be safer.
if [[ ! " ${!PARTIAL_DEVICES[@]} " =~ " ${target_device} " ]]; then
echo "Error: Unknown device '$target_device'"
exit 1
fi
devices=("$target_device")
else
devices=("${!PARTIAL_DEVICES[@]}")
fi
# Run specific script or all scripts
if [ -n "$target_script" ]; then
# Same space-padded substring membership check as above.
if [[ ! " ${SCRIPTS[@]} " =~ " ${target_script} " ]]; then
echo "Error: Unknown script '$target_script'"
exit 1
fi
scripts=("$target_script")
else
scripts=("${SCRIPTS[@]}")
fi
# Execute targeted discovery
for device in "${devices[@]}"; do
echo "Starting targeted discovery on $device"
for script in "${scripts[@]}"; do
# Keep going if one script fails on one device.
if ! run_script_on_device "$device" "$script"; then
echo "Warning: $script failed on $device, continuing..."
fi
sleep 2 # Brief pause between scripts
done
collect_results "$device"
echo "$device completed"
echo "---"
done
echo "=== Targeted Discovery Complete ==="
echo "Results available in: /home/jonathan/Coding/HomeAudit/targeted_discovery_results/"
ls -la /home/jonathan/Coding/HomeAudit/targeted_discovery_results/ 2>/dev/null || echo "No results directory created yet"
}
main "$@"

View File

@@ -0,0 +1,959 @@
#!/bin/bash
set -euo pipefail
# Service Inventory Collector
# Comprehensive discovery of all running services, containers, and configurations
# Part of the Current State Discovery Framework
#
# Builds a single JSON report incrementally: each collector rewrites the
# report file through jq, so jq is a hard dependency of this script.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
DISCOVERY_DIR="${SCRIPT_DIR}/results"
TIMESTAMP=$(date +%Y%m%d_%H%M%S)
REPORT_FILE="${DISCOVERY_DIR}/service_inventory_${TIMESTAMP}.json"
# Create discovery directory
mkdir -p "$DISCOVERY_DIR"
main() {
# Seed the JSON report skeleton, then let each collector fill in its own
# top-level key by rewriting the report through jq.
echo "🔍 Starting service inventory collection..."
# Initialize JSON report
# Quoted 'EOF' delimiter: the skeleton is written literally, no expansion.
cat > "$REPORT_FILE" << 'EOF'
{
"discovery_metadata": {
"timestamp": "",
"hostname": "",
"discovery_version": "1.0"
},
"docker_services": {},
"system_services": {},
"web_services": {},
"databases": {},
"media_services": {},
"monitoring_services": {},
"configuration_files": {},
"custom_applications": {}
}
EOF
collect_metadata
collect_docker_services
collect_system_services
collect_web_services
collect_databases
collect_media_services
collect_monitoring_services
collect_configuration_files
collect_custom_applications
echo "✅ Service inventory complete: $REPORT_FILE"
generate_summary
}
collect_metadata() {
  # Stamp the JSON report with the collection time and host name.
  echo "📋 Collecting metadata..."
  local now host tmp
  now=$(date -Iseconds)
  host=$(hostname)
  tmp="${REPORT_FILE}.tmp"
  # Rewrite through a temp file so a failed jq never truncates the report.
  jq --arg timestamp "$now" \
    --arg hostname "$host" \
    '.discovery_metadata.timestamp = $timestamp | .discovery_metadata.hostname = $hostname' \
    "$REPORT_FILE" > "$tmp" && mv "$tmp" "$REPORT_FILE"
}
collect_docker_services() {
# Build the .docker_services object of the report: containers, images,
# networks, volumes (with on-disk size), compose-file locations, and
# daemon info. All docker/jq failures degrade to empty values; nothing
# here is fatal. jq is required throughout.
echo "🐳 Collecting Docker services..."
# Skeleton for the docker_services object (quoted heredoc: literal JSON).
local docker_services=$(cat << 'EOF'
{
"containers": [],
"images": [],
"networks": [],
"volumes": [],
"compose_files": [],
"docker_info": {}
}
EOF
)
# No docker binary at all: record the empty skeleton and bail out early.
if ! command -v docker &>/dev/null; then
jq --argjson docker_services "$docker_services" '.docker_services = $docker_services' "$REPORT_FILE" > "${REPORT_FILE}.tmp" && mv "${REPORT_FILE}.tmp" "$REPORT_FILE"
return
fi
# Docker containers
# NOTE(review): the CSV-split parsing below breaks if a field itself
# contains a comma (e.g. multi-port containers) — acceptable for an
# inventory snapshot, but worth knowing.
local containers='[]'
if docker info &>/dev/null; then
while IFS= read -r line; do
if [[ -n "$line" ]]; then
local container_data=$(echo "$line" | jq -R 'split(",") | {id: .[0], name: .[1], image: .[2], status: .[3], ports: .[4], created: .[5]}')
containers=$(echo "$containers" | jq ". + [$container_data]")
fi
done < <(docker ps -a --format "{{.ID}},{{.Names}},{{.Image}},{{.Status}},{{.Ports}},{{.CreatedAt}}" 2>/dev/null || echo "")
fi
# Docker images
local images='[]'
if docker info &>/dev/null; then
while IFS= read -r line; do
if [[ -n "$line" ]]; then
local image_data=$(echo "$line" | jq -R 'split(",") | {repository: .[0], tag: .[1], id: .[2], created: .[3], size: .[4]}')
images=$(echo "$images" | jq ". + [$image_data]")
fi
done < <(docker images --format "{{.Repository}},{{.Tag}},{{.ID}},{{.CreatedAt}},{{.Size}}" 2>/dev/null || echo "")
fi
# Docker networks
# One 'docker network inspect' per network, reduced to name/driver/scope
# plus the first IPAM subnet/gateway (empty string when absent).
local networks='[]'
if docker info &>/dev/null; then
while IFS= read -r line; do
if [[ -n "$line" ]]; then
local network_name=$(echo "$line" | awk '{print $1}')
local network_inspect=$(docker network inspect "$network_name" 2>/dev/null | jq '.[0] | {name: .Name, driver: .Driver, scope: .Scope, subnet: (.IPAM.Config[0].Subnet // ""), gateway: (.IPAM.Config[0].Gateway // "")}')
networks=$(echo "$networks" | jq ". + [$network_inspect]")
fi
done < <(docker network ls --format "{{.Name}}" 2>/dev/null | grep -v "^$" || echo "")
fi
# Docker volumes
# Each volume is inspected and annotated with its du-measured size
# ("unknown" when the mountpoint is unreadable, e.g. without root).
local volumes='[]'
if docker info &>/dev/null; then
while IFS= read -r line; do
if [[ -n "$line" ]]; then
local volume_name=$(echo "$line" | awk '{print $1}')
local volume_inspect=$(docker volume inspect "$volume_name" 2>/dev/null | jq '.[0] | {name: .Name, driver: .Driver, mountpoint: .Mountpoint}')
# Get volume size
local mountpoint=$(echo "$volume_inspect" | jq -r '.mountpoint')
local volume_size="unknown"
if [[ -d "$mountpoint" ]]; then
volume_size=$(du -sh "$mountpoint" 2>/dev/null | awk '{print $1}' || echo "unknown")
fi
volume_inspect=$(echo "$volume_inspect" | jq --arg size "$volume_size" '. + {size: $size}')
volumes=$(echo "$volumes" | jq ". + [$volume_inspect]")
fi
done < <(docker volume ls --format "{{.Name}}" 2>/dev/null | grep -v "^$" || echo "")
fi
# Find Docker Compose files
# Search a fixed set of likely locations, capped at 20 hits per location,
# recording each file's path and line count.
local compose_files='[]'
local compose_locations=(
"/opt"
"/home"
"/var/lib"
"$HOME"
"$(pwd)"
)
for location in "${compose_locations[@]}"; do
if [[ -d "$location" ]]; then
while IFS= read -r compose_file; do
if [[ -f "$compose_file" ]]; then
local compose_info=$(jq -n --arg path "$compose_file" --arg size "$(wc -l < "$compose_file" 2>/dev/null || echo 0)" \
'{path: $path, lines: ($size | tonumber)}')
compose_files=$(echo "$compose_files" | jq ". + [$compose_info]")
fi
done < <(find "$location" -name "docker-compose*.yml" -o -name "compose*.yml" 2>/dev/null | head -20)
fi
done
# Docker info
# Daemon-level facts; every probe falls back to "unknown"/"0" on failure.
local docker_info='{}'
if docker info &>/dev/null; then
local docker_version=$(docker version --format '{{.Server.Version}}' 2>/dev/null || echo "unknown")
local storage_driver=$(docker info --format '{{.Driver}}' 2>/dev/null || echo "unknown")
local total_containers=$(docker info --format '{{.Containers}}' 2>/dev/null || echo "0")
local running_containers=$(docker info --format '{{.ContainersRunning}}' 2>/dev/null || echo "0")
docker_info=$(jq -n --arg version "$docker_version" \
--arg driver "$storage_driver" \
--arg total "$total_containers" \
--arg running "$running_containers" \
'{version: $version, storage_driver: $driver, total_containers: ($total | tonumber), running_containers: ($running | tonumber)}')
fi
# Merge all collected sections into the skeleton, then splice the result
# into the report via a temp file so a jq failure cannot truncate it.
docker_services=$(echo "$docker_services" | jq --argjson containers "$containers" \
--argjson images "$images" \
--argjson networks "$networks" \
--argjson volumes "$volumes" \
--argjson compose_files "$compose_files" \
--argjson docker_info "$docker_info" \
'.containers = $containers | .images = $images | .networks = $networks | .volumes = $volumes | .compose_files = $compose_files | .docker_info = $docker_info')
jq --argjson docker_services "$docker_services" '.docker_services = $docker_services' "$REPORT_FILE" > "${REPORT_FILE}.tmp" && mv "${REPORT_FILE}.tmp" "$REPORT_FILE"
}
collect_system_services() {
    # Inventory systemd services, cron jobs, startup scripts and running
    # processes, then merge everything into the "system_services" key of
    # the JSON report at $REPORT_FILE (global, defined at file scope).
    echo "⚙️ Collecting system services..."

    # Skeleton object; each sub-array is filled below and spliced in with a
    # single jq merge at the end.
    local system_services='{
        "systemd_services": [],
        "cron_jobs": [],
        "startup_scripts": [],
        "background_processes": []
    }'

    # --- Systemd services -------------------------------------------------
    # `systemctl list-units` pads its columns with a VARIABLE number of
    # spaces, so jq's split(" ") produced empty fields and misaligned the
    # columns.  Split on runs of whitespace and drop empties instead.
    local systemd_services='[]'
    if command -v systemctl &>/dev/null; then
        local line service_data
        while IFS= read -r line; do
            [[ -n "$line" ]] || continue
            service_data=$(printf '%s\n' "$line" | jq -R '[splits("[[:space:]]+")] | map(select(. != ""))
                | {name: .[0], load: .[1], active: .[2], sub: .[3], description: (.[4:] | join(" "))}')
            # --argjson avoids interpolating raw JSON text into the filter.
            systemd_services=$(echo "$systemd_services" | jq --argjson item "$service_data" '. + [$item]')
        done < <(systemctl list-units --type=service --no-pager --no-legend --state=active | head -50)
    fi

    # --- Cron jobs ----------------------------------------------------------
    local cron_jobs='[]'
    local cron_file job_info
    # System-wide drop-in cron files.
    if [[ -d /etc/cron.d ]]; then
        for cron_file in /etc/cron.d/*; do
            [[ -f "$cron_file" ]] || continue
            while IFS= read -r line; do
                # Skip blank lines and comments (including indented ones).
                if [[ -n "$line" && ! "$line" =~ ^[[:space:]]*# ]]; then
                    job_info=$(jq -n --arg file "$(basename "$cron_file")" --arg job "$line" \
                        '{source: $file, type: "system", job: $job}')
                    cron_jobs=$(echo "$cron_jobs" | jq --argjson item "$job_info" '. + [$item]')
                fi
            done < "$cron_file"
        done
    fi
    # Current user's crontab.
    if command -v crontab &>/dev/null; then
        local user_cron
        user_cron=$(crontab -l 2>/dev/null || echo "")
        if [[ -n "$user_cron" ]]; then
            while IFS= read -r line; do
                if [[ -n "$line" && ! "$line" =~ ^[[:space:]]*# ]]; then
                    job_info=$(jq -n --arg user "$(whoami)" --arg job "$line" \
                        '{source: $user, type: "user", job: $job}')
                    cron_jobs=$(echo "$cron_jobs" | jq --argjson item "$job_info" '. + [$item]')
                fi
            done <<< "$user_cron"
        fi
    fi

    # --- Startup scripts ----------------------------------------------------
    local startup_scripts='[]'
    local startup_locations=("/etc/init.d" "/etc/systemd/system" "/home/*/.*profile" "/etc/profile.d")
    local location_pattern location script_file script_info
    for location_pattern in "${startup_locations[@]}"; do
        for location in $location_pattern; do  # intentionally unquoted: glob expansion
            if [[ -d "$location" ]]; then
                while IFS= read -r script_file; do
                    if [[ -f "$script_file" && -x "$script_file" ]]; then
                        script_info=$(jq -n --arg path "$script_file" --arg name "$(basename "$script_file")" \
                            '{path: $path, name: $name}')
                        startup_scripts=$(echo "$startup_scripts" | jq --argjson item "$script_info" '. + [$item]')
                    fi
                done < <(find "$location" -maxdepth 1 -type f 2>/dev/null | head -20)
            elif [[ -f "$location" ]]; then
                # Patterns such as /home/*/.*profile expand to plain FILES;
                # the previous directory-only test silently dropped them.
                script_info=$(jq -n --arg path "$location" --arg name "$(basename "$location")" \
                    '{path: $path, name: $name}')
                startup_scripts=$(echo "$startup_scripts" | jq --argjson item "$script_info" '. + [$item]')
            fi
        done
    done

    # --- Background processes (kernel threads excluded) ---------------------
    local background_processes='[]'
    local process_data
    while IFS= read -r line; do
        [[ -n "$line" ]] || continue
        # `ps aux` columns are USER PID %CPU %MEM VSZ RSS TTY STAT START TIME
        # COMMAND: the old mapping swapped pid/user and folded the VSZ..TIME
        # columns into "command".  Same whitespace-run split fix as above.
        process_data=$(printf '%s\n' "$line" | jq -R '[splits("[[:space:]]+")] | map(select(. != ""))
            | {pid: .[1], user: .[0], cpu: .[2], mem: .[3], command: (.[10:] | join(" "))}')
        # The old `grep -v "^\["` never matched (the command is not at the
        # start of a `ps aux` line); filter kernel threads on the parsed field.
        if [[ "$(echo "$process_data" | jq -r '.command')" == \[* ]]; then
            continue
        fi
        background_processes=$(echo "$background_processes" | jq --argjson item "$process_data" '. + [$item]')
    done < <(ps aux --no-headers | head -30)

    # Splice the four collections into the skeleton, then merge into the report.
    system_services=$(echo "$system_services" | jq --argjson systemd_services "$systemd_services" \
        --argjson cron_jobs "$cron_jobs" \
        --argjson startup_scripts "$startup_scripts" \
        --argjson background_processes "$background_processes" \
        '.systemd_services = $systemd_services | .cron_jobs = $cron_jobs | .startup_scripts = $startup_scripts | .background_processes = $background_processes')
    jq --argjson system_services "$system_services" '.system_services = $system_services' "$REPORT_FILE" > "${REPORT_FILE}.tmp" && mv "${REPORT_FILE}.tmp" "$REPORT_FILE"
}
collect_web_services() {
# Detect web servers (nginx/apache/traefik), TLS certificates and deployed
# web applications, and merge them under the "web_services" key of the JSON
# report at $REPORT_FILE (global defined at file scope).
echo "🌐 Collecting web services..."
local web_services=$(cat << 'EOF'
{
"web_servers": [],
"reverse_proxies": [],
"ssl_certificates": [],
"web_applications": []
}
EOF
)
# Detect web servers
local web_servers='[]'
# Check for Nginx
if command -v nginx &>/dev/null; then
# `nginx -v` writes its version banner to STDERR, hence the 2>&1.
local nginx_version=$(nginx -v 2>&1 | cut -d: -f2 | tr -d ' ')
# NOTE(review): `nginx -T` usually needs root / readable config; the
# sample is silently empty otherwise — confirm that is acceptable.
local nginx_config=$(nginx -T 2>/dev/null | head -20 | jq -R . | jq -s 'join("\n")')
local nginx_info=$(jq -n --arg version "$nginx_version" --argjson config "$nginx_config" \
'{name: "nginx", version: $version, config_sample: $config}')
web_servers=$(echo "$web_servers" | jq ". + [$nginx_info]")
fi
# Check for Apache
if command -v apache2 &>/dev/null || command -v httpd &>/dev/null; then
# Debian calls the binary apache2, RHEL calls it httpd.
local apache_cmd="apache2"
command -v httpd &>/dev/null && apache_cmd="httpd"
local apache_version=$($apache_cmd -v 2>/dev/null | head -1 | cut -d: -f2 | tr -d ' ')
local apache_info=$(jq -n --arg version "$apache_version" \
'{name: "apache", version: $version}')
web_servers=$(echo "$web_servers" | jq ". + [$apache_info]")
fi
# Check for Traefik (in containers)
# `|| echo ""` keeps the pipeline from aborting the script under
# `set -euo pipefail` when docker is absent or grep finds nothing.
local traefik_containers=$(docker ps --format "{{.Names}}" 2>/dev/null | grep -i traefik || echo "")
if [[ -n "$traefik_containers" ]]; then
while IFS= read -r container; do
if [[ -n "$container" ]]; then
local traefik_info=$(jq -n --arg container "$container" \
'{name: "traefik", type: "container", container_name: $container}')
web_servers=$(echo "$web_servers" | jq ". + [$traefik_info]")
fi
done <<< "$traefik_containers"
fi
# Detect reverse proxies
local reverse_proxies='[]'
# This would be detected above in web servers, but we can add specific proxy detection
# SSL certificates
local ssl_certificates='[]'
local cert_locations=("/etc/ssl/certs" "/etc/letsencrypt/live" "/opt/*/ssl" "/home/*/ssl")
for location_pattern in "${cert_locations[@]}"; do
# Intentionally unquoted so the /opt/* and /home/* patterns glob-expand.
for location in $location_pattern; do
if [[ -d "$location" ]]; then
while IFS= read -r cert_file; do
if [[ -f "$cert_file" ]]; then
# Only the first 20 lines of the x509 text dump are kept; that is
# enough to contain Subject / Issuer / Not After.
local cert_info=$(openssl x509 -in "$cert_file" -text -noout 2>/dev/null | head -20 || echo "")
local subject=$(echo "$cert_info" | grep "Subject:" | head -1 | cut -d: -f2-)
local issuer=$(echo "$cert_info" | grep "Issuer:" | head -1 | cut -d: -f2-)
local not_after=$(echo "$cert_info" | grep "Not After" | head -1 | cut -d: -f2-)
# An empty subject means the file was not a parseable certificate.
if [[ -n "$subject" ]]; then
local cert_data=$(jq -n --arg path "$cert_file" --arg subject "$subject" --arg issuer "$issuer" --arg expires "$not_after" \
'{path: $path, subject: $subject, issuer: $issuer, expires: $expires}')
ssl_certificates=$(echo "$ssl_certificates" | jq ". + [$cert_data]")
fi
fi
done < <(find "$location" -name "*.crt" -o -name "*.pem" -o -name "cert.pem" 2>/dev/null | head -10)
fi
done
done
# Web applications (detect common patterns)
local web_applications='[]'
# Look for common web app directories
local webapp_locations=("/var/www" "/opt" "/home/*/www" "/srv")
for location_pattern in "${webapp_locations[@]}"; do
for location in $location_pattern; do
if [[ -d "$location" ]]; then
while IFS= read -r app_dir; do
if [[ -d "$app_dir" ]]; then
local app_name=$(basename "$app_dir")
local app_type="unknown"
# Detect application type
# Classification is by marker file; first match wins, so a repo with
# both index.php and package.json is reported as "php".
if [[ -f "$app_dir/index.php" ]]; then
app_type="php"
elif [[ -f "$app_dir/package.json" ]]; then
app_type="nodejs"
elif [[ -f "$app_dir/requirements.txt" ]]; then
app_type="python"
elif [[ -f "$app_dir/index.html" ]]; then
app_type="static"
fi
local app_info=$(jq -n --arg name "$app_name" --arg path "$app_dir" --arg type "$app_type" \
'{name: $name, path: $path, type: $type}')
web_applications=$(echo "$web_applications" | jq ". + [$app_info]")
fi
done < <(find "$location" -maxdepth 2 -type d 2>/dev/null | head -10)
fi
done
done
# Splice the collected arrays into the skeleton, then merge into the report.
web_services=$(echo "$web_services" | jq --argjson web_servers "$web_servers" \
--argjson reverse_proxies "$reverse_proxies" \
--argjson ssl_certificates "$ssl_certificates" \
--argjson web_applications "$web_applications" \
'.web_servers = $web_servers | .reverse_proxies = $reverse_proxies | .ssl_certificates = $ssl_certificates | .web_applications = $web_applications')
jq --argjson web_services "$web_services" '.web_services = $web_services' "$REPORT_FILE" > "${REPORT_FILE}.tmp" && mv "${REPORT_FILE}.tmp" "$REPORT_FILE"
}
collect_databases() {
# Detect database engines — both containerized (via `docker ps` name
# matching + `docker inspect`) and host-installed (via `command -v`) — and
# merge them under the "databases" key of $REPORT_FILE.
echo "🗃️ Collecting database services..."
local databases=$(cat << 'EOF'
{
"postgresql": [],
"mysql": [],
"redis": [],
"influxdb": [],
"sqlite": [],
"other": []
}
EOF
)
# PostgreSQL
local postgresql='[]'
# Check for PostgreSQL containers
# `|| echo ""` keeps set -euo pipefail from aborting when docker is
# missing or grep matches nothing.
local postgres_containers=$(docker ps --format "{{.Names}}" 2>/dev/null | grep -E "(postgres|postgresql)" || echo "")
if [[ -n "$postgres_containers" ]]; then
while IFS= read -r container; do
if [[ -n "$container" ]]; then
# NOTE(review): .Config.Env copies raw container environment values
# (passwords, tokens) into the report — confirm that is acceptable.
local pg_info=$(docker inspect "$container" 2>/dev/null | jq '.[0] | {
container_name: .Name,
image: .Config.Image,
env_vars: .Config.Env,
mounts: [.Mounts[] | {source: .Source, destination: .Destination, type: .Type}]
}' || echo '{}')
postgresql=$(echo "$postgresql" | jq ". + [$pg_info]")
fi
done <<< "$postgres_containers"
fi
# Check for system PostgreSQL
if command -v psql &>/dev/null; then
local pg_version=$(psql --version 2>/dev/null | head -1 || echo "unknown")
local pg_system_info=$(jq -n --arg version "$pg_version" --arg type "system" \
'{type: $type, version: $version}')
postgresql=$(echo "$postgresql" | jq ". + [$pg_system_info]")
fi
# MySQL/MariaDB
local mysql='[]'
# Check for MySQL containers
local mysql_containers=$(docker ps --format "{{.Names}}" 2>/dev/null | grep -E "(mysql|mariadb)" || echo "")
if [[ -n "$mysql_containers" ]]; then
while IFS= read -r container; do
if [[ -n "$container" ]]; then
local mysql_info=$(docker inspect "$container" 2>/dev/null | jq '.[0] | {
container_name: .Name,
image: .Config.Image,
env_vars: .Config.Env,
mounts: [.Mounts[] | {source: .Source, destination: .Destination, type: .Type}]
}' || echo '{}')
mysql=$(echo "$mysql" | jq ". + [$mysql_info]")
fi
done <<< "$mysql_containers"
fi
# Check for system MySQL
if command -v mysql &>/dev/null; then
local mysql_version=$(mysql --version 2>/dev/null | head -1 || echo "unknown")
local mysql_system_info=$(jq -n --arg version "$mysql_version" --arg type "system" \
'{type: $type, version: $version}')
mysql=$(echo "$mysql" | jq ". + [$mysql_system_info]")
fi
# Redis
local redis='[]'
# Check for Redis containers
local redis_containers=$(docker ps --format "{{.Names}}" 2>/dev/null | grep -i redis || echo "")
if [[ -n "$redis_containers" ]]; then
while IFS= read -r container; do
if [[ -n "$container" ]]; then
local redis_info=$(docker inspect "$container" 2>/dev/null | jq '.[0] | {
container_name: .Name,
image: .Config.Image,
mounts: [.Mounts[] | {source: .Source, destination: .Destination, type: .Type}]
}' || echo '{}')
redis=$(echo "$redis" | jq ". + [$redis_info]")
fi
done <<< "$redis_containers"
fi
# Check for system Redis
if command -v redis-server &>/dev/null; then
local redis_version=$(redis-server --version 2>/dev/null | head -1 || echo "unknown")
local redis_system_info=$(jq -n --arg version "$redis_version" --arg type "system" \
'{type: $type, version: $version}')
redis=$(echo "$redis" | jq ". + [$redis_system_info]")
fi
# InfluxDB
local influxdb='[]'
# Check for InfluxDB containers
local influx_containers=$(docker ps --format "{{.Names}}" 2>/dev/null | grep -i influx || echo "")
if [[ -n "$influx_containers" ]]; then
while IFS= read -r container; do
if [[ -n "$container" ]]; then
local influx_info=$(docker inspect "$container" 2>/dev/null | jq '.[0] | {
container_name: .Name,
image: .Config.Image,
mounts: [.Mounts[] | {source: .Source, destination: .Destination, type: .Type}]
}' || echo '{}')
influxdb=$(echo "$influxdb" | jq ". + [$influx_info]")
fi
done <<< "$influx_containers"
fi
# SQLite databases
# Heuristic file scan only — matches on extension, capped at 10 hits per
# location; does not verify the files are actually SQLite.
local sqlite='[]'
local sqlite_locations=("/var/lib" "/opt" "/home" "/data")
for location in "${sqlite_locations[@]}"; do
if [[ -d "$location" ]]; then
while IFS= read -r sqlite_file; do
if [[ -f "$sqlite_file" ]]; then
local sqlite_size=$(du -h "$sqlite_file" 2>/dev/null | awk '{print $1}' || echo "unknown")
local sqlite_info=$(jq -n --arg path "$sqlite_file" --arg size "$sqlite_size" \
'{path: $path, size: $size}')
sqlite=$(echo "$sqlite" | jq ". + [$sqlite_info]")
fi
done < <(find "$location" -name "*.db" -o -name "*.sqlite" -o -name "*.sqlite3" 2>/dev/null | head -10)
fi
done
# Other databases
local other='[]'
local other_db_containers=$(docker ps --format "{{.Names}}" 2>/dev/null | grep -E "(mongo|cassandra|elasticsearch|neo4j)" || echo "")
if [[ -n "$other_db_containers" ]]; then
while IFS= read -r container; do
if [[ -n "$container" ]]; then
local other_info=$(docker inspect "$container" 2>/dev/null | jq '.[0] | {
container_name: .Name,
image: .Config.Image
}' || echo '{}')
other=$(echo "$other" | jq ". + [$other_info]")
fi
done <<< "$other_db_containers"
fi
# Splice the collected arrays into the skeleton, then merge into the report.
databases=$(echo "$databases" | jq --argjson postgresql "$postgresql" \
--argjson mysql "$mysql" \
--argjson redis "$redis" \
--argjson influxdb "$influxdb" \
--argjson sqlite "$sqlite" \
--argjson other "$other" \
'.postgresql = $postgresql | .mysql = $mysql | .redis = $redis | .influxdb = $influxdb | .sqlite = $sqlite | .other = $other')
jq --argjson databases "$databases" '.databases = $databases' "$REPORT_FILE" > "${REPORT_FILE}.tmp" && mv "${REPORT_FILE}.tmp" "$REPORT_FILE"
}
collect_media_services() {
    # Detect media stacks (Jellyfin/Plex/Immich/Nextcloud containers),
    # measure media library directories and list companion *arr services;
    # results are merged under the "media_services" key of $REPORT_FILE.
    echo "📺 Collecting media services..."

    local media_services='{
        "jellyfin": [],
        "plex": [],
        "immich": [],
        "nextcloud": [],
        "media_libraries": [],
        "other_media": []
    }'

    # Build a JSON array of docker-inspect projections (name, image, ports,
    # mounts) for a newline-separated list of container names.  The four
    # per-product loops below were identical copies of this logic.
    _media_inspect() {
        local names="$1" result='[]' cname info
        while IFS= read -r cname; do
            if [[ -n "$cname" ]]; then
                info=$(docker inspect "$cname" 2>/dev/null | jq '.[0] | {
                    container_name: .Name,
                    image: .Config.Image,
                    ports: [.NetworkSettings.Ports | to_entries[] | {port: .key, bindings: .value}],
                    mounts: [.Mounts[] | {source: .Source, destination: .Destination, type: .Type}]
                }' || echo '{}')
                result=$(echo "$result" | jq --argjson item "$info" '. + [$item]')
            fi
        done <<< "$names"
        printf '%s' "$result"
    }

    # `|| echo ""` keeps set -euo pipefail from aborting when docker is
    # missing or grep matches nothing.
    local jellyfin plex immich nextcloud
    jellyfin=$(_media_inspect "$(docker ps --format "{{.Names}}" 2>/dev/null | grep -i jellyfin || echo "")")
    plex=$(_media_inspect "$(docker ps --format "{{.Names}}" 2>/dev/null | grep -i plex || echo "")")
    immich=$(_media_inspect "$(docker ps --format "{{.Names}}" 2>/dev/null | grep -i immich || echo "")")
    nextcloud=$(_media_inspect "$(docker ps --format "{{.Names}}" 2>/dev/null | grep -i nextcloud || echo "")")
    unset -f _media_inspect

    # --- Media libraries ----------------------------------------------------
    local media_libraries='[]'
    local media_locations=("/media" "/mnt" "/data" "/home/*/Media" "/opt/media")
    local location_pattern location media_size media_count media_info
    for location_pattern in "${media_locations[@]}"; do
        for location in $location_pattern; do  # intentionally unquoted: glob expansion
            [[ -d "$location" ]] || continue
            # du/find exit non-zero on unreadable subtrees while still
            # printing output; the old `|| echo …` fallback then APPENDED a
            # second value ("4.0K\nunknown"), breaking the jq tonumber call.
            # Fall back only when the pipeline printed nothing.
            media_size=$(du -sh "$location" 2>/dev/null | awk '{print $1}' || true)
            media_count=$(find "$location" -type f 2>/dev/null | wc -l || true)
            media_count=${media_count//[[:space:]]/}
            media_info=$(jq -n --arg path "$location" --arg size "${media_size:-unknown}" --arg files "${media_count:-0}" \
                '{path: $path, size: $size, file_count: ($files | tonumber)}')
            media_libraries=$(echo "$media_libraries" | jq --argjson item "$media_info" '. + [$item]')
        done
    done

    # --- Other media services (download/indexer companions) ------------------
    local other_media='[]'
    local other_media_containers container other_info
    other_media_containers=$(docker ps --format "{{.Names}}" 2>/dev/null | grep -E "(sonarr|radarr|bazarr|lidarr|prowlarr|transmission|deluge)" || echo "")
    if [[ -n "$other_media_containers" ]]; then
        while IFS= read -r container; do
            if [[ -n "$container" ]]; then
                # service_type: registry-less images fall back to the full
                # image name, then the tag is stripped.
                other_info=$(docker inspect "$container" 2>/dev/null | jq '.[0] | {
                    container_name: .Name,
                    image: .Config.Image,
                    service_type: (.Config.Image | split("/")[1] // .Config.Image | split(":")[0])
                }' || echo '{}')
                other_media=$(echo "$other_media" | jq --argjson item "$other_info" '. + [$item]')
            fi
        done <<< "$other_media_containers"
    fi

    # Splice the collections into the skeleton, then merge into the report.
    media_services=$(echo "$media_services" | jq --argjson jellyfin "$jellyfin" \
        --argjson plex "$plex" \
        --argjson immich "$immich" \
        --argjson nextcloud "$nextcloud" \
        --argjson media_libraries "$media_libraries" \
        --argjson other_media "$other_media" \
        '.jellyfin = $jellyfin | .plex = $plex | .immich = $immich | .nextcloud = $nextcloud | .media_libraries = $media_libraries | .other_media = $other_media')
    jq --argjson media_services "$media_services" '.media_services = $media_services' "$REPORT_FILE" > "${REPORT_FILE}.tmp" && mv "${REPORT_FILE}.tmp" "$REPORT_FILE"
}
collect_monitoring_services() {
    # Record monitoring-related containers (Prometheus, Grafana, logging
    # stacks, misc. agents) under the "monitoring_services" report key.
    echo "📊 Collecting monitoring services..."

    local monitoring_services='{
        "prometheus": [],
        "grafana": [],
        "influxdb": [],
        "log_management": [],
        "uptime_monitoring": [],
        "other_monitoring": []
    }'

    # Projection used for metric servers (ports included).
    local with_ports='.[0] | {
        container_name: .Name,
        image: .Config.Image,
        ports: [.NetworkSettings.Ports | to_entries[] | {port: .key, bindings: .value}]
    }'
    # Projection used for logging/agent containers (service_type derived
    # from the image name instead of ports).
    local with_type='.[0] | {
        container_name: .Name,
        image: .Config.Image,
        service_type: (.Config.Image | split("/")[1] // .Config.Image | split(":")[0])
    }'

    # Turn a newline-separated container-name list into a JSON array of
    # docker-inspect projections.
    _inspect_list() {
        local names="$1" projection="$2"
        local acc='[]' cname entry
        while IFS= read -r cname; do
            if [[ -n "$cname" ]]; then
                entry=$(docker inspect "$cname" 2>/dev/null | jq "$projection" || echo '{}')
                acc=$(echo "$acc" | jq ". + [$entry]")
            fi
        done <<< "$names"
        printf '%s' "$acc"
    }

    # `|| echo ""` keeps set -euo pipefail happy when docker is absent or
    # nothing matches.
    local prometheus grafana log_management other_monitoring
    prometheus=$(_inspect_list "$(docker ps --format "{{.Names}}" 2>/dev/null | grep -i prometheus || echo "")" "$with_ports")
    grafana=$(_inspect_list "$(docker ps --format "{{.Names}}" 2>/dev/null | grep -i grafana || echo "")" "$with_ports")
    log_management=$(_inspect_list "$(docker ps --format "{{.Names}}" 2>/dev/null | grep -E "(elastic|kibana|logstash|fluentd|loki)" || echo "")" "$with_type")
    other_monitoring=$(_inspect_list "$(docker ps --format "{{.Names}}" 2>/dev/null | grep -E "(portainer|watchtower|node-exporter|cadvisor)" || echo "")" "$with_type")
    unset -f _inspect_list

    # Splice the collections into the skeleton and merge into the report.
    monitoring_services=$(echo "$monitoring_services" | jq --argjson prometheus "$prometheus" \
        --argjson grafana "$grafana" \
        --argjson log_management "$log_management" \
        --argjson other_monitoring "$other_monitoring" \
        '.prometheus = $prometheus | .grafana = $grafana | .log_management = $log_management | .other_monitoring = $other_monitoring')
    jq --argjson monitoring_services "$monitoring_services" '.monitoring_services = $monitoring_services' "$REPORT_FILE" > "${REPORT_FILE}.tmp" && mv "${REPORT_FILE}.tmp" "$REPORT_FILE"
}
collect_configuration_files() {
    # Catalogue docker-compose files, .env files and configuration
    # directories into the "configuration_files" key of $REPORT_FILE.
    echo "📝 Collecting configuration files..."

    local configuration_files='{
        "docker_compose_files": [],
        "env_files": [],
        "config_directories": [],
        "ssl_certificates": [],
        "backup_configurations": []
    }'

    # --- Docker Compose files ----------------------------------------------
    local docker_compose_files='[]'
    local compose_locations=("/opt" "/home" "/var/lib" "$(pwd)" "/docker" "/containers")
    local location compose_file compose_services compose_networks compose_info
    for location in "${compose_locations[@]}"; do
        [[ -d "$location" ]] || continue
        while IFS= read -r compose_file; do
            [[ -f "$compose_file" ]] || continue
            # Under pipefail a no-match grep fails the WHOLE pipeline after jq
            # already printed "[]", so `|| echo '[]'` used to append a second
            # array and corrupt the JSON.  Fall back only on empty output.
            # NOTE(review): the "^ [a-zA-Z]" pattern assumes one-space
            # indentation for service/network names — confirm against the
            # compose files actually in use.
            compose_services=$(grep -E "^ [a-zA-Z]" "$compose_file" 2>/dev/null | awk -F: '{print $1}' | tr -d ' ' | jq -R . | jq -s . || true)
            [[ -n "$compose_services" ]] || compose_services='[]'
            compose_networks=$(grep -A 10 "^networks:" "$compose_file" 2>/dev/null | grep -E "^ [a-zA-Z]" | awk -F: '{print $1}' | tr -d ' ' | jq -R . | jq -s . || true)
            [[ -n "$compose_networks" ]] || compose_networks='[]'
            compose_info=$(jq -n --arg path "$compose_file" \
                --arg size "$(wc -l < "$compose_file" 2>/dev/null || echo 0)" \
                --argjson services "$compose_services" \
                --argjson networks "$compose_networks" \
                '{path: $path, lines: ($size | tonumber), services: $services, networks: $networks}')
            docker_compose_files=$(echo "$docker_compose_files" | jq --argjson item "$compose_info" '. + [$item]')
        done < <(find "$location" -name "docker-compose*.yml" -o -name "compose*.yml" 2>/dev/null | head -20)
    done

    # --- Environment files ---------------------------------------------------
    local env_files='[]'
    local env_file env_vars_count env_info
    for location in "${compose_locations[@]}"; do
        [[ -d "$location" ]] || continue
        while IFS= read -r env_file; do
            [[ -f "$env_file" ]] || continue
            # `grep -c` already prints 0 when nothing matches (it merely exits
            # non-zero); the old `|| echo "0"` emitted a SECOND "0", which made
            # jq's tonumber fail and silently dropped the entry.
            env_vars_count=$(grep -c "=" "$env_file" 2>/dev/null || true)
            env_info=$(jq -n --arg path "$env_file" --arg vars "${env_vars_count:-0}" \
                '{path: $path, variable_count: ($vars | tonumber)}')
            env_files=$(echo "$env_files" | jq --argjson item "$env_info" '. + [$item]')
        done < <(find "$location" -name ".env*" -o -name "*.env" 2>/dev/null | head -20)
    done

    # --- Configuration directories -------------------------------------------
    local config_directories='[]'
    local config_locations=("/etc" "/opt/*/config" "/home/*/config" "/var/lib/*/config")
    local location_pattern config_size config_file_count config_info
    for location_pattern in "${config_locations[@]}"; do
        for location in $location_pattern; do  # intentionally unquoted: glob expansion
            [[ -d "$location" ]] || continue
            # du/find exit non-zero on unreadable subtrees while still printing
            # a total; `|| echo …` would then APPEND to that output.  Fall back
            # only when the pipeline produced nothing.
            config_size=$(du -sh "$location" 2>/dev/null | awk '{print $1}' || true)
            config_file_count=$(find "$location" -type f 2>/dev/null | wc -l || true)
            config_file_count=${config_file_count//[[:space:]]/}
            config_info=$(jq -n --arg path "$location" --arg size "${config_size:-unknown}" --arg files "${config_file_count:-0}" \
                '{path: $path, size: $size, file_count: ($files | tonumber)}')
            config_directories=$(echo "$config_directories" | jq --argjson item "$config_info" '. + [$item]')
        done
    done

    # Splice the collections into the skeleton and merge into the report.
    configuration_files=$(echo "$configuration_files" | jq --argjson docker_compose_files "$docker_compose_files" \
        --argjson env_files "$env_files" \
        --argjson config_directories "$config_directories" \
        '.docker_compose_files = $docker_compose_files | .env_files = $env_files | .config_directories = $config_directories')
    jq --argjson configuration_files "$configuration_files" '.configuration_files = $configuration_files' "$REPORT_FILE" > "${REPORT_FILE}.tmp" && mv "${REPORT_FILE}.tmp" "$REPORT_FILE"
}
collect_custom_applications() {
    # Discover custom scripts and Python/Node.js applications and merge
    # them into the "custom_applications" key of $REPORT_FILE.
    echo "🔧 Collecting custom applications..."

    local custom_applications='{
        "custom_scripts": [],
        "python_applications": [],
        "nodejs_applications": [],
        "automation_tools": [],
        "development_tools": []
    }'

    # --- Custom scripts ------------------------------------------------------
    local custom_scripts='[]'
    local script_locations=("/opt" "/home/*/scripts" "/usr/local/bin" "$(pwd)")
    local location script_file script_lines script_type shebang script_info
    for location in "${script_locations[@]}"; do
        [[ -d "$location" ]] || continue
        # The -name alternatives must be parenthesised: without \( \),
        # "-type f" only applied to the first -name, so directories matching
        # *.sh / *.js were returned as well.
        while IFS= read -r script_file; do
            [[ -f "$script_file" && -x "$script_file" ]] || continue
            script_lines=$(wc -l < "$script_file" 2>/dev/null || echo "0")
            # Classify by extension first, then fall back to the shebang.
            script_type="unknown"
            case "$script_file" in
                *.py) script_type="python" ;;
                *.sh) script_type="bash" ;;
                *.js) script_type="javascript" ;;
                *)
                    shebang=$(head -1 "$script_file" 2>/dev/null || echo "")
                    if [[ "$shebang" =~ python ]]; then
                        script_type="python"
                    elif [[ "$shebang" =~ bash ]]; then
                        script_type="bash"
                    fi
                    ;;
            esac
            script_info=$(jq -n --arg path "$script_file" --arg type "$script_type" --arg lines "$script_lines" \
                '{path: $path, type: $type, lines: ($lines | tonumber)}')
            custom_scripts=$(echo "$custom_scripts" | jq --argjson item "$script_info" '. + [$item]')
        done < <(find "$location" -type f \( -name "*.py" -o -name "*.sh" -o -name "*.js" \) 2>/dev/null | head -20)
    done

    # --- Python applications -------------------------------------------------
    local python_applications='[]'
    local python_locations=("/opt" "/home" "/var/lib")
    local python_app app_name has_requirements has_venv app_info
    for location in "${python_locations[@]}"; do
        [[ -d "$location" ]] || continue
        # -maxdepth must precede -type for GNU find (it warns otherwise).
        while IFS= read -r python_app; do
            if [[ -f "$python_app/requirements.txt" || -f "$python_app/setup.py" || -f "$python_app/pyproject.toml" ]]; then
                app_name=$(basename "$python_app")
                has_requirements=false
                [[ -f "$python_app/requirements.txt" ]] && has_requirements=true
                # `test -d a -o -d b` is deprecated/ambiguous; use [[ || ]].
                has_venv=false
                [[ -d "$python_app/venv" || -d "$python_app/.venv" ]] && has_venv=true
                app_info=$(jq -n --arg name "$app_name" --arg path "$python_app" \
                    --argjson requirements "$has_requirements" --argjson venv "$has_venv" \
                    '{name: $name, path: $path, has_requirements: $requirements, has_virtualenv: $venv}')
                python_applications=$(echo "$python_applications" | jq --argjson item "$app_info" '. + [$item]')
            fi
        done < <(find "$location" -maxdepth 3 -type d 2>/dev/null)
    done

    # --- Node.js applications ------------------------------------------------
    local nodejs_applications='[]'
    local package_file nodejs_app has_node_modules
    for location in "${python_locations[@]}"; do
        [[ -d "$location" ]] || continue
        # Read paths line by line instead of `| xargs dirname`, which broke on
        # paths containing whitespace and errored on empty input; vendored
        # package.json files under node_modules are excluded.
        while IFS= read -r package_file; do
            nodejs_app=$(dirname "$package_file")
            has_node_modules=false
            [[ -d "$nodejs_app/node_modules" ]] && has_node_modules=true
            app_info=$(jq -n --arg name "$(basename "$nodejs_app")" --arg path "$nodejs_app" \
                --argjson modules "$has_node_modules" \
                '{name: $name, path: $path, has_node_modules: $modules}')
            nodejs_applications=$(echo "$nodejs_applications" | jq --argjson item "$app_info" '. + [$item]')
        done < <(find "$location" -name "package.json" -not -path "*/node_modules/*" 2>/dev/null | head -10)
    done

    # Splice the collections into the skeleton and merge into the report.
    custom_applications=$(echo "$custom_applications" | jq --argjson custom_scripts "$custom_scripts" \
        --argjson python_applications "$python_applications" \
        --argjson nodejs_applications "$nodejs_applications" \
        '.custom_scripts = $custom_scripts | .python_applications = $python_applications | .nodejs_applications = $nodejs_applications')
    jq --argjson custom_applications "$custom_applications" '.custom_applications = $custom_applications' "$REPORT_FILE" > "${REPORT_FILE}.tmp" && mv "${REPORT_FILE}.tmp" "$REPORT_FILE"
}
generate_summary() {
    # Print a human-readable roll-up of the headline counts recorded in the
    # finished JSON report at $REPORT_FILE.
    printf '\n'
    printf '%s\n' "📋 SERVICE INVENTORY SUMMARY"
    printf '%s\n' "=========================="

    # Pull the counts back out of the report; `|| true` keeps a missing key
    # or jq failure from aborting the script (count prints empty instead).
    local n_containers n_images n_compose n_databases n_web n_media
    n_containers=$(jq '.docker_services.containers | length' "$REPORT_FILE" || true)
    n_images=$(jq '.docker_services.images | length' "$REPORT_FILE" || true)
    n_compose=$(jq '.configuration_files.docker_compose_files | length' "$REPORT_FILE" || true)
    n_databases=$(jq '[.databases.postgresql, .databases.mysql, .databases.redis, .databases.influxdb] | add | length' "$REPORT_FILE" || true)
    n_web=$(jq '.web_services.web_servers | length' "$REPORT_FILE" || true)
    n_media=$(jq '[.media_services.jellyfin, .media_services.plex, .media_services.immich, .media_services.nextcloud] | add | length' "$REPORT_FILE" || true)

    printf '%s\n' "Docker Containers: $n_containers"
    printf '%s\n' "Docker Images: $n_images"
    printf '%s\n' "Compose Files: $n_compose"
    printf '%s\n' "Databases: $n_databases"
    printf '%s\n' "Web Servers: $n_web"
    printf '%s\n' "Media Services: $n_media"
    printf '\n'
    printf '%s\n' "Full report: $REPORT_FILE"
    printf '%s\n' "Next: Run data_layout_mapper.sh"
}
# Entry point: run every collector in sequence and print the summary.
main "$@"

# ---- repository-export artifact: end of the service-inventory script ----

# ---- next file: System Information Collector (originally 517 lines) ----
#!/bin/bash
set -euo pipefail
# System Information Collector
# Comprehensive discovery of hardware, OS, network, and storage configuration
# Part of the Current State Discovery Framework
# Resolve the directory this script lives in so results land next to it,
# regardless of the caller's working directory.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
DISCOVERY_DIR="${SCRIPT_DIR}/results"
TIMESTAMP=$(date +%Y%m%d_%H%M%S)
# Timestamped JSON report file — one per run, never overwritten.
REPORT_FILE="${DISCOVERY_DIR}/system_discovery_${TIMESTAMP}.json"
# Create discovery directory
mkdir -p "$DISCOVERY_DIR"
# Orchestrate the discovery run: seed the report with an empty JSON skeleton,
# let each collector fill in its own section, then print a summary.
# Globals: REPORT_FILE (created here, rewritten in place by each collector).
main() {
echo "🔍 Starting system information collection..."
# Initialize JSON report
# Quoted 'EOF' delimiter keeps the skeleton literal (no shell expansion).
cat > "$REPORT_FILE" << 'EOF'
{
"discovery_metadata": {
"timestamp": "",
"hostname": "",
"discovery_version": "1.0"
},
"hardware": {},
"operating_system": {},
"network": {},
"storage": {},
"performance": {}
}
EOF
# Each collector reads $REPORT_FILE, merges its section with jq, and
# writes it back atomically via a .tmp file + mv.
collect_metadata
collect_hardware_info
collect_os_info
collect_network_info
collect_storage_info
collect_performance_baseline
echo "✅ System discovery complete: $REPORT_FILE"
generate_summary
}
# Stamp the report with run metadata (ISO-8601 timestamp and hostname).
# Globals: REPORT_FILE (rewritten in place via jq + tmp file).
collect_metadata() {
  echo "📋 Collecting metadata..."
  local now host
  now=$(date -Iseconds)
  host=$(hostname)
  jq --arg timestamp "$now" \
    --arg hostname "$host" \
    '.discovery_metadata.timestamp = $timestamp | .discovery_metadata.hostname = $hostname' \
    "$REPORT_FILE" > "${REPORT_FILE}.tmp" && mv "${REPORT_FILE}.tmp" "$REPORT_FILE"
}
# Collect hardware facts (CPU, memory, GPU, block devices) from /proc, lspci,
# nvidia-smi and lsblk, and merge them into the report under .hardware.
# Globals: REPORT_FILE (rewritten in place via jq + tmp file).
collect_hardware_info() {
  echo "🖥️ Collecting hardware information..."
  local cpu_info memory_info gpu_info storage_devices

  # --- CPU -----------------------------------------------------------------
  cpu_info=$(cat << 'EOF'
{
"model": "",
"cores": 0,
"threads": 0,
"architecture": "",
"flags": []
}
EOF
)
  if [[ -f /proc/cpuinfo ]]; then
    local cpu_model=$(grep "model name" /proc/cpuinfo | head -1 | cut -d: -f2 | xargs)
    local cpu_cores=$(grep "cpu cores" /proc/cpuinfo | head -1 | cut -d: -f2 | xargs)
    # One "processor" stanza per logical CPU.
    local cpu_threads=$(grep "processor" /proc/cpuinfo | wc -l)
    local cpu_arch=$(uname -m)
    # jq -R/-s turns the space-separated flag list into a JSON string array.
    local cpu_flags=$(grep "flags" /proc/cpuinfo | head -1 | cut -d: -f2 | xargs | tr ' ' '\n' | jq -R . | jq -s .)
    # ${cpu_cores:-1}: some architectures (e.g. ARM) omit "cpu cores".
    cpu_info=$(echo "$cpu_info" | jq --arg model "$cpu_model" \
      --argjson cores "${cpu_cores:-1}" \
      --argjson threads "$cpu_threads" \
      --arg arch "$cpu_arch" \
      --argjson flags "$cpu_flags" \
      '.model = $model | .cores = $cores | .threads = $threads | .architecture = $arch | .flags = $flags')
  fi

  # --- Memory --------------------------------------------------------------
  memory_info=$(cat << 'EOF'
{
"total_gb": 0,
"available_gb": 0,
"swap_gb": 0,
"details": {}
}
EOF
)
  if [[ -f /proc/meminfo ]]; then
    # /proc/meminfo values are in kB; convert to whole GB.
    local mem_total=$(grep "MemTotal" /proc/meminfo | awk '{print int($2/1024/1024)}')
    local mem_available=$(grep "MemAvailable" /proc/meminfo | awk '{print int($2/1024/1024)}')
    local swap_total=$(grep "SwapTotal" /proc/meminfo | awk '{print int($2/1024/1024)}')
    # Build '"memtotal":16384,...' pairs. BUGFIX: the key's trailing colon is
    # stripped inside awk; the old trailing `sed 's/://g'` also deleted the
    # JSON key:value separators, producing invalid JSON that made the
    # --argjson below fail and abort the script under set -e.
    local mem_details=$(grep -E "(MemTotal|MemAvailable|MemFree|Buffers|Cached|SwapTotal)" /proc/meminfo |
      awk '{key=tolower($1); sub(/:$/, "", key); print "\"" key "\":" int($2)}' | tr '\n' ',' | sed 's/,$//')
    memory_info=$(echo "$memory_info" | jq --argjson total "$mem_total" \
      --argjson available "$mem_available" \
      --argjson swap "$swap_total" \
      --argjson details "{$mem_details}" \
      '.total_gb = $total | .available_gb = $available | .swap_gb = $swap | .details = $details')
  fi

  # --- GPU -----------------------------------------------------------------
  gpu_info='[]'
  # Check for NVIDIA GPUs first (richer data via nvidia-smi).
  if command -v nvidia-smi &>/dev/null; then
    # jq -s yields [] when the query produces no lines, so this is safe
    # even if nvidia-smi fails.
    local nvidia_gpus=$(nvidia-smi --query-gpu=name,memory.total --format=csv,noheader,nounits 2>/dev/null |
      awk -F',' '{print "{\"name\":\"" $1 "\",\"memory_mb\":" $2 ",\"vendor\":\"NVIDIA\"}"}' |
      jq -s .)
    gpu_info=$(echo "$gpu_info" | jq ". + $nvidia_gpus")
  fi
  # Check for AMD/Intel GPUs via lspci.
  # NOTE(review): hand-built JSON breaks if a device name contains a double
  # quote — unlikely for lspci output, but not guaranteed.
  local other_gpus=$(lspci 2>/dev/null | grep -i vga |
    awk '{print "{\"name\":\"" substr($0, index($0,$5)) "\",\"vendor\":\"" $4 "\",\"detected_via\":\"lspci\"}"}' |
    jq -s .)
  gpu_info=$(echo "$gpu_info" | jq ". + ${other_gpus:-[]}")

  # --- Block devices -------------------------------------------------------
  storage_devices='[]'
  if command -v lsblk &>/dev/null; then
    # Get block devices with detailed info. FSTYPE is listed before
    # MOUNTPOINT so that an empty mountpoint (the common case for whole
    # disks) cannot shift the fstype value into the wrong capture group.
    while IFS= read -r line; do
      if [[ "$line" =~ ^([^[:space:]]+)[[:space:]]+([^[:space:]]+)[[:space:]]+([^[:space:]]+)[[:space:]]+([^[:space:]]*)[[:space:]]+([^[:space:]]*) ]]; then
        local device="${BASH_REMATCH[1]}"
        local size="${BASH_REMATCH[2]}"
        local type="${BASH_REMATCH[3]}"
        local fstype="${BASH_REMATCH[4]}"
        local mountpoint="${BASH_REMATCH[5]}"
        # Rotational flag distinguishes HDD (1) from SSD/NVMe (0).
        local rotational="unknown"
        if [[ -f "/sys/block/$device/queue/rotational" ]]; then
          if [[ $(cat "/sys/block/$device/queue/rotational" 2>/dev/null) == "0" ]]; then
            rotational="ssd"
          else
            rotational="hdd"
          fi
        fi
        local device_info=$(jq -n --arg name "$device" \
          --arg size "$size" \
          --arg type "$type" \
          --arg mount "$mountpoint" \
          --arg fs "$fstype" \
          --arg rotation "$rotational" \
          '{name: $name, size: $size, type: $type, mountpoint: $mount, filesystem: $fs, storage_type: $rotation}')
        storage_devices=$(echo "$storage_devices" | jq ". + [$device_info]")
      fi
    # grep ^[a-z] keeps only top-level devices (children carry tree glyphs).
    done < <(lsblk -o NAME,SIZE,TYPE,FSTYPE,MOUNTPOINT --noheadings | grep -E "^[a-z]")
  fi

  # --- Merge all hardware sections into the report -------------------------
  local hardware_data=$(jq -n --argjson cpu "$cpu_info" \
    --argjson memory "$memory_info" \
    --argjson gpu "$gpu_info" \
    --argjson storage "$storage_devices" \
    '{cpu: $cpu, memory: $memory, gpu: $gpu, storage: $storage}')
  jq --argjson hardware "$hardware_data" '.hardware = $hardware' "$REPORT_FILE" > "${REPORT_FILE}.tmp" && mv "${REPORT_FILE}.tmp" "$REPORT_FILE"
}
# Collect OS facts: distribution/version, kernel, a filtered list of
# migration-relevant packages, active services, and firewall status.
# Merged into the report under .operating_system.
# Globals: REPORT_FILE (rewritten in place via jq + tmp file).
collect_os_info() {
  echo "🐧 Collecting operating system information..."
  local os_info=$(cat << 'EOF'
{
"distribution": "",
"version": "",
"kernel": "",
"architecture": "",
"installed_packages": [],
"running_services": [],
"firewall_status": ""
}
EOF
)
  # OS Distribution and Version. Sourcing os-release in a subshell handles
  # both quoted and unquoted values (the old cut -d'"' broke on unquoted
  # NAME=/VERSION= lines) without leaking variables into this script.
  local distro="Unknown"
  local version="Unknown"
  if [[ -f /etc/os-release ]]; then
    distro=$(. /etc/os-release && printf '%s' "${NAME:-Unknown}")
    version=$(. /etc/os-release && printf '%s' "${VERSION:-Unknown}")
  fi
  local kernel=$(uname -r)
  local arch=$(uname -m)

  # Installed packages (filtered to migration-relevant ones to keep the list
  # small). The `|| packages='[]'` guards are required: under pipefail a
  # grep that matches nothing fails the whole pipeline and would abort the
  # script via set -e.
  local packages='[]'
  if command -v dpkg &>/dev/null; then
    packages=$(dpkg -l | grep "^ii" | awk '{print $2}' | grep -E "(docker|nginx|apache|mysql|postgresql|redis|nodejs|python)" | jq -R . | jq -s .) || packages='[]'
  elif command -v rpm &>/dev/null; then
    packages=$(rpm -qa | grep -E "(docker|nginx|apache|mysql|postgresql|redis|nodejs|python)" | jq -R . | jq -s .) || packages='[]'
  fi

  # Running services (first 20 active systemd units, .service suffix removed).
  local services='[]'
  if command -v systemctl &>/dev/null; then
    services=$(systemctl list-units --type=service --state=active --no-pager --no-legend |
      awk '{print $1}' | sed 's/.service$//' | head -20 | jq -R . | jq -s .) || services='[]'
  fi

  # Firewall status: ufw first, then firewalld; "unknown" if neither exists.
  local firewall_status="unknown"
  if command -v ufw &>/dev/null; then
    if ufw status 2>/dev/null | grep -q "Status: active"; then
      firewall_status="ufw_active"
    else
      firewall_status="ufw_inactive"
    fi
  elif command -v firewall-cmd &>/dev/null; then
    if firewall-cmd --state 2>/dev/null | grep -q "running"; then
      firewall_status="firewalld_active"
    else
      firewall_status="firewalld_inactive"
    fi
  fi

  os_info=$(echo "$os_info" | jq --arg distro "$distro" \
    --arg version "$version" \
    --arg kernel "$kernel" \
    --arg arch "$arch" \
    --argjson packages "$packages" \
    --argjson services "$services" \
    --arg firewall "$firewall_status" \
    '.distribution = $distro | .version = $version | .kernel = $kernel | .architecture = $arch | .installed_packages = $packages | .running_services = $services | .firewall_status = $firewall')
  jq --argjson os "$os_info" '.operating_system = $os' "$REPORT_FILE" > "${REPORT_FILE}.tmp" && mv "${REPORT_FILE}.tmp" "$REPORT_FILE"
}
# Collect network configuration: interfaces (first IPv4 + link state), the
# first 10 routes, resolv.conf DNS settings, listening sockets, and Docker
# networks. Merged into the report under .network.
# Globals: REPORT_FILE (rewritten in place via jq + tmp file).
collect_network_info() {
  echo "🌐 Collecting network configuration..."
  # Skeleton object; each field below is filled and merged in one jq pass.
  local network_info=$(cat << 'EOF'
{
"interfaces": [],
"routing": [],
"dns_config": {},
"open_ports": [],
"docker_networks": []
}
EOF
)
  # Network interfaces: parse `ip link show` header lines ("2: eth0: ...")
  # and query each interface for its first IPv4 address and link state.
  local interfaces='[]'
  if command -v ip &>/dev/null; then
    while IFS= read -r line; do
      if [[ "$line" =~ ^[0-9]+:[[:space:]]+([^:]+): ]]; then
        local iface="${BASH_REMATCH[1]}"
        local ip_addr=$(ip addr show "$iface" 2>/dev/null | grep "inet " | head -1 | awk '{print $2}' || echo "")
        local state=$(ip link show "$iface" 2>/dev/null | head -1 | grep -o "state [A-Z]*" | cut -d' ' -f2 || echo "UNKNOWN")
        local iface_info=$(jq -n --arg name "$iface" --arg ip "$ip_addr" --arg state "$state" \
          '{name: $name, ip_address: $ip, state: $state}')
        interfaces=$(echo "$interfaces" | jq ". + [$iface_info]")
      fi
    done < <(ip link show)
  fi

  # Routing table (first 10 entries, stored as raw JSON strings).
  local routing='[]'
  if command -v ip &>/dev/null; then
    while IFS= read -r line; do
      local route_info=$(echo "$line" | jq -R .)
      routing=$(echo "$routing" | jq ". + [$route_info]")
    done < <(ip route show | head -10)
  fi

  # DNS configuration from /etc/resolv.conf.
  local dns_config='{}'
  if [[ -f /etc/resolv.conf ]]; then
    local nameservers=$(grep "nameserver" /etc/resolv.conf | awk '{print $2}' | jq -R . | jq -s .)
    local domain=$(grep "domain\|search" /etc/resolv.conf | head -1 | awk '{print $2}' || echo "")
    dns_config=$(jq -n --argjson nameservers "$nameservers" --arg domain "$domain" \
      '{nameservers: $nameservers, domain: $domain}')
  fi

  # Listening sockets (ss preferred, netstat fallback), raw lines as strings.
  local open_ports='[]'
  if command -v ss &>/dev/null; then
    while IFS= read -r line; do
      local port_info=$(echo "$line" | jq -R .)
      open_ports=$(echo "$open_ports" | jq ". + [$port_info]")
    done < <(ss -tuln | grep LISTEN | head -20)
  elif command -v netstat &>/dev/null; then
    while IFS= read -r line; do
      local port_info=$(echo "$line" | jq -R .)
      open_ports=$(echo "$open_ports" | jq ". + [$port_info]")
    done < <(netstat -tuln | grep LISTEN | head -20)
  fi

  # Docker networks. BUGFIX: the per-line variable used to be named
  # `network_info`, clobbering the skeleton object above — with any Docker
  # network present, the final .network section was built on top of the last
  # network entry instead of the skeleton.
  local docker_networks='[]'
  if command -v docker &>/dev/null && docker info &>/dev/null; then
    while IFS= read -r line; do
      [[ -n "$line" ]] || continue  # skip the empty fallback line
      local docker_net_entry=$(echo "$line" | jq -R . | jq 'split(" ") | {name: .[0], id: .[1], driver: .[2], scope: .[3]}')
      docker_networks=$(echo "$docker_networks" | jq ". + [$docker_net_entry]")
    done < <(docker network ls --format "{{.Name}} {{.ID}} {{.Driver}} {{.Scope}}" 2>/dev/null || echo "")
  fi

  network_info=$(echo "$network_info" | jq --argjson interfaces "$interfaces" \
    --argjson routing "$routing" \
    --argjson dns "$dns_config" \
    --argjson ports "$open_ports" \
    --argjson docker_nets "$docker_networks" \
    '.interfaces = $interfaces | .routing = $routing | .dns_config = $dns | .open_ports = $ports | .docker_networks = $docker_nets')
  jq --argjson network "$network_info" '.network = $network' "$REPORT_FILE" > "${REPORT_FILE}.tmp" && mv "${REPORT_FILE}.tmp" "$REPORT_FILE"
}
# Collect storage layout: mounted /dev filesystems with usage, sizes of key
# directories, mount options from /proc/mounts, and Docker volume sizes.
# Merged into the report under .storage.
# Globals: REPORT_FILE (rewritten in place via jq + tmp file).
collect_storage_info() {
echo "💾 Collecting storage information..."
# Skeleton object; each field below is filled and merged in one jq pass.
local storage_info=$(cat << 'EOF'
{
"filesystems": [],
"disk_usage": [],
"mount_points": [],
"docker_volumes": []
}
EOF
)
# Filesystem information
# NOTE(review): JSON is hand-built from df fields — assumes device names and
# mount paths contain no spaces, quotes, or backslashes. TODO confirm for
# hosts with exotic mounts; a path with a space would shift $6 and yield
# invalid JSON.
local filesystems='[]'
while IFS= read -r line; do
local fs_info=$(echo "$line" | awk '{print "{\"filesystem\":\"" $1 "\",\"size\":\"" $2 "\",\"used\":\"" $3 "\",\"available\":\"" $4 "\",\"use_percent\":\"" $5 "\",\"mount\":\"" $6 "\"}"}' | jq .)
filesystems=$(echo "$filesystems" | jq ". + [$fs_info]")
done < <(df -h | grep -E "^/dev")
# Disk usage for important directories
# du can take a while on large trees; errors (permissions) are discarded.
local disk_usage='[]'
local important_dirs=("/home" "/var" "/opt" "/usr" "/etc")
for dir in "${important_dirs[@]}"; do
if [[ -d "$dir" ]]; then
local usage=$(du -sh "$dir" 2>/dev/null | awk '{print $1}' || echo "unknown")
local usage_info=$(jq -n --arg dir "$dir" --arg size "$usage" '{directory: $dir, size: $size}')
disk_usage=$(echo "$disk_usage" | jq ". + [$usage_info]")
fi
done
# Mount points with options
# /proc/mounts fields: device, mountpoint, fstype, options (whitespace-split).
local mount_points='[]'
while IFS= read -r line; do
if [[ "$line" =~ ^([^[:space:]]+)[[:space:]]+([^[:space:]]+)[[:space:]]+([^[:space:]]+)[[:space:]]+([^[:space:]]+) ]]; then
local device="${BASH_REMATCH[1]}"
local mount="${BASH_REMATCH[2]}"
local fstype="${BASH_REMATCH[3]}"
local options="${BASH_REMATCH[4]}"
local mount_info=$(jq -n --arg device "$device" --arg mount "$mount" --arg fstype "$fstype" --arg opts "$options" \
'{device: $device, mountpoint: $mount, filesystem: $fstype, options: $opts}')
mount_points=$(echo "$mount_points" | jq ". + [$mount_info]")
fi
done < <(cat /proc/mounts | grep -E "^/dev")
# Docker volumes
# Only attempted when the CLI exists AND the daemon answers (docker info).
# Volume sizes come from du on the host-side mountpoint, which requires
# read access to /var/lib/docker — "unknown" otherwise.
local docker_volumes='[]'
if command -v docker &>/dev/null && docker info &>/dev/null; then
while IFS= read -r line; do
local vol_name=$(echo "$line" | awk '{print $1}')
local vol_driver=$(echo "$line" | awk '{print $2}')
local vol_mountpoint=$(docker volume inspect "$vol_name" --format '{{.Mountpoint}}' 2>/dev/null || echo "unknown")
local vol_size=$(du -sh "$vol_mountpoint" 2>/dev/null | awk '{print $1}' || echo "unknown")
local vol_info=$(jq -n --arg name "$vol_name" --arg driver "$vol_driver" --arg mount "$vol_mountpoint" --arg size "$vol_size" \
'{name: $name, driver: $driver, mountpoint: $mount, size: $size}')
docker_volumes=$(echo "$docker_volumes" | jq ". + [$vol_info]")
done < <(docker volume ls --format "{{.Name}} {{.Driver}}" 2>/dev/null || echo "")
fi
# Merge the four sections and write the report back atomically.
storage_info=$(echo "$storage_info" | jq --argjson filesystems "$filesystems" \
--argjson disk_usage "$disk_usage" \
--argjson mount_points "$mount_points" \
--argjson docker_volumes "$docker_volumes" \
'.filesystems = $filesystems | .disk_usage = $disk_usage | .mount_points = $mount_points | .docker_volumes = $docker_volumes')
jq --argjson storage "$storage_info" '.storage = $storage' "$REPORT_FILE" > "${REPORT_FILE}.tmp" && mv "${REPORT_FILE}.tmp" "$REPORT_FILE"
}
# Snapshot performance counters from /proc: load averages, one CPU-time
# sample, memory usage, cumulative per-disk I/O, and per-interface byte
# counts. These are point-in-time/cumulative values, not rates.
# Merged into the report under .performance.
# Globals: REPORT_FILE (rewritten in place via jq + tmp file).
collect_performance_baseline() {
echo "📊 Collecting performance baseline..."
local performance_info=$(cat << 'EOF'
{
"load_average": {},
"cpu_usage": {},
"memory_usage": {},
"disk_io": {},
"network_stats": {}
}
EOF
)
# Load average
# /proc/loadavg first three fields: 1/5/15-minute averages (kept as strings).
local load_avg='{}'
if [[ -f /proc/loadavg ]]; then
local load_data=$(cat /proc/loadavg)
local load_1min=$(echo "$load_data" | awk '{print $1}')
local load_5min=$(echo "$load_data" | awk '{print $2}')
local load_15min=$(echo "$load_data" | awk '{print $3}')
load_avg=$(jq -n --arg l1 "$load_1min" --arg l5 "$load_5min" --arg l15 "$load_15min" \
'{one_minute: $l1, five_minute: $l5, fifteen_minute: $l15}')
fi
# CPU usage snapshot
# Aggregate "cpu " line of /proc/stat: cumulative jiffies since boot, not a
# utilization percentage.
local cpu_usage='{}'
if [[ -f /proc/stat ]]; then
local cpu_line=$(grep "^cpu " /proc/stat)
cpu_usage=$(echo "$cpu_line" | awk '{print "{\"user\":" $2 ",\"nice\":" $3 ",\"system\":" $4 ",\"idle\":" $5 ",\"iowait\":" $6 "}"}' | jq .)
fi
# Memory usage
# All values in kB, straight from /proc/meminfo. used = total - free
# (ignores buffers/cache, so it overstates application memory).
local memory_usage='{}'
if [[ -f /proc/meminfo ]]; then
local mem_total=$(grep "MemTotal" /proc/meminfo | awk '{print $2}')
local mem_free=$(grep "MemFree" /proc/meminfo | awk '{print $2}')
local mem_available=$(grep "MemAvailable" /proc/meminfo | awk '{print $2}')
local mem_used=$((mem_total - mem_free))
memory_usage=$(jq -n --argjson total "$mem_total" --argjson free "$mem_free" --argjson available "$mem_available" --argjson used "$mem_used" \
'{total_kb: $total, free_kb: $free, available_kb: $available, used_kb: $used}')
fi
# Disk I/O stats
# /proc/diskstats: major minor name reads ... — the regex picks out the
# device name, read/write op counts and sector counts (cumulative).
local disk_io='[]'
if [[ -f /proc/diskstats ]]; then
while IFS= read -r line; do
if [[ "$line" =~ ^[[:space:]]*[0-9]+[[:space:]]+[0-9]+[[:space:]]+([a-z]+)[[:space:]]+([0-9]+)[[:space:]]+[0-9]+[[:space:]]+([0-9]+)[[:space:]]+[0-9]+[[:space:]]+([0-9]+)[[:space:]]+[0-9]+[[:space:]]+([0-9]+) ]]; then
local device="${BASH_REMATCH[1]}"
local reads="${BASH_REMATCH[2]}"
local read_sectors="${BASH_REMATCH[3]}"
local writes="${BASH_REMATCH[4]}"
local write_sectors="${BASH_REMATCH[5]}"
# Only include main devices, not partitions
# NOTE(review): the [a-z]+ name pattern plus this digit-suffix filter also
# skips NVMe devices (nvme0n1 ends in a digit) — TODO confirm intended.
if [[ ! "$device" =~ [0-9]+$ ]]; then
local io_info=$(jq -n --arg dev "$device" --arg reads "$reads" --arg read_sectors "$read_sectors" --arg writes "$writes" --arg write_sectors "$write_sectors" \
'{device: $dev, reads: $reads, read_sectors: $read_sectors, writes: $writes, write_sectors: $write_sectors}')
disk_io=$(echo "$disk_io" | jq ". + [$io_info]")
fi
fi
done < <(cat /proc/diskstats)
fi
# Network stats
# /proc/net/dev (header rows stripped by tail -n +3): field 1 after the
# colon is cumulative RX bytes; the 9th numeric field is TX bytes.
local network_stats='[]'
if [[ -f /proc/net/dev ]]; then
while IFS= read -r line; do
if [[ "$line" =~ ^[[:space:]]*([^:]+):[[:space:]]*([0-9]+)[[:space:]]+[0-9]+[[:space:]]+[0-9]+[[:space:]]+[0-9]+[[:space:]]+[0-9]+[[:space:]]+[0-9]+[[:space:]]+[0-9]+[[:space:]]+[0-9]+[[:space:]]+([0-9]+) ]]; then
local interface="${BASH_REMATCH[1]}"
local rx_bytes="${BASH_REMATCH[2]}"
local tx_bytes="${BASH_REMATCH[3]}"
# Skip loopback
if [[ "$interface" != "lo" ]]; then
local net_info=$(jq -n --arg iface "$interface" --arg rx "$rx_bytes" --arg tx "$tx_bytes" \
'{interface: $iface, rx_bytes: $rx, tx_bytes: $tx}')
network_stats=$(echo "$network_stats" | jq ". + [$net_info]")
fi
fi
done < <(tail -n +3 /proc/net/dev)
fi
# Merge the five sections and write the report back atomically.
performance_info=$(echo "$performance_info" | jq --argjson load "$load_avg" \
--argjson cpu "$cpu_usage" \
--argjson memory "$memory_usage" \
--argjson disk_io "$disk_io" \
--argjson network "$network_stats" \
'.load_average = $load | .cpu_usage = $cpu | .memory_usage = $memory | .disk_io = $disk_io | .network_stats = $network')
jq --argjson performance "$performance_info" '.performance = $performance' "$REPORT_FILE" > "${REPORT_FILE}.tmp" && mv "${REPORT_FILE}.tmp" "$REPORT_FILE"
}
# Print a short human-readable digest of the system discovery report and
# point the operator at the next collector script.
# Globals: REPORT_FILE (read-only; finished JSON discovery report).
generate_summary() {
  echo ""
  echo "📋 SYSTEM DISCOVERY SUMMARY"
  echo "=========================="
  # Pull the headline fields back out of the finished JSON report.
  local host_name=$(jq -r '.discovery_metadata.hostname' "$REPORT_FILE")
  local cpu_desc=$(jq -r '.hardware.cpu.model' "$REPORT_FILE")
  local mem_total_gb=$(jq -r '.hardware.memory.total_gb' "$REPORT_FILE")
  local distro_name=$(jq -r '.operating_system.distribution' "$REPORT_FILE")
  local disk_count=$(jq '.hardware.storage | length' "$REPORT_FILE")
  local iface_count=$(jq '.network.interfaces | length' "$REPORT_FILE")
  local docker_net_count=$(jq '.network.docker_networks | length' "$REPORT_FILE")
  echo "Hostname: $host_name"
  echo "CPU: $cpu_desc"
  echo "Memory: ${mem_total_gb}GB"
  echo "OS: $distro_name"
  echo "Storage Devices: $disk_count"
  echo "Network Interfaces: $iface_count"
  echo "Docker Networks: $docker_net_count"
  echo ""
  echo "Full report: $REPORT_FILE"
  echo "Next: Run service_inventory_collector.sh"
}
# Entry point: run the full system discovery and write the JSON report.
main "$@"

View File

@@ -0,0 +1,113 @@
#!/bin/bash
#
# Targeted Data Discovery Script
# Fast identification of critical data locations for migration planning
# Avoids filesystem traversal bottlenecks
#
# Writes one plain-text result file per topic under a timestamped /tmp dir.
# Best-effort: individual probes are guarded so a missing tool or empty
# match never aborts the run under `set -euo pipefail` (a bare `find | head`
# can fail via SIGPIPE/pipefail, and a no-match grep exits non-zero).
set -euo pipefail

TIMESTAMP=$(date +%Y%m%d_%H%M%S)
HOSTNAME=$(hostname -f)
OUTPUT_DIR="/tmp/data_discovery_${HOSTNAME}_${TIMESTAMP}"
mkdir -p "$OUTPUT_DIR"
LOG_FILE="${OUTPUT_DIR}/data.log"

# Mirror everything (stdout + stderr) into the run log.
exec > >(tee -a "$LOG_FILE") 2>&1

echo "Starting Data Discovery on ${HOSTNAME} at $(date)"
echo "Output: $OUTPUT_DIR"
echo "========================================"

# 1. Database locations (common paths only, no full filesystem scan)
echo "1. Database Locations"
echo "--- PostgreSQL ---" > "$OUTPUT_DIR/databases.txt"
find /var/lib/postgresql /opt/postgresql \( -name "*.conf" -o -name "postgresql.conf" \) 2>/dev/null >> "$OUTPUT_DIR/databases.txt" || true
echo "--- MySQL/MariaDB ---" >> "$OUTPUT_DIR/databases.txt"
find /var/lib/mysql /etc/mysql \( -name "my.cnf" -o -name "*.cnf" \) 2>/dev/null >> "$OUTPUT_DIR/databases.txt" || true
echo "--- SQLite ---" >> "$OUTPUT_DIR/databases.txt"
find /var/lib /opt -maxdepth 3 \( -name "*.db" -o -name "*.sqlite*" \) 2>/dev/null >> "$OUTPUT_DIR/databases.txt" || true

# 2. Docker data locations (skipped when the CLI is absent; guarded against
# a stopped daemon)
echo "2. Docker Data Locations"
if command -v docker >/dev/null 2>&1; then
  docker system df > "$OUTPUT_DIR/docker_storage.txt" 2>/dev/null || echo "Docker system df failed"
  docker volume ls --format "table {{.Name}}\t{{.Driver}}\t{{.Mountpoint}}" > "$OUTPUT_DIR/docker_volumes.txt" 2>/dev/null || true
  # Get volume mount points
  echo "Docker volume details:" > "$OUTPUT_DIR/docker_volume_details.txt"
  docker volume ls --format "{{.Name}}" 2>/dev/null | while IFS= read -r volume; do
    echo "Volume: $volume" >> "$OUTPUT_DIR/docker_volume_details.txt"
    docker volume inspect "$volume" 2>/dev/null >> "$OUTPUT_DIR/docker_volume_details.txt" || true
    echo "---" >> "$OUTPUT_DIR/docker_volume_details.txt"
  done || true
fi

# 3. Critical configuration files (targeted, depth-limited search)
echo "3. Critical Configuration Files"
echo "=== Application Configs ===" > "$OUTPUT_DIR/config_files.txt"
find /etc -maxdepth 2 \( -name "*.conf" -o -name "*.cfg" -o -name "*.ini" \) 2>/dev/null | head -30 >> "$OUTPUT_DIR/config_files.txt" || true
echo "=== Docker Compose Files ===" >> "$OUTPUT_DIR/config_files.txt"
find /opt /home -maxdepth 4 \( -name "docker-compose.yml" -o -name "docker-compose.yaml" -o -name "compose.yml" \) 2>/dev/null >> "$OUTPUT_DIR/config_files.txt" || true

# 4. Storage and mount information
echo "4. Storage & Mount Points"
df -hT > "$OUTPUT_DIR/disk_usage.txt"
mount > "$OUTPUT_DIR/mount_points.txt"
lsblk -o NAME,SIZE,TYPE,FSTYPE,MOUNTPOINT > "$OUTPUT_DIR/block_devices.txt"

# 5. NFS and network storage
echo "5. Network Storage"
if command -v showmount >/dev/null 2>&1; then
  showmount -e localhost > "$OUTPUT_DIR/nfs_exports.txt" 2>/dev/null || echo "No NFS exports"
fi
grep nfs /proc/mounts > "$OUTPUT_DIR/nfs_mounts.txt" 2>/dev/null || echo "No NFS mounts"

# 6. Samba/SMB shares
echo "6. SMB/Samba Shares"
if command -v smbstatus >/dev/null 2>&1; then
  smbstatus -S > "$OUTPUT_DIR/smb_shares.txt" 2>/dev/null || echo "SMB not running"
fi
if [ -f /etc/samba/smb.conf ]; then
  cp /etc/samba/smb.conf "$OUTPUT_DIR/" 2>/dev/null || true
fi

# 7. Application-specific data directories
echo "7. Application Data Directories"
echo "=== Common App Directories ===" > "$OUTPUT_DIR/app_directories.txt"
ls -la /var/lib/ 2>/dev/null | grep -E "(mysql|postgresql|redis|nginx|apache|docker)" >> "$OUTPUT_DIR/app_directories.txt" || true
echo "=== /opt Applications ===" >> "$OUTPUT_DIR/app_directories.txt"
ls -la /opt/ 2>/dev/null >> "$OUTPUT_DIR/app_directories.txt" || true
echo "=== /srv Data ===" >> "$OUTPUT_DIR/app_directories.txt"
ls -la /srv/ 2>/dev/null >> "$OUTPUT_DIR/app_directories.txt" || true

# 8. Log directories (critical for troubleshooting)
echo "8. Log Locations"
echo "=== System Logs ===" > "$OUTPUT_DIR/log_locations.txt"
ls -la /var/log/ 2>/dev/null | head -20 >> "$OUTPUT_DIR/log_locations.txt" || true
echo "=== Application Logs ===" >> "$OUTPUT_DIR/log_locations.txt"
find /opt /var/log -maxdepth 3 -name "*.log" 2>/dev/null | head -20 >> "$OUTPUT_DIR/log_locations.txt" || true

# 9. Home directory critical data
echo "9. User Data Locations"
ls -la /home/ > "$OUTPUT_DIR/user_directories.txt" 2>/dev/null || echo "No /home directory"
find /home -maxdepth 2 -type d -name ".*" 2>/dev/null | head -20 > "$OUTPUT_DIR/user_hidden_dirs.txt" || true

# 10. System package data (counts only — full lists are huge)
echo "10. Package Manager Data"
if command -v dpkg >/dev/null 2>&1; then
  dpkg -l | wc -l > "$OUTPUT_DIR/package_count.txt"
  echo "dpkg packages: $(cat "$OUTPUT_DIR/package_count.txt")" >> "$OUTPUT_DIR/package_summary.txt"
fi
if command -v rpm >/dev/null 2>&1; then
  rpm -qa | wc -l > "$OUTPUT_DIR/rpm_package_count.txt"
  echo "rpm packages: $(cat "$OUTPUT_DIR/rpm_package_count.txt")" >> "$OUTPUT_DIR/package_summary.txt"
fi

# 11. Backup locations
echo "11. Backup Locations"
echo "=== Common Backup Directories ===" > "$OUTPUT_DIR/backup_locations.txt"
find /backup /backups /mnt -maxdepth 2 -type d 2>/dev/null >> "$OUTPUT_DIR/backup_locations.txt" || echo "No backup directories found" >> "$OUTPUT_DIR/backup_locations.txt"

echo "Data discovery completed at $(date)"
echo "Results in: $OUTPUT_DIR"
ls -la "$OUTPUT_DIR"

View File

@@ -0,0 +1,134 @@
#!/bin/bash
#
# Targeted Performance Discovery Script
# Fast collection of performance metrics and resource usage
#
set -euo pipefail
TIMESTAMP=$(date +%Y%m%d_%H%M%S)
HOSTNAME=$(hostname -f)
OUTPUT_DIR="/tmp/performance_discovery_${HOSTNAME}_${TIMESTAMP}"
mkdir -p "$OUTPUT_DIR"
LOG_FILE="${OUTPUT_DIR}/performance.log"
exec > >(tee -a "$LOG_FILE") 2>&1
echo "Starting Performance Discovery on ${HOSTNAME} at $(date)"
echo "Output: $OUTPUT_DIR"
echo "=========================================="
# System load and uptime
echo "1. System Load & Uptime"
uptime > "$OUTPUT_DIR/uptime.txt"
cat /proc/loadavg > "$OUTPUT_DIR/load_average.txt"
w > "$OUTPUT_DIR/who_load.txt"
# CPU information and usage
echo "2. CPU Information & Usage"
lscpu > "$OUTPUT_DIR/cpu_info.txt"
cat /proc/cpuinfo | grep -E "(processor|model name|cpu MHz|cache size)" > "$OUTPUT_DIR/cpu_details.txt"
top -b -n1 | head -20 > "$OUTPUT_DIR/cpu_top.txt"
# Memory usage
echo "3. Memory Usage"
free -h > "$OUTPUT_DIR/memory_free.txt"
cat /proc/meminfo > "$OUTPUT_DIR/memory_detailed.txt"
ps aux --sort=-%mem | head -20 > "$OUTPUT_DIR/memory_top_processes.txt"
# Disk I/O and usage
echo "4. Disk I/O & Usage"
if command -v iostat >/dev/null 2>&1; then
iostat -x 1 3 > "$OUTPUT_DIR/iostat.txt" 2>/dev/null || echo "iostat failed"
else
echo "iostat not available" > "$OUTPUT_DIR/iostat.txt"
fi
df -h > "$OUTPUT_DIR/disk_usage.txt"
df -i > "$OUTPUT_DIR/inode_usage.txt"
# Network performance
echo "5. Network Performance"
if command -v ss >/dev/null 2>&1; then
ss -s > "$OUTPUT_DIR/network_summary.txt"
ss -tuln > "$OUTPUT_DIR/network_listening.txt"
else
netstat -s > "$OUTPUT_DIR/network_summary.txt" 2>/dev/null || echo "netstat not available"
netstat -tuln > "$OUTPUT_DIR/network_listening.txt" 2>/dev/null || echo "netstat not available"
fi
# Network interface statistics
cat /proc/net/dev > "$OUTPUT_DIR/network_interfaces.txt"
ip -s link > "$OUTPUT_DIR/interface_stats.txt" 2>/dev/null || ifconfig -a > "$OUTPUT_DIR/interface_stats.txt" 2>/dev/null
# Process information
echo "6. Process Information"
ps aux --sort=-%cpu | head -30 > "$OUTPUT_DIR/processes_by_cpu.txt"
ps aux --sort=-%mem | head -30 > "$OUTPUT_DIR/processes_by_memory.txt"
ps -eo pid,ppid,cmd,%mem,%cpu --sort=-%cpu | head -30 > "$OUTPUT_DIR/processes_detailed.txt"
# System services performance
echo "7. System Services"
systemctl list-units --type=service --state=running --no-pager > "$OUTPUT_DIR/running_services.txt"
systemctl list-units --failed --no-pager > "$OUTPUT_DIR/failed_services.txt"
# Docker performance (if available)
echo "8. Container Performance"
if command -v docker >/dev/null 2>&1; then
docker system df > "$OUTPUT_DIR/docker_storage_usage.txt" 2>/dev/null || echo "Docker system df failed"
docker stats --no-stream --format "table {{.Container}}\t{{.CPUPerc}}\t{{.MemUsage}}\t{{.NetIO}}\t{{.BlockIO}}" > "$OUTPUT_DIR/docker_stats.txt" 2>/dev/null || echo "Docker stats failed"
docker system events --since "1h" --until "now" > "$OUTPUT_DIR/docker_events.txt" 2>/dev/null || echo "No recent docker events"
else
echo "Docker not available" > "$OUTPUT_DIR/docker_status.txt"
fi
# Kernel and system information
echo "9. Kernel & System Info"
uname -a > "$OUTPUT_DIR/kernel_info.txt"
cat /proc/version > "$OUTPUT_DIR/kernel_version.txt"
dmesg | tail -50 > "$OUTPUT_DIR/dmesg_recent.txt" 2>/dev/null || echo "dmesg not accessible"
# Resource limits
echo "10. Resource Limits"
ulimit -a > "$OUTPUT_DIR/ulimits.txt"
cat /proc/sys/fs/file-max > "$OUTPUT_DIR/file_max.txt" 2>/dev/null || echo "file-max not readable"
cat /proc/sys/fs/file-nr > "$OUTPUT_DIR/file_nr.txt" 2>/dev/null || echo "file-nr not readable"
# Temperature and hardware sensors (if available)
echo "11. Hardware Sensors"
if command -v sensors >/dev/null 2>&1; then
sensors > "$OUTPUT_DIR/temperature_sensors.txt" 2>/dev/null || echo "sensors failed"
else
echo "lm-sensors not available" > "$OUTPUT_DIR/temperature_sensors.txt"
fi
# Storage device performance
echo "12. Storage Performance"
if command -v smartctl >/dev/null 2>&1; then
# Check primary storage device
primary_disk=$(lsblk -d -o NAME,TYPE | grep disk | head -1 | awk '{print $1}')
if [ ! -z "$primary_disk" ]; then
smartctl -a "/dev/$primary_disk" > "$OUTPUT_DIR/smart_${primary_disk}.txt" 2>/dev/null || echo "SMART data not available for $primary_disk"
fi
else
echo "smartmontools not available" > "$OUTPUT_DIR/smart_status.txt"
fi
# System performance over time (brief sample)
echo "13. Performance Sampling"
echo "Sampling system performance for 30 seconds..."
{
echo "=== CPU Usage Sample ==="
sar 5 6 2>/dev/null || vmstat 5 6 2>/dev/null || echo "No sar/vmstat available"
echo "=== Load Average Sample ==="
for i in {1..6}; do
echo "$(date): $(cat /proc/loadavg)"
sleep 5
done
} > "$OUTPUT_DIR/performance_sample.txt" &
# Wait for sampling to complete
echo "Performance sampling running in background..."
wait
echo "Performance discovery completed at $(date)"
echo "Results in: $OUTPUT_DIR"
ls -la "$OUTPUT_DIR"

View File

@@ -0,0 +1,99 @@
#!/bin/bash
#
# Targeted Security Discovery Script
# Fast collection of security-critical data for migration planning
#
set -euo pipefail
TIMESTAMP=$(date +%Y%m%d_%H%M%S)
HOSTNAME=$(hostname -f)
OUTPUT_DIR="/tmp/security_discovery_${HOSTNAME}_${TIMESTAMP}"
mkdir -p "$OUTPUT_DIR"
LOG_FILE="${OUTPUT_DIR}/security.log"
exec > >(tee -a "$LOG_FILE") 2>&1
echo "Starting Security Discovery on ${HOSTNAME} at $(date)"
echo "Output: $OUTPUT_DIR"
echo "============================================"
# User & Access Control
echo "1. User Accounts & Access"
cat /etc/passwd > "$OUTPUT_DIR/users.txt"
cat /etc/group > "$OUTPUT_DIR/groups.txt"
awk -F: '$3 == 0 {print $1}' /etc/passwd > "$OUTPUT_DIR/root_users.txt"
grep -E '^(sudo|wheel):' /etc/group > "$OUTPUT_DIR/sudo_users.txt" 2>/dev/null || echo "No sudo group found"
who > "$OUTPUT_DIR/current_logins.txt"
last -10 > "$OUTPUT_DIR/last_logins.txt"
# SSH Configuration
echo "2. SSH Configuration"
if [ -f /etc/ssh/sshd_config ]; then
cp /etc/ssh/sshd_config "$OUTPUT_DIR/"
grep -E '^(Port|PermitRootLogin|PasswordAuthentication|PubkeyAuthentication|Protocol)' /etc/ssh/sshd_config > "$OUTPUT_DIR/ssh_key_settings.txt"
fi
# Find SSH keys
echo "3. SSH Keys"
find /home -name ".ssh" -type d 2>/dev/null | while read ssh_dir; do
user=$(echo "$ssh_dir" | cut -d'/' -f3)
ls -la "$ssh_dir" > "$OUTPUT_DIR/ssh_keys_${user}.txt" 2>/dev/null || true
done
ls -la /root/.ssh/ > "$OUTPUT_DIR/ssh_keys_root.txt" 2>/dev/null || echo "No root SSH keys"
# Firewall & Network Security
echo "4. Firewall Configuration"
if command -v ufw >/dev/null 2>&1; then
ufw status verbose > "$OUTPUT_DIR/ufw_status.txt" 2>/dev/null || echo "UFW not accessible"
fi
if command -v iptables >/dev/null 2>&1; then
iptables -L -n -v > "$OUTPUT_DIR/iptables_rules.txt" 2>/dev/null || echo "iptables not accessible"
fi
if command -v firewall-cmd >/dev/null 2>&1; then
firewall-cmd --list-all > "$OUTPUT_DIR/firewalld_config.txt" 2>/dev/null || echo "firewalld not accessible"
fi
# Open ports and listening services
ss -tuln > "$OUTPUT_DIR/open_ports.txt" 2>/dev/null || netstat -tuln > "$OUTPUT_DIR/open_ports.txt" 2>/dev/null
# Scheduled tasks
echo "5. Scheduled Tasks"
crontab -l > "$OUTPUT_DIR/root_crontab.txt" 2>/dev/null || echo "No root crontab"
if [ -f /etc/crontab ]; then
cp /etc/crontab "$OUTPUT_DIR/"
fi
if [ -d /etc/cron.d ]; then
cp -r /etc/cron.d "$OUTPUT_DIR/"
fi
# Check for dangerous SUID files
echo "6. SUID/SGID Files"
find / -type f \( -perm -4000 -o -perm -2000 \) 2>/dev/null | head -50 > "$OUTPUT_DIR/suid_files.txt"
# File permissions audit
echo "7. Critical File Permissions"
ls -la /etc/passwd /etc/shadow /etc/sudoers > "$OUTPUT_DIR/critical_file_perms.txt" 2>/dev/null
# Failed login attempts
echo "8. Security Logs"
if [ -f /var/log/auth.log ]; then
grep "Failed password" /var/log/auth.log | tail -50 > "$OUTPUT_DIR/failed_logins.txt" 2>/dev/null || echo "No failed login entries"
elif [ -f /var/log/secure ]; then
grep "Failed password" /var/log/secure | tail -50 > "$OUTPUT_DIR/failed_logins.txt" 2>/dev/null || echo "No failed login entries"
fi
# Check for sensitive data in environment
echo "9. Environment Security"
env | grep -i -E "(password|key|secret|token)" > "$OUTPUT_DIR/sensitive_env_vars.txt" 2>/dev/null || echo "No obvious sensitive env vars"
# Package manager security updates
echo "10. Security Updates"
if command -v apt >/dev/null 2>&1; then
apt list --upgradable 2>/dev/null | grep -i security > "$OUTPUT_DIR/security_updates.txt" || echo "No security updates found"
elif command -v dnf >/dev/null 2>&1; then
dnf check-update --security > "$OUTPUT_DIR/security_updates.txt" 2>/dev/null || echo "No security updates found"
fi
echo "Security discovery completed at $(date)"
echo "Results in: $OUTPUT_DIR"
ls -la "$OUTPUT_DIR"