COMPREHENSIVE CHANGES: INFRASTRUCTURE MIGRATION: - Migrated services to Docker Swarm on OMV800 (192.168.50.229) - Deployed PostgreSQL database for Vaultwarden migration - Updated all stack configurations for Docker Swarm compatibility - Added comprehensive monitoring stack (Prometheus, Grafana, Blackbox) - Implemented proper secret management for all services VAULTWARDEN POSTGRESQL MIGRATION: - Attempted migration from SQLite to PostgreSQL for NFS compatibility - Created PostgreSQL stack with proper user/password configuration - Built custom Vaultwarden image with PostgreSQL support - Troubleshot persistent SQLite fallback issue despite PostgreSQL config - Identified known issue where Vaultwarden silently falls back to SQLite - Added ENABLE_DB_WAL=false to prevent filesystem compatibility issues - Current status: Old Vaultwarden on lenovo410 still working, new one has config issues PAPERLESS SERVICES: - Successfully deployed Paperless-NGX and Paperless-AI on OMV800 - Both services running on ports 8000 and 3000 respectively - Caddy configuration updated for external access - Services accessible via paperless.pressmess.duckdns.org and paperless-ai.pressmess.duckdns.org CADDY CONFIGURATION: - Updated Caddyfile on Surface (192.168.50.254) for new service locations - Fixed Vaultwarden reverse proxy to point to new Docker Swarm service - Removed old notification hub reference that was causing conflicts - All services properly configured for external access via DuckDNS BACKUP AND DISCOVERY: - Created comprehensive backup system for all hosts - Generated detailed discovery reports for infrastructure analysis - Implemented automated backup validation scripts - Created migration progress tracking and verification reports MONITORING STACK: - Deployed Prometheus, Grafana, and Blackbox monitoring - Created infrastructure and system overview dashboards - Added proper service discovery and alerting configuration - Implemented performance monitoring for all critical services 
DOCUMENTATION: - Reorganized documentation into logical structure - Created comprehensive migration playbook and troubleshooting guides - Added hardware specifications and optimization recommendations - Documented all configuration changes and service dependencies CURRENT STATUS: - Paperless services: ✅ Working and accessible externally - Vaultwarden: ❌ PostgreSQL configuration issues, old instance still working - Monitoring: ✅ Deployed and operational - Caddy: ✅ Updated and working for external access - PostgreSQL: ✅ Database running, connection issues with Vaultwarden NEXT STEPS: - Continue troubleshooting Vaultwarden PostgreSQL configuration - Consider alternative approaches for Vaultwarden migration - Validate all external service access - Complete final migration validation TECHNICAL NOTES: - Used Docker Swarm for orchestration on OMV800 - Implemented proper secret management for sensitive data - Added comprehensive logging and monitoring - Created automated backup and validation scripts
450 lines
14 KiB
Bash
Executable File
450 lines
14 KiB
Bash
Executable File
#!/bin/bash

# Backup and Restore Testing Script

# Validates backup and restore procedures across the infrastructure
# Usage: backup_restore_test.sh [output_file]   (see usage() below)

# Strict mode: -e exit on unhandled error, -u error on unset variables,
# pipefail makes a pipeline fail if any stage fails.
set -euo pipefail
|
|
|
|
# Colors for output — ANSI escape sequences, interpreted by 'echo -e' /
# 'printf %b' in the print_* helpers below.
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color (reset)
|
|
|
|
# Print an informational message (green [INFO] tag) to stdout.
print_status() {
    printf '%b[INFO]%b %b\n' "$GREEN" "$NC" "$1"
}
|
|
|
|
# Print a warning message (yellow [WARNING] tag).
# Fix: diagnostics go to stderr so they don't pollute captured stdout.
print_warning() {
    echo -e "${YELLOW}[WARNING]${NC} $1" >&2
}
|
|
|
|
# Print an error message (red [ERROR] tag).
# Fix: diagnostics go to stderr so they don't pollute captured stdout.
print_error() {
    echo -e "${RED}[ERROR]${NC} $1" >&2
}
|
|
|
|
# Print a section header (blue [HEADER] tag) to stdout.
print_header() {
    local msg=$1
    printf '%b[HEADER]%b %b\n' "$BLUE" "$NC" "$msg"
}
|
|
|
|
# Configuration
# Root directory under which the directory tree and test backups live.
BACKUP_ROOT="/backup"
# Unique, timestamped working directories for this run (removed by cleanup()).
TEST_BACKUP_DIR="$BACKUP_ROOT/test_backup_$(date +%Y%m%d_%H%M%S)"
TEST_RESTORE_DIR="/tmp/test_restore_$(date +%Y%m%d_%H%M%S)"
# Infrastructure hosts. NOTE(review): not referenced anywhere else in this
# script — presumably intended for future per-host tests; confirm or remove.
HOSTS=("omv800.local" "jonathan-2518f5u" "surface" "fedora" "audrey")
# Report destination; the first CLI argument overrides the default.
OUTPUT_FILE="${1:-/tmp/backup_restore_test_report.txt}"
# NOTE(review): TEST_DATA_SIZE is not referenced anywhere else in this
# script (test sizes are hard-coded in dd calls below) — confirm or remove.
TEST_DATA_SIZE="100M"
# Compression algorithms exercised by test_compression().
COMPRESSION_TYPES=("gzip" "bzip2" "xz")
# Throwaway symmetric passphrase for the encryption round-trip test.
ENCRYPTION_KEY="test_encryption_key_$(date +%s)"
|
|
|
|
# Verify the backup root exists, is writable, and has enough free space,
# creating the directory layout (owned by the invoking user) when missing.
# Globals:   BACKUP_ROOT (read)
# Returns:   0 when the infrastructure is usable, 1 when not writable.
# Fixes: quoted "$USER:$USER" (word-splitting safety, SC2086) and split
# 'local' declaration from command substitution so a failing df is not
# masked by local's own exit status (SC2155).
check_backup_infrastructure() {
    print_header "Checking Backup Infrastructure"

    # Create the directory tree on first run.
    if [ ! -d "$BACKUP_ROOT" ]; then
        print_error "Backup directory $BACKUP_ROOT does not exist"
        print_status "Creating backup directory structure..."
        sudo mkdir -p "$BACKUP_ROOT"/{snapshots,database_dumps,configs,volumes,test}
        sudo chown -R "$USER:$USER" "$BACKUP_ROOT"
    fi

    # Check backup directory permissions
    if [ ! -w "$BACKUP_ROOT" ]; then
        print_error "Backup directory $BACKUP_ROOT is not writable"
        return 1
    fi

    # Check available space (KB, from df's 4th column on the data row).
    local available_space
    available_space=$(df "$BACKUP_ROOT" | awk 'NR==2 {print $4}')
    local required_space=1048576 # 1GB in KB

    if [ "$available_space" -lt "$required_space" ]; then
        print_warning "Low disk space in backup directory: ${available_space}KB available, ${required_space}KB required"
    else
        print_status "Sufficient disk space available: ${available_space}KB"
    fi

    print_status "Backup infrastructure check completed"
}
|
|
|
|
# Populate $TEST_BACKUP_DIR with representative fixtures: a simulated
# database dump (50 MiB random), config files, binary "volume" data
# (25 MiB random), log files, and a metadata record describing the run.
create_test_data() {
    print_header "Creating Test Data"

    mkdir -p "$TEST_BACKUP_DIR"

    print_status "Creating database dump simulation..."
    dd if=/dev/urandom of="$TEST_BACKUP_DIR/database_dump.sql" bs=1M count=50 2>/dev/null

    print_status "Creating configuration files..."
    mkdir -p "$TEST_BACKUP_DIR/configs"
    local n
    for n in {1..10}; do
        printf 'config_value_%s=test_data_%s\n' "$n" "$n" > "$TEST_BACKUP_DIR/configs/config_$n.conf"
    done

    print_status "Creating volume data simulation..."
    mkdir -p "$TEST_BACKUP_DIR/volumes"
    dd if=/dev/urandom of="$TEST_BACKUP_DIR/volumes/app_data.bin" bs=1M count=25 2>/dev/null

    print_status "Creating log files..."
    mkdir -p "$TEST_BACKUP_DIR/logs"
    for n in {1..5}; do
        printf 'Log entry %s: %s\n' "$n" "$(date)" > "$TEST_BACKUP_DIR/logs/app_$n.log"
    done

    # Record when and how much was generated.
    local meta="$TEST_BACKUP_DIR/backup_metadata.txt"
    printf 'Backup created: %s\n' "$(date)" > "$meta"
    printf 'Test data size: %s\n' "$(du -sh "$TEST_BACKUP_DIR" | cut -f1)" >> "$meta"

    print_status "Test data created in $TEST_BACKUP_DIR"
}
|
|
|
|
# Round-trip each algorithm in COMPRESSION_TYPES over a 10 MiB random
# file, recording compression ratio and timings in the report, then
# decompressing and verifying byte-for-byte integrity with cmp.
# Globals:   TEST_BACKUP_DIR, COMPRESSION_TYPES (read); OUTPUT_FILE (append)
# Fix: 'local x=$(cmd)' masked failing command substitutions under
# set -e (SC2155) — declarations are now split from assignment; the loop
# variable is declared local.
test_compression() {
    print_header "Testing Compression Methods"

    local test_file="$TEST_BACKUP_DIR/compression_test.dat"
    dd if=/dev/urandom of="$test_file" bs=1M count=10 2>/dev/null

    local compression
    for compression in "${COMPRESSION_TYPES[@]}"; do
        print_status "Testing $compression compression..."

        local compressed_file="$test_file.$compression"
        local start_time end_time duration
        start_time=$(date +%s.%N)

        case $compression in
            "gzip")
                gzip -c "$test_file" > "$compressed_file"
                ;;
            "bzip2")
                bzip2 -c "$test_file" > "$compressed_file"
                ;;
            "xz")
                xz -c "$test_file" > "$compressed_file"
                ;;
        esac

        end_time=$(date +%s.%N)
        duration=$(echo "$end_time - $start_time" | bc -l)
        local original_size compressed_size compression_ratio
        original_size=$(stat -c%s "$test_file")
        compressed_size=$(stat -c%s "$compressed_file")
        compression_ratio=$(echo "scale=2; $compressed_size * 100 / $original_size" | bc -l)

        echo "$compression: ${compression_ratio}% of original size, ${duration}s" >> "$OUTPUT_FILE"
        print_status "$compression: ${compression_ratio}% of original size, ${duration}s"

        # Test decompression (timed separately).
        local decompress_start decompress_end decompress_time
        decompress_start=$(date +%s.%N)
        case $compression in
            "gzip")
                gunzip -c "$compressed_file" > "$test_file.decompressed"
                ;;
            "bzip2")
                bunzip2 -c "$compressed_file" > "$test_file.decompressed"
                ;;
            "xz")
                unxz -c "$compressed_file" > "$test_file.decompressed"
                ;;
        esac
        decompress_end=$(date +%s.%N)
        decompress_time=$(echo "$decompress_end - $decompress_start" | bc -l)

        # Verify the round trip restored the original bytes exactly.
        if cmp -s "$test_file" "$test_file.decompressed"; then
            echo "$compression decompression: PASSED (${decompress_time}s)" >> "$OUTPUT_FILE"
            print_status "$compression decompression: PASSED (${decompress_time}s)"
        else
            echo "$compression decompression: FAILED" >> "$OUTPUT_FILE"
            print_error "$compression decompression: FAILED"
        fi

        # Cleanup per-algorithm artifacts.
        rm -f "$compressed_file" "$test_file.decompressed"
    done

    rm -f "$test_file"
}
|
|
|
|
# Round-trip a 5 MiB random file through AES-256-CBC with openssl,
# timing encrypt and decrypt, then verifying byte-for-byte integrity.
# Globals:   TEST_BACKUP_DIR, ENCRYPTION_KEY (read); OUTPUT_FILE (append)
# Fixes: added -pbkdf2 to both openssl invocations — the legacy default
# key derivation (EVP_BytesToKey) is deprecated and warns on modern
# OpenSSL (requires OpenSSL 1.1.1+; encrypt and decrypt must match).
# Also split 'local' declarations from command substitutions (SC2155).
test_encryption() {
    print_header "Testing Encryption"

    local test_file="$TEST_BACKUP_DIR/encryption_test.dat"
    local encrypted_file="$test_file.encrypted"
    local decrypted_file="$test_file.decrypted"

    dd if=/dev/urandom of="$test_file" bs=1M count=5 2>/dev/null

    print_status "Testing AES-256 encryption..."

    local start_time encrypt_end encrypt_time
    start_time=$(date +%s.%N)

    # Encrypt
    openssl enc -aes-256-cbc -salt -pbkdf2 -in "$test_file" -out "$encrypted_file" -pass pass:"$ENCRYPTION_KEY" 2>/dev/null

    encrypt_end=$(date +%s.%N)
    encrypt_time=$(echo "$encrypt_end - $start_time" | bc -l)

    # Decrypt
    local decrypt_start decrypt_end decrypt_time
    decrypt_start=$(date +%s.%N)
    openssl enc -aes-256-cbc -d -pbkdf2 -in "$encrypted_file" -out "$decrypted_file" -pass pass:"$ENCRYPTION_KEY" 2>/dev/null
    decrypt_end=$(date +%s.%N)
    decrypt_time=$(echo "$decrypt_end - $decrypt_start" | bc -l)

    # Verify the decrypted output matches the original plaintext.
    if cmp -s "$test_file" "$decrypted_file"; then
        echo "Encryption: PASSED (encrypt: ${encrypt_time}s, decrypt: ${decrypt_time}s)" >> "$OUTPUT_FILE"
        print_status "Encryption: PASSED (encrypt: ${encrypt_time}s, decrypt: ${decrypt_time}s)"
    else
        echo "Encryption: FAILED" >> "$OUTPUT_FILE"
        print_error "Encryption: FAILED"
    fi

    # Cleanup
    rm -f "$test_file" "$encrypted_file" "$decrypted_file"
}
|
|
|
|
# Simulate an "incremental" backup cycle: snapshot a directory, mutate it
# (modify one file, add one, delete one), snapshot again, then restore the
# second snapshot and confirm the mutations are reflected.
# NOTE(review): both snapshots are full tar archives; a true incremental
# mechanism (e.g. tar --listed-incremental) is not exercised here.
test_incremental_backup() {
    print_header "Testing Incremental Backup"

    local base_dir="$TEST_BACKUP_DIR/incremental"
    mkdir -p "$base_dir"

    # Initial state: two files.
    echo "Initial data" > "$base_dir/file1.txt"
    echo "Initial data" > "$base_dir/file2.txt"

    local backup1="$TEST_BACKUP_DIR/backup1.tar.gz"
    tar -czf "$backup1" -C "$base_dir" .

    # Mutate: change file1, add file3, drop file2.
    echo "Modified data" > "$base_dir/file1.txt"
    echo "New file" > "$base_dir/file3.txt"
    rm -f "$base_dir/file2.txt"

    local backup2="$TEST_BACKUP_DIR/backup2.tar.gz"
    tar -czf "$backup2" -C "$base_dir" .

    # Record archive sizes for the report.
    local size1 size2
    size1=$(stat -c%s "$backup1")
    size2=$(stat -c%s "$backup2")

    echo "Incremental backup: Initial ${size1} bytes, Incremental ${size2} bytes" >> "$OUTPUT_FILE"
    print_status "Incremental backup: Initial ${size1} bytes, Incremental ${size2} bytes"

    # Restore the second snapshot and check the mutations survived.
    local restored="$TEST_RESTORE_DIR/incremental"
    mkdir -p "$restored"
    tar -xzf "$backup2" -C "$restored"

    if [ -f "$restored/file1.txt" ] && [ -f "$restored/file3.txt" ] && [ ! -f "$restored/file2.txt" ]; then
        echo "Incremental restore: PASSED" >> "$OUTPUT_FILE"
        print_status "Incremental restore: PASSED"
    else
        echo "Incremental restore: FAILED" >> "$OUTPUT_FILE"
        print_error "Incremental restore: FAILED"
    fi

    rm -f "$backup1" "$backup2"
}
|
|
|
|
# Write a small simulated SQL dump, sanity-check that it contains the
# expected statements, then measure how well it compresses with gzip.
test_database_backup() {
    print_header "Testing Database Backup Simulation"

    local db_dump="$TEST_BACKUP_DIR/database_backup.sql"

    # Quoted heredoc delimiter: the dump is written verbatim, no expansion.
    cat > "$db_dump" << 'EOF'
-- Simulated database dump
CREATE TABLE users (
    id INT PRIMARY KEY,
    username VARCHAR(50),
    email VARCHAR(100),
    created_at TIMESTAMP
);

INSERT INTO users VALUES (1, 'testuser', 'test@example.com', NOW());
INSERT INTO users VALUES (2, 'admin', 'admin@example.com', NOW());

-- Simulated configuration
SET GLOBAL max_connections = 100;
SET GLOBAL innodb_buffer_pool_size = '1G';
EOF

    # Sanity check: the dump must contain both schema and data statements.
    if grep -q "CREATE TABLE" "$db_dump" && grep -q "INSERT INTO" "$db_dump"; then
        echo "Database backup simulation: PASSED" >> "$OUTPUT_FILE"
        print_status "Database backup simulation: PASSED"
    else
        echo "Database backup simulation: FAILED" >> "$OUTPUT_FILE"
        print_error "Database backup simulation: FAILED"
    fi

    # Measure gzip compression ratio of the dump.
    local compressed_dump="$db_dump.gz"
    gzip -c "$db_dump" > "$compressed_dump"

    local original_size compressed_size compression_ratio
    original_size=$(stat -c%s "$db_dump")
    compressed_size=$(stat -c%s "$compressed_dump")
    compression_ratio=$(echo "scale=2; $compressed_size * 100 / $original_size" | bc -l)

    echo "Database backup compression: ${compression_ratio}% of original size" >> "$OUTPUT_FILE"
    print_status "Database backup compression: ${compression_ratio}% of original size"

    rm -f "$compressed_dump"
}
|
|
|
|
# Create three files plus a sha256 manifest, archive them, restore the
# archive elsewhere, and verify the RESTORED copies against the manifest.
# Globals:   TEST_BACKUP_DIR, TEST_RESTORE_DIR (read); OUTPUT_FILE (append)
# Bug fixed: checksums were recorded with absolute paths, so after restore
# 'sha256sum -c' silently re-verified the original source files instead of
# the restored copies; the manifest now uses paths relative to the backup
# dir. Also replaced bare cd/cd - with subshells so a failure can never
# leave the script stranded in the restore directory.
test_backup_verification() {
    print_header "Testing Backup Verification"

    # Create test files; record checksums with relative names so the
    # manifest is portable to the restore location.
    local name
    for name in file1.txt file2.txt file3.txt; do
        echo "Test data for $name" > "$TEST_BACKUP_DIR/$name"
    done
    (cd "$TEST_BACKUP_DIR" && sha256sum file1.txt file2.txt file3.txt > checksums.txt)

    # Create backup archive
    local backup_archive="$TEST_BACKUP_DIR/verified_backup.tar.gz"
    tar -czf "$backup_archive" -C "$TEST_BACKUP_DIR" file1.txt file2.txt file3.txt checksums.txt

    # Restore and verify the restored files against the restored manifest.
    mkdir -p "$TEST_RESTORE_DIR/verification"
    tar -xzf "$backup_archive" -C "$TEST_RESTORE_DIR/verification"

    if (cd "$TEST_RESTORE_DIR/verification" && sha256sum -c checksums.txt >/dev/null 2>&1); then
        echo "Backup verification: PASSED" >> "$OUTPUT_FILE"
        print_status "Backup verification: PASSED"
    else
        echo "Backup verification: FAILED" >> "$OUTPUT_FILE"
        print_error "Backup verification: FAILED"
    fi
}
|
|
|
|
# Simulate scheduling concerns: record a schedule marker and an example
# cron entry, then create five rotated-backup marker files.
# NOTE(review): no real cron or rotation logic is exercised here.
test_backup_scheduling() {
    print_header "Testing Backup Scheduling"

    # Simulated schedule record.
    local schedule_test="$TEST_BACKUP_DIR/schedule_test.txt"
    echo "Backup scheduled at: $(date)" > "$schedule_test"

    # Example cron line, logged to the report for reference.
    local cron_entry="0 2 * * * /usr/local/bin/backup_script.sh"
    echo "Cron entry: $cron_entry" >> "$OUTPUT_FILE"
    print_status "Backup scheduling simulation completed"

    # Simulated rotation: five generation markers.
    local n
    for n in {1..5}; do
        echo "Backup $n created at $(date)" > "$TEST_BACKUP_DIR/rotated_backup_$n.txt"
    done

    echo "Backup rotation: 5 test backups created" >> "$OUTPUT_FILE"
    print_status "Backup rotation: 5 test backups created"
}
|
|
|
|
# Initialize the report file (truncating any previous run) with run
# metadata and basic system information. The test stages append to it.
generate_report() {
    print_header "Generating Backup/Restore Test Report"

    {
        echo "=== Backup and Restore Test Report ==="
        echo "Date: $(date)"
        echo "Test Directory: $TEST_BACKUP_DIR"
        echo "Restore Directory: $TEST_RESTORE_DIR"
        echo ""
        echo "=== System Information ==="
        echo "Hostname: $(hostname)"
        echo "Kernel: $(uname -r)"
        echo "Available disk space: $(df -h "$BACKUP_ROOT" | awk 'NR==2 {print $4}')"
        echo ""
    } > "$OUTPUT_FILE"

    print_status "Test report initialized at $OUTPUT_FILE"
}
|
|
|
|
# Remove the per-run test directories. Registered as an EXIT trap, so it
# runs on every exit path (success, error, or interrupt).
# Fix: 'rm -rf -- "${VAR:?}"' — the :? expansion aborts instead of running
# rm with an empty path if the variable is ever unset, and '--' guards
# against paths beginning with a dash.
cleanup() {
    print_header "Cleaning Up"

    if [ -d "$TEST_BACKUP_DIR" ]; then
        rm -rf -- "${TEST_BACKUP_DIR:?}"
        print_status "Removed test backup directory"
    fi

    if [ -d "$TEST_RESTORE_DIR" ]; then
        rm -rf -- "${TEST_RESTORE_DIR:?}"
        print_status "Removed test restore directory"
    fi
}
|
|
|
|
# Print CLI usage to stdout.
usage() {
    cat <<EOF
Usage: $0 [output_file]
  output_file: Path to save test report (default: /tmp/backup_restore_test_report.txt)

This script tests backup and restore procedures.
It validates compression, encryption, incremental backups, and verification.
EOF
}
|
|
|
|
# Main execution
# Orchestrates the full run: verifies every required external tool is
# installed, initializes the report, then runs each test stage in order.
# Exits 1 if a dependency is missing or the infrastructure check fails.
main() {
    print_header "Starting Backup and Restore Testing"

    # Check dependencies — every external tool the stages below rely on.
    for cmd in tar gzip bzip2 xz openssl sha256sum bc; do
        if ! command -v "$cmd" >/dev/null 2>&1; then
            print_error "Required command '$cmd' not found"
            exit 1
        fi
    done

    # Initialize report (truncates OUTPUT_FILE; stages append afterwards).
    generate_report

    # Run tests — order matters: create_test_data must precede the stages
    # that write into $TEST_BACKUP_DIR.
    if check_backup_infrastructure; then
        create_test_data
        test_compression
        test_encryption
        test_incremental_backup
        test_database_backup
        test_backup_verification
        test_backup_scheduling

        print_status "All backup/restore tests completed successfully"
        print_status "Report saved to: $OUTPUT_FILE"
    else
        print_error "Backup infrastructure check failed - cannot proceed with tests"
        exit 1
    fi
}
|
|
|
|
# Trap to ensure cleanup on exit (runs on every exit path)
trap cleanup EXIT

# Parse command line arguments.
# Bug fixed: under 'set -u' a bare "$1" aborts with "unbound variable"
# when the script is run with no arguments; default it to the empty string.
if [ "${1:-}" = "-h" ] || [ "${1:-}" = "--help" ]; then
    usage
    exit 0
fi

# Run main function
main "$@"
|