Include full contents of all nested repositories
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
472
letsbe-ansible-runner/scripts/backups.sh
Normal file
472
letsbe-ansible-runner/scripts/backups.sh
Normal file
@@ -0,0 +1,472 @@
|
||||
#!/bin/bash
# =============================================================================
# LetsBe Backup Script
# =============================================================================
# Backs up databases, env files, nginx configs, and tool configs.
# Uploads to rclone remote if configured.
# Rotates: 7 daily + 4 weekly backups.
#
# Usage:
# /opt/letsbe/scripts/backups.sh
#
# Cron (installed by setup.sh):
# 0 2 * * * /bin/bash /opt/letsbe/scripts/backups.sh >> /opt/letsbe/logs/backup.log 2>&1
# =============================================================================

# Deliberately no `-e`: individual backup failures are collected in ERRORS
# (see log_error) so one broken service does not abort the whole run.
set -uo pipefail

# =============================================================================
# CONFIGURATION
# =============================================================================

LETSBE_BASE="/opt/letsbe"
BACKUP_DIR="/tmp/letsbe-backups"        # local staging area for all artifacts
DATE=$(date +%Y%m%d_%H%M%S)             # timestamp embedded in every filename
DAY_OF_WEEK=$(date +%u) # 1=Monday, 7=Sunday
RCLONE_REMOTE="backup"                  # rclone remote name used for uploads
# NOTE(review): LOG_FILE is defined but not written to in this script; the
# cron entry above redirects stdout/stderr to it instead — confirm intended.
LOG_FILE="${LETSBE_BASE}/logs/backup.log"
STATUS_FILE="${LETSBE_BASE}/config/backup-status.json"

# Ensure directories exist
mkdir -p "$BACKUP_DIR"
mkdir -p "${LETSBE_BASE}/logs"
mkdir -p "${LETSBE_BASE}/config"

# Tracking variables
ERRORS=()            # human-readable error messages, also emitted as JSON
FILES_BACKED_UP=0    # count of successfully produced artifacts
|
||||
|
||||
# Timestamped logger: writes "[YYYY-mm-dd HH:MM:SS] message" to stdout.
log() {
    printf '[%s] %s\n' "$(date '+%Y-%m-%d %H:%M:%S')" "$*"
}
|
||||
|
||||
# Timestamped error logger: writes "[ts] ERROR: message" to stderr and
# appends the raw message to the global ERRORS array for the status report.
log_error() {
    local msg="$*"
    printf '[%s] ERROR: %s\n' "$(date '+%Y-%m-%d %H:%M:%S')" "$msg" >&2
    ERRORS+=("$msg")
}
|
||||
|
||||
# =============================================================================
# BACKUP FUNCTIONS
# =============================================================================

# Backup a PostgreSQL database from a running container
#
# Arguments:
#   $1 - container name pattern; matched against `docker ps` names either
#        exactly or with any "<prefix>-" in front (e.g. "acme-chatwoot-postgres")
#   $2 - database name to pg_dump
#   $3 - database user (default: postgres)
#   $4 - label used in the output filename and log messages
# Globals: reads BACKUP_DIR, DATE; increments FILES_BACKED_UP on success;
#          appends to ERRORS via log_error on failure.
# Returns 0 silently when no matching container is running.
backup_postgres() {
    local container=$1
    local db_name=$2
    local db_user=${3:-postgres}
    local label=$4

    # Find container by pattern (supports both prefixed and exact names)
    local actual_container
    actual_container=$(docker ps --format '{{.Names}}' | grep -E "(^|-)${container}$" | head -1)

    if [[ -z "$actual_container" ]]; then
        return 0 # Container not running, skip silently
    fi

    log "Backing up PostgreSQL: $label ($actual_container -> $db_name)..."
    local output_file="${BACKUP_DIR}/pg_${label}_${DATE}.sql.gz"

    # pg_dump stderr is discarded; the gzip stream lands in output_file even
    # if pg_dump fails mid-way, hence the size check below.
    if docker exec "$actual_container" pg_dump -U "$db_user" "$db_name" 2>/dev/null | gzip > "$output_file"; then
        # Verify the file is not empty (just gzip header)
        # stat -c%s is GNU/Linux, stat -f%z is the BSD/macOS fallback.
        if [[ $(stat -c%s "$output_file" 2>/dev/null || stat -f%z "$output_file" 2>/dev/null) -gt 100 ]]; then
            FILES_BACKED_UP=$((FILES_BACKED_UP + 1))
            log "  OK: $output_file"
        else
            rm -f "$output_file"
            log_error "PostgreSQL dump empty for $label ($actual_container)"
        fi
    else
        rm -f "$output_file"
        log_error "PostgreSQL dump failed for $label ($actual_container)"
    fi
}
|
||||
|
||||
# Backup a MySQL/MariaDB database from a running container
#
# Arguments:
#   $1 - container name pattern (see backup_postgres for matching rules)
#   $2 - database name to mysqldump
#   $3 - database user (default: root)
#   $4 - database password
#   $5 - label used in the output filename and log messages
# Globals: reads BACKUP_DIR, DATE; increments FILES_BACKED_UP on success;
#          appends to ERRORS via log_error on failure.
# NOTE(review): the password is passed as -p<pass> on the command line, so it
# is briefly visible to `ps` inside the container; MYSQL_PWD or a defaults
# file would avoid that — confirm before changing.
backup_mysql() {
    local container=$1
    local db_name=$2
    local db_user=${3:-root}
    local db_pass=$4
    local label=$5

    local actual_container
    actual_container=$(docker ps --format '{{.Names}}' | grep -E "(^|-)${container}$" | head -1)

    if [[ -z "$actual_container" ]]; then
        return 0
    fi

    log "Backing up MySQL: $label ($actual_container -> $db_name)..."
    local output_file="${BACKUP_DIR}/mysql_${label}_${DATE}.sql.gz"

    # --single-transaction gives a consistent snapshot for InnoDB tables
    # without locking the database.
    if docker exec "$actual_container" mysqldump -u"$db_user" -p"$db_pass" --single-transaction "$db_name" 2>/dev/null | gzip > "$output_file"; then
        # Reject dumps that are only a gzip header (see backup_postgres).
        if [[ $(stat -c%s "$output_file" 2>/dev/null || stat -f%z "$output_file" 2>/dev/null) -gt 100 ]]; then
            FILES_BACKED_UP=$((FILES_BACKED_UP + 1))
            log "  OK: $output_file"
        else
            rm -f "$output_file"
            log_error "MySQL dump empty for $label ($actual_container)"
        fi
    else
        rm -f "$output_file"
        log_error "MySQL dump failed for $label ($actual_container)"
    fi
}
|
||||
|
||||
# Backup a MongoDB database from a running container
#
# Arguments:
#   $1 - container name pattern (see backup_postgres for matching rules)
#   $2 - database name passed to mongodump --db
#   $3 - label used in the output filename and log messages
# Globals: reads BACKUP_DIR, DATE; increments FILES_BACKED_UP on success;
#          appends to ERRORS via log_error on failure.
# Assumes the mongod instance accepts unauthenticated local mongodump —
# TODO confirm for deployments with auth enabled.
backup_mongo() {
    local container=$1
    local db_name=$2
    local label=$3

    local actual_container
    actual_container=$(docker ps --format '{{.Names}}' | grep -E "(^|-)${container}$" | head -1)

    if [[ -z "$actual_container" ]]; then
        return 0
    fi

    log "Backing up MongoDB: $label ($actual_container -> $db_name)..."
    local output_file="${BACKUP_DIR}/mongo_${label}_${DATE}.archive.gz"

    # --archive streams a single binary archive to stdout, gzipped locally.
    if docker exec "$actual_container" mongodump --db "$db_name" --archive 2>/dev/null | gzip > "$output_file"; then
        # Reject dumps that are only a gzip header (see backup_postgres).
        if [[ $(stat -c%s "$output_file" 2>/dev/null || stat -f%z "$output_file" 2>/dev/null) -gt 100 ]]; then
            FILES_BACKED_UP=$((FILES_BACKED_UP + 1))
            log "  OK: $output_file"
        else
            rm -f "$output_file"
            log_error "MongoDB dump empty for $label ($actual_container)"
        fi
    else
        rm -f "$output_file"
        log_error "MongoDB dump failed for $label ($actual_container)"
    fi
}
|
||||
|
||||
# Archive a directory into ${BACKUP_DIR} as dir_<label>_<DATE>.tar.gz.
# Silently does nothing when the directory is absent; on tar failure the
# partial archive is removed and the error is recorded via log_error.
backup_directory() {
    local source=$1
    local label=$2

    # Missing source directory is not an error — just skip.
    [[ -d "$source" ]] || return 0

    log "Backing up directory: $label ($source)..."

    local parent base archive
    parent=$(dirname "$source")
    base=$(basename "$source")
    archive="${BACKUP_DIR}/dir_${label}_${DATE}.tar.gz"

    # Archive relative to the parent so the tarball contains only the
    # directory's own name, not its absolute path.
    if ! tar czf "$archive" -C "$parent" "$base" 2>/dev/null; then
        rm -f "$archive"
        log_error "Directory backup failed for $label ($source)"
        return 0
    fi

    FILES_BACKED_UP=$((FILES_BACKED_UP + 1))
    log "  OK: $archive"
}
|
||||
|
||||
# =============================================================================
# HELPER: Read credentials from env files
# =============================================================================

# Look up a KEY=value entry in an env file and print the value with one
# surrounding quote character (single or double) stripped from each end.
# Prints the supplied default when the file is missing or the value is empty.
#
# Arguments:
#   $1 - path to the env file
#   $2 - variable name to look up
#   $3 - default value (optional, defaults to empty)
read_env_var() {
    local env_file=$1
    local key=$2
    local fallback=${3:-}
    local raw=""

    if [[ -f "$env_file" ]]; then
        # First matching line wins; everything after the first '=' is the value.
        raw=$(grep -E "^${key}=" "$env_file" 2>/dev/null | head -1 | cut -d'=' -f2- | sed 's/^["'"'"']//;s/["'"'"']$//')
    fi

    if [[ -n "$raw" ]]; then
        printf '%s\n' "$raw"
    else
        printf '%s\n' "$fallback"
    fi
}
|
||||
|
||||
# =============================================================================
# START BACKUP
# =============================================================================

log "=== LetsBe Backup Started - $DATE ==="

# =============================================================================
# 1. POSTGRESQL DATABASE BACKUPS
# =============================================================================

log "--- PostgreSQL Databases ---"

# Read credentials from env files where needed
CREDS_FILE="${LETSBE_BASE}/env/credentials.env"

# Chatwoot (user from credentials or default)
CHATWOOT_USER=$(read_env_var "$CREDS_FILE" "CHATWOOT_POSTGRES_USERNAME" "chatwoot")
backup_postgres "chatwoot-postgres" "chatwoot_production" "$CHATWOOT_USER" "chatwoot"

# Nextcloud
NC_USER=$(read_env_var "$CREDS_FILE" "NEXTCLOUD_POSTGRES_USER" "nextcloud")
backup_postgres "nextcloud-postgres" "nextcloud" "$NC_USER" "nextcloud"

# Keycloak
backup_postgres "keycloak-db" "keycloak" "keycloak" "keycloak"

# n8n
N8N_USER=$(read_env_var "${LETSBE_BASE}/env/n8n.env" "POSTGRES_USER" "postgres")
backup_postgres "n8n-postgres" "n8n" "$N8N_USER" "n8n"

# Cal.com
CALCOM_USER=$(read_env_var "${LETSBE_BASE}/env/calcom.env" "POSTGRES_USER" "postgres")
backup_postgres "calcom-postgres" "calcom" "$CALCOM_USER" "calcom"

# Umami
UMAMI_USER=$(read_env_var "$CREDS_FILE" "UMAMI_POSTGRES_USER" "postgres")
backup_postgres "umami-db" "umami" "$UMAMI_USER" "umami"

# NocoDB
backup_postgres "nocodb-postgres" "nocodb" "postgres" "nocodb"

# Typebot
backup_postgres "typebot-db" "typebot" "postgres" "typebot"

# Windmill
backup_postgres "windmill-db" "windmill" "postgres" "windmill"

# GlitchTip
backup_postgres "glitchtip-postgres" "postgres" "postgres" "glitchtip"

# Penpot
PENPOT_USER=$(read_env_var "$CREDS_FILE" "PENPOT_DB_USER" "postgres")
backup_postgres "penpot-postgres" "penpot" "$PENPOT_USER" "penpot"

# Gitea
GITEA_USER=$(read_env_var "$CREDS_FILE" "GITEA_POSTGRES_USER" "postgres")
backup_postgres "gitea-db" "gitea" "$GITEA_USER" "gitea"

# Odoo
ODOO_USER=$(read_env_var "$CREDS_FILE" "ODOO_POSTGRES_USER" "postgres")
backup_postgres "odoo-postgres" "postgres" "$ODOO_USER" "odoo"

# Listmonk
LISTMONK_USER=$(read_env_var "$CREDS_FILE" "LISTMONK_DB_USER" "postgres")
backup_postgres "listmonk-db" "listmonk" "$LISTMONK_USER" "listmonk"

# Documenso
DOCUMENSO_USER=$(read_env_var "$CREDS_FILE" "DOCUMENSO_POSTGRES_USER" "postgres")
backup_postgres "documenso-db" "documenso_db" "$DOCUMENSO_USER" "documenso"

# Redash (container name may not have customer prefix)
REDASH_USER=$(read_env_var "${LETSBE_BASE}/env/redash.env" "POSTGRES_USER" "postgres")
backup_postgres "redash-postgres" "postgres" "$REDASH_USER" "redash"

# Activepieces (container name may not have customer prefix)
ACTIVEPIECES_USER=$(read_env_var "${LETSBE_BASE}/env/activepieces.env" "AP_POSTGRES_USERNAME" "postgres")
ACTIVEPIECES_DB=$(read_env_var "${LETSBE_BASE}/env/activepieces.env" "AP_POSTGRES_DATABASE" "activepieces")
backup_postgres "activepieces-postgres" "$ACTIVEPIECES_DB" "$ACTIVEPIECES_USER" "activepieces"

# LibreChat vectordb (pgvector)
LIBRECHAT_PG_USER=$(read_env_var "$CREDS_FILE" "LIBRECHAT_POSTGRES_USER" "postgres")
backup_postgres "librechat-vectordb" "librechat" "$LIBRECHAT_PG_USER" "librechat-vectordb"
# Also try the generic volume-based container name
# NOTE(review): both calls use the same label, so if both container names are
# running the second dump overwrites the first (identical output filename)
# and FILES_BACKED_UP is counted twice — presumably only one ever matches.
backup_postgres "vectordb" "librechat" "$LIBRECHAT_PG_USER" "librechat-vectordb"

# Orchestrator
backup_postgres "orchestrator-db" "orchestrator" "orchestrator" "orchestrator"

# =============================================================================
# 2. MYSQL / MARIADB DATABASE BACKUPS
# =============================================================================

log "--- MySQL/MariaDB Databases ---"

# WordPress (MariaDB)
WP_USER=$(read_env_var "$CREDS_FILE" "WORDPRESS_DB_USER" "root")
WP_PASS=$(read_env_var "$CREDS_FILE" "WORDPRESS_DB_PASSWORD" "")
# Root password falls back to the app password when no dedicated root
# password is stored; the dump itself always runs as root.
WP_ROOT_PASS=$(read_env_var "$CREDS_FILE" "WORDPRESS_MARIADB_ROOT_PASSWORD" "$WP_PASS")
if [[ -n "$WP_ROOT_PASS" ]]; then
    backup_mysql "wordpress-mysql" "wordpress" "root" "$WP_ROOT_PASS" "wordpress"
fi

# Ghost (MySQL)
GHOST_PASS=$(read_env_var "$CREDS_FILE" "GHOST_MYSQL_PASSWORD" "")
if [[ -n "$GHOST_PASS" ]]; then
    backup_mysql "ghost-db" "ghost" "root" "$GHOST_PASS" "ghost"
fi

# =============================================================================
# 3. MONGODB BACKUPS
# =============================================================================

log "--- MongoDB Databases ---"

# LibreChat MongoDB
backup_mongo "librechat-mongodb" "LibreChat" "librechat"

# =============================================================================
# 4. ENV FILES BACKUP
# =============================================================================

log "--- Configuration Backups ---"

backup_directory "${LETSBE_BASE}/env" "env-files"

# =============================================================================
# 5. NGINX CONFIGS BACKUP
# =============================================================================

backup_directory "${LETSBE_BASE}/nginx" "nginx-configs"

# Also backup active nginx sites
if [[ -d "/etc/nginx/sites-enabled" ]]; then
    backup_directory "/etc/nginx/sites-enabled" "nginx-sites-enabled"
fi

# =============================================================================
# 6. TOOL CONFIGS BACKUP
# =============================================================================

backup_directory "${LETSBE_BASE}/config" "letsbe-config"

# Backup rclone config if it exists
# (contains remote credentials — the backup destination should be trusted)
if [[ -f "/root/.config/rclone/rclone.conf" ]]; then
    log "Backing up rclone config..."
    cp "/root/.config/rclone/rclone.conf" "${BACKUP_DIR}/rclone_conf_${DATE}.conf"
    FILES_BACKED_UP=$((FILES_BACKED_UP + 1))
fi

# Backup crontab
log "Backing up crontab..."
# An empty/missing crontab is not an error, hence the trailing || true.
crontab -l > "${BACKUP_DIR}/crontab_${DATE}.txt" 2>/dev/null && FILES_BACKED_UP=$((FILES_BACKED_UP + 1)) || true

# =============================================================================
# 7. UPLOAD TO RCLONE REMOTE
# =============================================================================

log "--- Remote Upload ---"

if command -v rclone &> /dev/null; then
    if rclone listremotes 2>/dev/null | grep -q "^${RCLONE_REMOTE}:"; then
        log "Uploading backups to ${RCLONE_REMOTE}:letsbe-backups/${DATE}/..."
        if rclone copy "$BACKUP_DIR" "${RCLONE_REMOTE}:letsbe-backups/${DATE}/" --quiet 2>&1; then
            log "Upload complete."
        else
            log_error "rclone upload failed"
        fi
    else
        log "WARNING: rclone remote '${RCLONE_REMOTE}' not configured. Backups stored locally only."
    fi
else
    log "WARNING: rclone not installed. Backups stored locally only."
fi

# =============================================================================
# 8. ROTATION: Keep 7 daily + 4 weekly
# =============================================================================

log "--- Backup Rotation ---"

# Daily cleanup: remove files older than 7 days
find "$BACKUP_DIR" -maxdepth 1 -type f -mtime +7 -delete 2>/dev/null || true
log "Local daily rotation applied (7 days)."
|
||||
|
||||
# Weekly rotation on remote (keep 4 weeks)
if command -v rclone &> /dev/null && rclone listremotes 2>/dev/null | grep -q "^${RCLONE_REMOTE}:"; then
    # If today is Sunday (day 7), copy today's backup as a weekly backup
    if [[ "$DAY_OF_WEEK" -eq 7 ]]; then
        WEEK_NUM=$(date +%Y-W%V)
        log "Creating weekly backup: ${WEEK_NUM}"
        rclone copy "${RCLONE_REMOTE}:letsbe-backups/${DATE}/" "${RCLONE_REMOTE}:letsbe-backups/weekly/${WEEK_NUM}/" --quiet 2>/dev/null || true
    fi

    # Remove daily remote backups older than 7 days.
    # The cutoff is loop-invariant, so compute it once up front.
    # `date -d` is GNU-only; on other systems the fallback 0 disables pruning.
    CUTOFF_EPOCH=$(date -d "7 days ago" +%s 2>/dev/null || echo "0")

    # List remote directories and delete old ones
    rclone lsd "${RCLONE_REMOTE}:letsbe-backups/" 2>/dev/null | while read -r _ _ _ dirname; do
        # Skip 'weekly' directory
        [[ "$dirname" == "weekly" ]] && continue
        # Parse date from directory name (format: YYYYMMDD_HHMMSS)
        dir_date="${dirname:0:8}"
        if [[ "$dir_date" =~ ^[0-9]{8}$ ]]; then
            dir_epoch=$(date -d "${dir_date:0:4}-${dir_date:4:2}-${dir_date:6:2}" +%s 2>/dev/null || echo "0")
            if [[ "$dir_epoch" -gt 0 && "$CUTOFF_EPOCH" -gt 0 && "$dir_epoch" -lt "$CUTOFF_EPOCH" ]]; then
                log "Removing old remote daily: $dirname"
                rclone purge "${RCLONE_REMOTE}:letsbe-backups/${dirname}/" --quiet 2>/dev/null || true
            fi
        fi
    done

    # Remove weekly backups older than 4 weeks.
    # Current year/week are loop-invariant as well.
    CURRENT_YEAR=$(date +%Y)
    CURRENT_WEEK=$(date +%V)
    rclone lsd "${RCLONE_REMOTE}:letsbe-backups/weekly/" 2>/dev/null | while read -r _ _ _ dirname; do
        # Parse week from directory name (format: YYYY-WNN)
        week_year="${dirname%%-*}"
        week_num="${dirname##*W}"
        if [[ "$week_year" =~ ^[0-9]{4}$ && "$week_num" =~ ^[0-9]+$ ]]; then
            # BUG FIX: force base-10 with 10# — %V produces zero-padded values
            # like "08"/"09" which bash arithmetic otherwise rejects as
            # invalid octal constants, aborting the age computation.
            # Calculate approximate age in weeks (ignores 53-week ISO years).
            age_weeks=$(( (CURRENT_YEAR - week_year) * 52 + (10#$CURRENT_WEEK - 10#$week_num) ))
            if [[ "$age_weeks" -gt 4 ]]; then
                log "Removing old remote weekly: $dirname"
                rclone purge "${RCLONE_REMOTE}:letsbe-backups/weekly/${dirname}/" --quiet 2>/dev/null || true
            fi
        fi
    done
fi
|
||||
|
||||
# =============================================================================
# 9. STATUS FILE
# =============================================================================

# Calculate total backup size
# (pipefail is set, so a failed `du` also triggers the `echo 0` fallback)
TOTAL_SIZE_BYTES=$(du -sb "$BACKUP_DIR" 2>/dev/null | cut -f1 || echo "0")
TOTAL_SIZE_MB=$(( TOTAL_SIZE_BYTES / 1048576 ))

# Determine status: "success" only when no backup step recorded an error.
if [[ ${#ERRORS[@]} -eq 0 ]]; then
    STATUS="success"
else
    STATUS="partial"
fi

# Build errors JSON array
# NOTE(review): only double quotes are escaped; a backslash or newline inside
# an error message would still produce invalid JSON — acceptable for the
# log-style messages emitted by this script, but confirm.
ERRORS_JSON="[]"
if [[ ${#ERRORS[@]} -gt 0 ]]; then
    ERRORS_JSON="["
    for i in "${!ERRORS[@]}"; do
        # Escape quotes in error messages
        escaped=$(echo "${ERRORS[$i]}" | sed 's/"/\\"/g')
        if [[ $i -gt 0 ]]; then
            ERRORS_JSON+=","
        fi
        ERRORS_JSON+="\"${escaped}\""
    done
    ERRORS_JSON+="]"
fi

# Machine-readable status consumed from ${STATUS_FILE} (e.g. by monitoring).
cat > "$STATUS_FILE" <<EOF
{
  "last_run": "$(date -u '+%Y-%m-%dT%H:%M:%SZ')",
  "status": "${STATUS}",
  "size_mb": ${TOTAL_SIZE_MB},
  "files_backed_up": ${FILES_BACKED_UP},
  "errors": ${ERRORS_JSON}
}
EOF

# =============================================================================
# DONE
# =============================================================================

log "=== Backup Complete ==="
log "Status: ${STATUS}"
log "Files backed up: ${FILES_BACKED_UP}"
log "Total size: ${TOTAL_SIZE_MB} MB"
log "Local backups: ${BACKUP_DIR}"
if [[ ${#ERRORS[@]} -gt 0 ]]; then
    log "Errors (${#ERRORS[@]}):"
    for err in "${ERRORS[@]}"; do
        log "  - $err"
    done
fi
|
||||
677
letsbe-ansible-runner/scripts/env_setup.sh
Normal file
677
letsbe-ansible-runner/scripts/env_setup.sh
Normal file
@@ -0,0 +1,677 @@
|
||||
#!/bin/bash
#
# LetsBe Cloud Environment Setup Script
# Non-interactive version for Orchestrator/SysAdmin Agent integration
#
# Usage:
# ./env_setup.sh --customer "acme" --domain "acme.com" --company "Acme Corp"
# ./env_setup.sh --json '{"customer":"acme","domain":"acme.com","company_name":"Acme Corp"}'
# ./env_setup.sh --config /path/to/config.json
#

# Strict mode: abort on any error, unset variable, or failed pipeline stage.
set -euo pipefail

# ============================================================================
# CONFIGURATION
# ============================================================================

# Base layout of a LetsBe installation on the target host.
LETSBE_BASE="/opt/letsbe"
STACKS_DIR="${LETSBE_BASE}/stacks"     # docker compose stacks
NGINX_DIR="${LETSBE_BASE}/nginx"       # generated nginx vhosts
ENV_DIR="${LETSBE_BASE}/env"           # per-tool env files
SCRIPTS_DIR="${LETSBE_BASE}/scripts"   # operational scripts (backups etc.)
|
||||
|
||||
# ============================================================================
# HELPER FUNCTIONS
# ============================================================================

# Print CLI usage to stdout and exit with status 1 (also serves --help).
usage() {
    cat <<EOF
Usage: $0 [OPTIONS]

Required (one of):
  --customer NAME       Customer name (lowercase, no spaces/hyphens/numbers)
  --domain DOMAIN       Main domain without subdomains (lowercase)
  --company NAME        Company name (can include spaces)

Or provide all via JSON:
  --json JSON_STRING    JSON object with customer, domain, company_name
  --config FILE         Path to JSON config file

Example:
  $0 --customer acme --domain acme.com --company "Acme Corporation"
  $0 --json '{"customer":"acme","domain":"acme.com","company_name":"Acme Corp"}'
  $0 --config /opt/letsbe/config/setup.json

EOF
    exit 1
}
|
||||
|
||||
# Emit an informational message to stdout with an "[INFO]" prefix.
log_info() {
    printf '[INFO] %s\n' "$*"
}
|
||||
|
||||
# Emit an error message to stderr with an "[ERROR]" prefix.
log_error() {
    printf '[ERROR] %s\n' "$*" >&2
}
|
||||
|
||||
# Log an error message and abort the script with exit status 1.
die() {
    log_error "$*"
    exit 1
}
|
||||
|
||||
# Generate random string of specified length
#
# Produces an alphanumeric ([A-Za-z0-9]) string of exactly $1 characters,
# followed by a trailing newline (same output shape as the original).
#
# BUG FIX: the previous `tr -dc ... </dev/urandom | head -c N` form makes tr
# read an infinite stream; when head exits, tr dies with SIGPIPE (status 141),
# which under this script's `set -euo pipefail` can abort the whole run.
# Reading finite chunks from /dev/urandom avoids SIGPIPE entirely.
generate_random_string() {
    local length=$1
    local result=""
    local chunk
    # Each 64-byte chunk yields ~40 alphanumeric chars on average; loop
    # until we have enough, then truncate to the exact length.
    while (( ${#result} < length )); do
        chunk=$(head -c 64 /dev/urandom | tr -dc 'A-Za-z0-9')
        result+="$chunk"
    done
    printf '%s\n' "${result:0:length}"
}
|
||||
|
||||
# Abort (via die) when a required configuration value is empty.
#
# Arguments:
#   $1 - variable name, used only in the error message
#   $2 - the value to check
validate_required() {
    local name=$1
    local value=$2
    [[ -n "${value}" ]] || die "Required variable '${name}' is not set"
}
|
||||
|
||||
# Parse JSON input using jq
#
# Populates the script-level configuration variables from a JSON object:
#   customer, domain, company_name, license_key, hub_url,
#   hub_telemetry_enabled, hub_api_key (only when present),
#   server_ip_override (only when present).
#
# Arguments:
#   $1 - JSON object string
parse_json() {
    local json_input=$1

    if ! command -v jq &> /dev/null; then
        die "jq is required for JSON parsing. Install with: apt-get install jq"
    fi

    customer=$(echo "${json_input}" | jq -r '.customer // empty')
    domain=$(echo "${json_input}" | jq -r '.domain // empty')
    company_name=$(echo "${json_input}" | jq -r '.company_name // empty')

    # Hub / Licensing configuration
    license_key=$(echo "${json_input}" | jq -r '.license_key // empty')
    hub_url=$(echo "${json_input}" | jq -r '.hub_url // "https://hub.letsbe.biz"')
    # BUG FIX: jq's `//` operator treats `false` as empty, so an explicit
    # `"hub_telemetry_enabled": false` was silently rewritten to "true".
    # Only fall back to the default when the key is absent or null.
    hub_telemetry_enabled=$(echo "${json_input}" | jq -r 'if .hub_telemetry_enabled == null then true else .hub_telemetry_enabled end')

    # Accept hub_api_key from JSON too, for parity with the --hub-api-key
    # CLI flag; leave any previously set value untouched when absent.
    local json_hub_api_key
    json_hub_api_key=$(echo "${json_input}" | jq -r '.hub_api_key // empty')
    if [[ -n "${json_hub_api_key}" ]]; then
        hub_api_key="${json_hub_api_key}"
    fi

    # Optional server IP override (auto-detected if not provided)
    local json_server_ip
    json_server_ip=$(echo "${json_input}" | jq -r '.server_ip // empty')
    if [[ -n "${json_server_ip}" ]]; then
        server_ip_override="${json_server_ip}"
    fi
}
|
||||
|
||||
# ============================================================================
# ARGUMENT PARSING
# ============================================================================

# Defaults; any of these may be overwritten by CLI flags or JSON input.
customer=""
domain=""
company_name=""
docker_user=""
server_ip_override=""

# Hub / Licensing configuration
license_key=""
hub_api_key=""
hub_url="https://hub.letsbe.biz"
hub_telemetry_enabled="true"

# NOTE(review): a flag supplied without its value makes "$2" unbound, which
# aborts via `set -u` rather than printing usage — confirm that is acceptable.
while [[ $# -gt 0 ]]; do
    case $1 in
        --customer)
            customer="$2"
            shift 2
            ;;
        --domain)
            domain="$2"
            shift 2
            ;;
        --company)
            company_name="$2"
            shift 2
            ;;
        --docker-user)
            docker_user="$2"
            shift 2
            ;;
        --license-key)
            license_key="$2"
            shift 2
            ;;
        --hub-url)
            hub_url="$2"
            shift 2
            ;;
        --hub-api-key)
            hub_api_key="$2"
            shift 2
            ;;
        --hub-telemetry)
            hub_telemetry_enabled="$2"
            shift 2
            ;;
        --json)
            # Inline JSON blob; overwrites the defaults above.
            parse_json "$2"
            shift 2
            ;;
        --config)
            # JSON config file; same shape as --json.
            if [[ ! -f "$2" ]]; then
                die "Config file not found: $2"
            fi
            parse_json "$(cat "$2")"
            shift 2
            ;;
        --help|-h)
            usage
            ;;
        *)
            log_error "Unknown option: $1"
            usage
            ;;
    esac
done
|
||||
|
||||
# ============================================================================
# VALIDATION
# ============================================================================

validate_required "customer" "${customer}"
validate_required "domain" "${domain}"
validate_required "company_name" "${company_name}"

# Validate customer format (lowercase, no spaces/hyphens/numbers)
if [[ ! "${customer}" =~ ^[a-z]+$ ]]; then
    die "Customer name must be lowercase letters only, no spaces/hyphens/numbers: ${customer}"
fi

# Validate domain format
if [[ ! "${domain}" =~ ^[a-z0-9.-]+\.[a-z]{2,}$ ]]; then
    die "Invalid domain format: ${domain}"
fi

log_info "Configuration validated"
log_info "  Customer: ${customer}"
log_info "  Domain: ${domain}"
log_info "  Company: ${company_name}"

# ============================================================================
# DERIVED VARIABLES
# ============================================================================

# Email for Let's Encrypt
letsencrypt_email="postmaster@${domain}"

# Subdomains per tool
# NOTE(review): domain_wordpress and domain_ghost both map to the bare
# apex domain — presumably only one of the two is deployed per customer;
# confirm against the stack templates.
domain_html="html.${domain}"
domain_wordpress="${domain}"
domain_squidex="contenthub.${domain}"
domain_chatwoot="support.${domain}"
domain_chatwoot_helpdesk="helpdesk.${domain}"
domain_gitea="code.${domain}"
domain_gitea_drone="ci.${domain}"
domain_glitchtip="debug.${domain}"
domain_listmonk="newsletters.${domain}"
domain_n8n="n8n.${domain}"
domain_nextcloud="cloud.${domain}"
domain_penpot="design.${domain}"
domain_poste="mail.${domain}"
domain_umami="analytics.${domain}"
domain_uptime_kuma="uptime.${domain}"
domain_windmill="flows.${domain}"
domain_calcom="bookings.${domain}"
domain_odoo="crm.${domain}"
domain_collabora="collabora.${domain}"
domain_whiteboard="whiteboard.${domain}"
domain_signaling="signaling.${domain}"
domain_activepieces="automation.${domain}"
domain_minio="minio.${domain}"
domain_s3="s3.${domain}"
domain_librechat="ai.${domain}"
domain_bot_viewer="bots.${domain}"
domain_botlab="botlab.${domain}"
domain_nocodb="database.${domain}"
domain_redash="data.${domain}"
domain_documenso="signatures.${domain}"
domain_keycloak="auth.${domain}"
domain_pdf="pdf.${domain}"
domain_portainer="portainer.${domain}"
domain_ghost="${domain}"
|
||||
|
||||
# ============================================================================
|
||||
# GENERATED SECRETS
|
||||
# ============================================================================
|
||||
|
||||
log_info "Generating secrets and credentials..."
|
||||
|
||||
# WordPress
|
||||
wordpresss_mariadb_root_password=$(generate_random_string 20)
|
||||
wordpress_db_user=$(generate_random_string 10 | tr '[:upper:]' '[:lower:]')
|
||||
wordpress_db_password=$(generate_random_string 20)
|
||||
|
||||
# Squidex
|
||||
squidex_adminemail="postmaster@${domain}"
|
||||
squidex_adminpassword=$(generate_random_string 20)
|
||||
|
||||
# Listmonk
|
||||
listmonk_admin_username=$(generate_random_string 10 | tr '[:upper:]' '[:lower:]')
|
||||
listmonk_admin_password=$(generate_random_string 20)
|
||||
listmonk_db_user=$(generate_random_string 10 | tr '[:upper:]' '[:lower:]')
|
||||
listmonk_db_password=$(generate_random_string 20)
|
||||
|
||||
# Gitea
|
||||
gitea_postgres_user=$(generate_random_string 10 | tr '[:upper:]' '[:lower:]')
|
||||
gitea_postgres_password=$(generate_random_string 20)
|
||||
|
||||
# Umami
|
||||
umami_app_secret=$(generate_random_string 32)
|
||||
umami_postgres_user=$(generate_random_string 10 | tr '[:upper:]' '[:lower:]')
|
||||
umami_postgres_password=$(generate_random_string 20)
|
||||
|
||||
# Drone/Gitea
|
||||
drone_gitea_rpc_secret=$(generate_random_string 32)
|
||||
|
||||
# Windmill
|
||||
windmill_database_password=$(generate_random_string 20)
|
||||
|
||||
# Glitchtip
|
||||
glitchtip_database_password=$(generate_random_string 20)
|
||||
glitchtip_secret_key=$(generate_random_string 32)
|
||||
|
||||
# Penpot
|
||||
penpot_secret_key=$(generate_random_string 32)
|
||||
penpot_db_user=$(generate_random_string 10 | tr '[:upper:]' '[:lower:]')
|
||||
penpot_db_password=$(generate_random_string 20)
|
||||
|
||||
# Nextcloud
|
||||
nextcloud_postgres_user=$(generate_random_string 10 | tr '[:upper:]' '[:lower:]')
|
||||
nextcloud_postgres_password=$(generate_random_string 20)
|
||||
nextcloud_jwt_secret=$(generate_random_string 64 | tr '[:upper:]' '[:lower:]')
|
||||
nextcloud_admin_password=$(generate_random_string 20)
|
||||
|
||||
# Collabora
|
||||
collabora_password=$(generate_random_string 20)
|
||||
collabora_user=$(generate_random_string 10 | tr '[:upper:]' '[:lower:]')
|
||||
|
||||
# Nextcloud Talk HPB / TURN
|
||||
turn_secret=$(openssl rand -hex 32)
|
||||
signaling_secret=$(openssl rand -hex 32)
|
||||
internal_secret=$(openssl rand -hex 32)
|
||||
|
||||
# Server public IP (needed for coturn external-ip)
|
||||
if [[ -n "${server_ip_override}" ]]; then
|
||||
server_ip="${server_ip_override}"
|
||||
log_info "Using provided server IP: ${server_ip}"
|
||||
else
|
||||
server_ip=$(curl -4 -s --max-time 10 ifconfig.co || curl -4 -s --max-time 10 icanhazip.com || echo "REPLACE_WITH_SERVER_IP")
|
||||
log_info "Auto-detected server IP: ${server_ip}"
|
||||
fi
|
||||
|
||||
# ----------------------------------------------------------------------------
# Generated service credentials.
# Every secret below comes from generate_random_string (defined earlier in
# this script); lengths vary per service requirement. The values are
# substituted into the template files later and persisted to credentials.env.
# ----------------------------------------------------------------------------

# Chatwoot
chatwoot_secret_key_base=$(generate_random_string 32)
chatwoot_redis_password=$(generate_random_string 20)
# Postgres usernames are lowercased - presumably because unquoted Postgres
# identifiers are case-folded; TODO confirm generate_random_string's charset.
chatwoot_postgres_username=$(generate_random_string 10 | tr '[:upper:]' '[:lower:]')
chatwoot_postgres_password=$(generate_random_string 20)
chatwoot_rails_inbound_email_password=$(generate_random_string 20)

# N8N
n8n_postgres_user=$(generate_random_string 10 | tr '[:upper:]' '[:lower:]')
n8n_postgres_password=$(generate_random_string 20)

# Cal.com
calcom_nextauth_secret=$(generate_random_string 32)
calcom_postgres_user=$(generate_random_string 10 | tr '[:upper:]' '[:lower:]')
calcom_postgres_password=$(generate_random_string 20)

# Odoo
odoo_postgres_user=$(generate_random_string 10 | tr '[:upper:]' '[:lower:]')
odoo_postgres_password=$(generate_random_string 20)

# Activepieces
activepieces_api_key=$(generate_random_string 32)
# NOTE(review): lowercasing a generated secret folds any uppercase characters
# onto lowercase, shrinking the effective alphabet - confirm the consuming
# service actually requires lowercase here and below.
activepieces_encryption_key=$(generate_random_string 32 | tr '[:upper:]' '[:lower:]')
activepieces_jwt_secret=$(generate_random_string 64 | tr '[:upper:]' '[:lower:]')
activepieces_postgres_password=$(generate_random_string 32)

# MinIO
minio_root_user=$(generate_random_string 16)
minio_root_password=$(generate_random_string 32)

# Typebot
typebot_encryption_secret=$(generate_random_string 32)
typebot_postgres_password=$(generate_random_string 20)

# NocoDB
nocodb_postgres_password=$(generate_random_string 32)

# LibreChat
librechat_postgres_password=$(generate_random_string 20)
librechat_postgres_user=$(generate_random_string 10 | tr '[:upper:]' '[:lower:]')
librechat_jwt_secret=$(generate_random_string 64 | tr '[:upper:]' '[:lower:]')
librechat_jwt_refresh_secret=$(generate_random_string 64 | tr '[:upper:]' '[:lower:]')

# Redash
redash_secret_key=$(generate_random_string 32)
redash_cookie_secret=$(generate_random_string 32)
redash_postgres_password=$(generate_random_string 20)
redash_postgres_user=$(generate_random_string 10 | tr '[:upper:]' '[:lower:]')

# Documenso
documenso_postgres_user=$(generate_random_string 10 | tr '[:upper:]' '[:lower:]')
documenso_postgres_password=$(generate_random_string 40)
documenso_nextauth_secret=$(generate_random_string 32)
documenso_encryption_key=$(generate_random_string 64 | tr '[:upper:]' '[:lower:]')
documenso_encryption_secondary_key=$(generate_random_string 64 | tr '[:upper:]' '[:lower:]')

# Ghost
ghost_mysql_password=$(generate_random_string 40)
ghost_s3_access_key=$(generate_random_string 20)
ghost_s3_secret_key=$(generate_random_string 40)

# Keycloak
keycloak_postgres_password=$(generate_random_string 40)
keycloak_admin_password=$(generate_random_string 40)
keycloak_grafana_password=$(generate_random_string 40)

# Portainer (admin credentials for automated setup)
# Note: --admin-password-file expects PLAIN TEXT password (not bcrypt hash)
# The username is always "admin" - cannot be changed via CLI
portainer_admin_user="admin"
portainer_admin_password=$(generate_random_string 24)

# StirlingPDF
stirlingpdf_postgres_user=$(generate_random_string 10 | tr '[:upper:]' '[:lower:]')
stirlingpdf_postgres_password=$(generate_random_string 40)
stirlingpdf_api_key=$(generate_random_string 40)

# Sysadmin Agent
# Registration token is now auto-generated by local_bootstrap.sh after license validation
# It calls the local orchestrator API to create a token
# Legacy: can still be provided via SYSADMIN_REGISTRATION_TOKEN env var for backwards compatibility
sysadmin_registration_token="${SYSADMIN_REGISTRATION_TOKEN:-PLACEHOLDER_GENERATED_BY_BOOTSTRAP}"
# Legacy token (deprecated, kept for backward compatibility)
sysadmin_agent_token=$(generate_random_string 64)

# ============================================================================
# ORCHESTRATOR / HUB CONFIGURATION
# ============================================================================

# Orchestrator database password (for local orchestrator)
orchestrator_db_password=$(generate_random_string 40)

# Admin API key for orchestrator management
admin_api_key=$(generate_random_string 64)

# Instance ID (must match what's registered in Hub)
instance_id="${customer}-orchestrator"

# Orchestrator URL for sysadmin agent
# LOCAL_MODE uses host.docker.internal to reach orchestrator from agent container
orchestrator_url="http://host.docker.internal:8100"

# Local agent key for LOCAL_MODE authentication
# Shared secret between orchestrator and sysadmin agent
local_agent_key=$(generate_random_string 64)
|
||||
|
||||
# Validate license key (required for official installations)
if [[ -z "${license_key}" || "${license_key}" == "lb_inst_YOUR_LICENSE_KEY_HERE" ]]; then
    # Emit the full help banner line-by-line through log_error, then abort.
    license_help=(
        "=============================================="
        "LICENSE KEY REQUIRED"
        "=============================================="
        ""
        "A valid license key is required for official LetsBe installations."
        ""
        "To obtain a license key:"
        "1. Contact LetsBe to create a client account in Hub"
        "2. Request an instance to be created for your deployment"
        "3. Add the license_key to your config.json:"
        '   {"license_key": "lb_inst_..."}'
        ""
        "=============================================="
    )
    for help_line in "${license_help[@]}"; do
        log_error "${help_line}"
    done
    die "Missing or invalid license_key in config.json"
fi

# Summarize the Hub-related settings (license key truncated to its prefix).
log_info "Hub configuration:"
log_info "  License Key: ${license_key:0:12}..."
log_info "  Hub URL: ${hub_url}"
log_info "  Instance ID: ${instance_id}"
log_info "  Telemetry: ${hub_telemetry_enabled}"
|
||||
|
||||
# ============================================================================
# TEMPLATE REPLACEMENT
# ============================================================================

log_info "Replacing placeholders in template files..."

# All "{{ name }}" placeholders the templates may contain. Each entry is the
# name of a shell variable defined earlier in this script; its value is the
# replacement text (looked up via ${!name} indirection below).
template_vars=(
    # Core
    customer domain company_name letsencrypt_email
    # Per-tool domains
    domain_html domain_wordpress domain_squidex domain_chatwoot
    domain_chatwoot_helpdesk domain_gitea domain_gitea_drone domain_glitchtip
    domain_listmonk domain_librechat domain_n8n domain_nextcloud
    domain_penpot domain_poste domain_umami domain_uptime_kuma
    domain_windmill domain_calcom domain_odoo domain_collabora
    domain_activepieces domain_bot_viewer domain_botlab domain_minio
    domain_s3 domain_nocodb domain_whiteboard domain_signaling
    domain_redash domain_documenso domain_keycloak domain_pdf
    domain_portainer domain_ghost
    # Credentials
    wordpresss_mariadb_root_password wordpress_db_user wordpress_db_password
    squidex_adminemail squidex_adminpassword
    listmonk_admin_username listmonk_admin_password
    listmonk_db_user listmonk_db_password
    gitea_postgres_user gitea_postgres_password
    umami_app_secret umami_postgres_user umami_postgres_password
    drone_gitea_rpc_secret windmill_database_password
    glitchtip_database_password glitchtip_secret_key
    penpot_secret_key penpot_db_user penpot_db_password
    nextcloud_postgres_user nextcloud_postgres_password
    nextcloud_admin_password nextcloud_jwt_secret
    collabora_password collabora_user
    turn_secret signaling_secret internal_secret server_ip
    chatwoot_secret_key_base chatwoot_redis_password
    chatwoot_postgres_username chatwoot_postgres_password
    chatwoot_rails_inbound_email_password
    n8n_postgres_user n8n_postgres_password
    calcom_nextauth_secret calcom_postgres_user calcom_postgres_password
    odoo_postgres_user odoo_postgres_password
    activepieces_api_key activepieces_encryption_key
    activepieces_jwt_secret activepieces_postgres_password
    minio_root_user minio_root_password
    typebot_encryption_secret typebot_postgres_password
    nocodb_postgres_password
    redash_secret_key redash_cookie_secret
    redash_postgres_user redash_postgres_password
    librechat_postgres_user librechat_postgres_password
    librechat_jwt_secret librechat_jwt_refresh_secret
    documenso_postgres_user documenso_postgres_password
    documenso_nextauth_secret documenso_encryption_key
    documenso_encryption_secondary_key
    ghost_mysql_password ghost_s3_access_key ghost_s3_secret_key
    keycloak_postgres_password keycloak_admin_password keycloak_grafana_password
    stirlingpdf_postgres_user stirlingpdf_postgres_password stirlingpdf_api_key
    sysadmin_agent_token sysadmin_registration_token
    # Hub / Orchestrator
    license_key hub_api_key hub_url hub_telemetry_enabled instance_id
    orchestrator_db_password admin_api_key orchestrator_url local_agent_key
)

# Escape the characters that are special in a sed replacement with the '|'
# delimiter: backslash, ampersand, and the delimiter itself. Without this, a
# value containing '/', '&' or '\' would corrupt the sed command (the previous
# code used '/' as delimiter for most variables and performed no escaping).
escape_sed_replacement() {
    printf '%s' "$1" | sed -e 's/[&|\\]/\\&/g'
}

# Build ONE sed script up front - the replacement values are loop-invariant -
# and apply it to each file in a single pass, instead of spawning ~150
# separate "sed -i" processes per template file.
template_sed_script=$(mktemp) || die "Failed to create temporary sed script"
for var_name in "${template_vars[@]}"; do
    printf 's|{{ %s }}|%s|g\n' "${var_name}" \
        "$(escape_sed_replacement "${!var_name}")" >> "${template_sed_script}"
done

# Process all template files
for file in "${STACKS_DIR}"/*/* "${STACKS_DIR}"/*/.* "${NGINX_DIR}"/* "${SCRIPTS_DIR}"/backups.sh; do
    # The -f test also skips unmatched glob patterns (left literal by bash)
    # and the '.'/'..' directory entries matched by "*/.*".
    if [[ -f "${file}" ]]; then
        sed -i -f "${template_sed_script}" "${file}"
    fi
done

rm -f "${template_sed_script}"

log_info "All placeholders replaced successfully."
|
||||
|
||||
# ============================================================================
# GENERATE ENV FILES
# ============================================================================

log_info "Generating centralized environment files..."

mkdir -p "${ENV_DIR}"

# SECURITY: tighten the umask while secret files are being written so they
# are created owner-only from the start. Previously the files were created
# under the default umask and only chmod'ed afterwards, leaving a window in
# which all generated passwords were world-readable. The chmod 600 calls
# below still run to fix up files that already existed from a previous run
# (umask does not affect pre-existing files).
previous_umask=$(umask)
umask 077

# Write master credentials file for reference
cat > "${ENV_DIR}/credentials.env" <<EOF
# LetsBe Cloud Credentials - Generated $(date -Iseconds)
# Customer: ${customer}
# Domain: ${domain}
# Company: ${company_name}
#
# KEEP THIS FILE SECURE - Contains all generated passwords
#

# WordPress
WORDPRESS_DB_USER=${wordpress_db_user}
WORDPRESS_DB_PASSWORD=${wordpress_db_password}
WORDPRESS_MARIADB_ROOT_PASSWORD=${wordpresss_mariadb_root_password}

# Nextcloud
NEXTCLOUD_ADMIN_PASSWORD=${nextcloud_admin_password}
NEXTCLOUD_POSTGRES_USER=${nextcloud_postgres_user}
NEXTCLOUD_POSTGRES_PASSWORD=${nextcloud_postgres_password}

# Nextcloud Talk HPB / TURN
TURN_SECRET=${turn_secret}
SIGNALING_SECRET=${signaling_secret}
INTERNAL_SECRET=${internal_secret}
SERVER_IP=${server_ip}

# Listmonk
LISTMONK_ADMIN_USER=${listmonk_admin_username}
LISTMONK_ADMIN_PASSWORD=${listmonk_admin_password}

# MinIO
MINIO_ROOT_USER=${minio_root_user}
MINIO_ROOT_PASSWORD=${minio_root_password}

# Keycloak
KEYCLOAK_ADMIN_PASSWORD=${keycloak_admin_password}

# Portainer
PORTAINER_ADMIN_USER=${portainer_admin_user}
PORTAINER_ADMIN_PASSWORD=${portainer_admin_password}

# Sysadmin Agent
# Note: Registration token is generated by local_bootstrap.sh after license validation.
# It calls the local orchestrator to create a one-time registration token.
# After initial registration, agent credentials are persisted to ~/.letsbe-agent/credentials.json
# SYSADMIN_REGISTRATION_TOKEN will be written to sysadmin-credentials.env by bootstrap
# SYSADMIN_AGENT_TOKEN=${sysadmin_agent_token} # Deprecated

# ============ HUB / ORCHESTRATOR ============
# License key (validated during bootstrap)
LICENSE_KEY=${license_key}
HUB_API_KEY=${hub_api_key}
HUB_URL=${hub_url}
HUB_TELEMETRY_ENABLED=${hub_telemetry_enabled}
INSTANCE_ID=${instance_id}

# Local orchestrator credentials
ORCHESTRATOR_DB_PASSWORD=${orchestrator_db_password}
ADMIN_API_KEY=${admin_api_key}

# Local agent authentication (shared between orchestrator and sysadmin agent)
LOCAL_AGENT_KEY=${local_agent_key}
EOF

# Add Docker Hub section if docker_user was provided
if [[ -n "${docker_user}" ]]; then
    cat >> "${ENV_DIR}/credentials.env" <<EOF

# Docker Hub
DOCKER_HUB_USER=${docker_user}
# Note: Token not stored for security - regenerate from Docker Hub if needed
EOF
fi

chmod 600 "${ENV_DIR}/credentials.env"

# Write Portainer admin password file (for automated admin setup)
# This file is mounted into the Portainer container and used with --admin-password-file
# NOTE: --admin-password-file expects PLAIN TEXT password, NOT bcrypt hash
# (--admin-password expects bcrypt hash, but --admin-password-file expects plain text)
printf '%s' "${portainer_admin_password}" > "${ENV_DIR}/portainer_admin_password.txt"
chmod 600 "${ENV_DIR}/portainer_admin_password.txt"
log_info "Portainer admin password file saved to: ${ENV_DIR}/portainer_admin_password.txt"

# Restore the caller's umask now that all secret files exist.
umask "${previous_umask}"

log_info "Environment setup complete."
log_info "Credentials saved to: ${ENV_DIR}/credentials.env"
|
||||
258
letsbe-ansible-runner/scripts/local_bootstrap.sh
Normal file
258
letsbe-ansible-runner/scripts/local_bootstrap.sh
Normal file
@@ -0,0 +1,258 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# Local Orchestrator Bootstrap (with License Validation)
|
||||
#
|
||||
# This script runs AFTER docker-compose up to:
|
||||
# 1. VALIDATE LICENSE with LetsBe Hub (REQUIRED for official installations)
|
||||
# 2. Wait for orchestrator health check (includes migrations via container startup)
|
||||
# 3. Get local tenant ID (for verification/logging)
|
||||
# 4. Write simplified credentials file
|
||||
#
|
||||
# NOTE: Database migrations are now run by the orchestrator container on startup
|
||||
#
|
||||
# IMPORTANT: Agent registration is handled via LOCAL_AGENT_KEY
|
||||
# from docker-compose.yml environment, NOT registration tokens.
|
||||
#
|
||||
# This script is idempotent - safe to run multiple times.
|
||||
#
|
||||
# Usage:
|
||||
# HUB_URL="https://hub.letsbe.biz" \
|
||||
# LICENSE_KEY="lb_inst_..." \
|
||||
# INSTANCE_ID="acme-orchestrator" \
|
||||
# ADMIN_API_KEY="admin_key" \
|
||||
# CUSTOMER="acme" \
|
||||
# bash local_bootstrap.sh
|
||||
|
||||
set -euo pipefail

# ============ CONFIGURATION ============
# All values come from the environment (exported by the provisioner). The
# ':?' expansions abort immediately with a message when a required variable
# is missing; the ':-' expansions supply defaults for optional ones.

HUB_URL="${HUB_URL:-https://hub.letsbe.biz}"                  # LetsBe Hub endpoint
LICENSE_KEY="${LICENSE_KEY:-}"                                # checked in validate_license()
INSTANCE_ID="${INSTANCE_ID:?INSTANCE_ID required}"
ORCHESTRATOR_URL="${ORCHESTRATOR_URL:-http://localhost:8100}" # local orchestrator API
ADMIN_API_KEY="${ADMIN_API_KEY:?ADMIN_API_KEY required}"
CUSTOMER="${CUSTOMER:?CUSTOMER required}"
CREDENTIALS_DIR="${CREDENTIALS_DIR:-/opt/letsbe/env}"         # where credential files land
|
||||
|
||||
# ============ LOGGING ============
# Every helper prints "[TAG] YYYY-mm-dd HH:MM:SS message"; error and warn
# variants go to stderr so command substitutions capture only real output.

log_info()    { printf '[BOOTSTRAP] %s %s\n' "$(date '+%Y-%m-%d %H:%M:%S')" "$*"; }
log_error()   { printf '[BOOTSTRAP-ERROR] %s %s\n' "$(date '+%Y-%m-%d %H:%M:%S')" "$*" >&2; }
log_success() { printf '[BOOTSTRAP-OK] %s %s\n' "$(date '+%Y-%m-%d %H:%M:%S')" "$*"; }
log_warn()    { printf '[BOOTSTRAP-WARN] %s %s\n' "$(date '+%Y-%m-%d %H:%M:%S')" "$*" >&2; }
|
||||
|
||||
# ============ LICENSE VALIDATION (FIRST STEP) ============
|
||||
|
||||
# Validate the license with LetsBe Hub. Exits the script on any failure;
# on success may export HUB_API_KEY and persist it to hub-credentials.env.
validate_license() {
  log_info "Validating license with LetsBe Hub..."

  # Check if license key is provided
  if [ -z "$LICENSE_KEY" ]; then
    log_error "LICENSE_KEY is required but not provided."
    log_error "Please obtain a license key from LetsBe Hub."
    log_error "Add 'license_key' to your config.json and re-run provisioning."
    exit 1
  fi

  # Check if Hub URL is configured
  if [ -z "$HUB_URL" ]; then
    log_error "HUB_URL is required but not provided."
    exit 1
  fi

  # SECURITY: use an unpredictable temp file instead of the fixed
  # /tmp/activation_response.json path (avoids symlink attacks in the shared
  # /tmp and collisions between concurrent runs).
  local response_file
  response_file=$(mktemp) || { log_error "mktemp failed"; exit 1; }

  # Build the JSON payload with jq so quotes/backslashes in the key or
  # instance id cannot corrupt (or inject into) the document.
  local payload
  payload=$(jq -n --arg lk "$LICENSE_KEY" --arg iid "$INSTANCE_ID" \
    '{license_key: $lk, instance_id: $iid}')

  # Call Hub activation endpoint. The payload is fed via stdin (-d @-) so
  # the license key does not appear in the process list.
  local http_code
  http_code=$(curl -s -o "$response_file" -w "%{http_code}" \
    -X POST "${HUB_URL}/api/v1/instances/activate" \
    -H "Content-Type: application/json" \
    -d @- <<<"$payload")

  if [ "$http_code" != "200" ]; then
    log_error "License validation failed (HTTP $http_code)"

    # Parse error response
    if [ -f "$response_file" ]; then
      local error_msg
      local error_code

      # Try to parse JSON error response
      error_msg=$(jq -r '.error // .detail.error // "Unknown error"' "$response_file" 2>/dev/null || echo "Unknown error")
      error_code=$(jq -r '.code // .detail.code // "unknown"' "$response_file" 2>/dev/null || echo "unknown")

      log_error "Error: $error_msg (code: $error_code)"

      case "$error_code" in
        "invalid_license")
          log_error "The provided license key is invalid."
          log_error "Please verify your license_key in config.json."
          ;;
        "expired")
          log_error "Your license has expired."
          log_error "Please contact LetsBe to renew your license."
          ;;
        "suspended")
          log_error "Your license has been suspended."
          log_error "Please contact LetsBe support."
          ;;
        "instance_not_found")
          log_error "Instance ID '$INSTANCE_ID' not found in Hub."
          log_error "Please ensure your instance was created in LetsBe Hub."
          ;;
        *)
          log_error "Please contact LetsBe support with error code: $error_code"
          ;;
      esac
    fi

    rm -f "$response_file"
    exit 1
  fi

  log_success "License validated successfully!"

  # Extract hub_api_key from response if provided
  local hub_api_key
  hub_api_key=$(jq -r '.hub_api_key // empty' "$response_file" 2>/dev/null || echo "")

  if [ -n "$hub_api_key" ] && [ "$hub_api_key" != "USE_EXISTING" ]; then
    log_info "Received hub_api_key from activation"
    export HUB_API_KEY="$hub_api_key"

    # Create the credentials file owner-only BEFORE the secret lands in it
    # (appending under the default umask would briefly expose it).
    mkdir -p "${CREDENTIALS_DIR}"
    ( umask 077; touch "${CREDENTIALS_DIR}/hub-credentials.env" )
    echo "HUB_API_KEY=${hub_api_key}" >> "${CREDENTIALS_DIR}/hub-credentials.env"
    chmod 600 "${CREDENTIALS_DIR}/hub-credentials.env"
    log_info "Hub API key saved to ${CREDENTIALS_DIR}/hub-credentials.env"
  fi

  rm -f "$response_file"
}
|
||||
|
||||
# ============ ORCHESTRATOR FUNCTIONS ============

# Poll the orchestrator health endpoint until it responds, pausing 2 seconds
# between tries. Returns 0 once healthy, 1 after max_attempts failures.
wait_for_orchestrator() {
  log_info "Waiting for orchestrator to be ready..."
  local -r max_attempts=60
  local try
  for (( try = 1; try <= max_attempts; try++ )); do
    if curl -sf "${ORCHESTRATOR_URL}/health" > /dev/null 2>&1; then
      log_success "Orchestrator is ready"
      return 0
    fi
    log_info "Attempt $try/$max_attempts - waiting..."
    sleep 2
  done

  log_error "Orchestrator not ready after ${max_attempts} attempts"
  return 1
}
|
||||
|
||||
# Apply Alembic migrations inside the orchestrator container.
# NOTE(review): not invoked by main() below - per the header comment,
# migrations now run on container startup; this is kept for manual use.
run_migrations() {
  log_info "Running database migrations..."

  # Find the orchestrator container
  # (first docker container whose name matches "orchestrator", optionally
  # prefixed by the customer name; CUSTOMER is interpolated into the regex)
  local orchestrator_container
  orchestrator_container=$(docker ps --format '{{.Names}}' | grep -E "(orchestrator|${CUSTOMER}.*orchestrator)" | head -1)

  if [ -z "$orchestrator_container" ]; then
    log_error "Could not find orchestrator container"
    return 1
  fi

  # Runs alembic in-container; under set -e a migration failure aborts the
  # caller.
  docker exec "$orchestrator_container" alembic upgrade head
  log_success "Migrations complete"
}
|
||||
|
||||
# Fetch the local tenant ID from the orchestrator and print ONLY the id on
# stdout. Returns 1 (after logging) when the response has no tenant_id.
get_local_tenant_id() {
  # BUG FIX: callers capture this function's output with $(...), but
  # log_info / log_success write to stdout, so the captured "tenant id"
  # used to contain the log lines as well. Route diagnostics to stderr so
  # stdout carries nothing but the id.
  log_info "Getting local tenant ID..." >&2

  local response
  response=$(curl -sf "${ORCHESTRATOR_URL}/api/v1/meta/instance")
  local tenant_id
  tenant_id=$(echo "$response" | jq -r '.tenant_id')

  if [ "$tenant_id" == "null" ] || [ -z "$tenant_id" ]; then
    log_error "Failed to get tenant_id from /api/v1/meta/instance"
    log_error "Response: $response"
    return 1
  fi

  log_success "Tenant ID: $tenant_id" >&2
  printf '%s\n' "$tenant_id"
}
|
||||
|
||||
# Write the agent-facing credentials file.
#   $1 - tenant ID returned by the orchestrator
write_credentials() {
  local tenant_id=$1
  local credentials_file="${CREDENTIALS_DIR}/sysadmin-credentials.env"

  log_info "Writing credentials to ${credentials_file}..."

  mkdir -p "${CREDENTIALS_DIR}"

  # NOTE: In LOCAL_MODE, agent uses LOCAL_AGENT_KEY from docker-compose.yml
  # We do NOT write ADMIN_API_KEY here - agent doesn't need it
  # We do NOT write registration tokens - LOCAL_MODE uses direct key auth
  #
  # SECURITY: create the file inside a subshell with a restrictive umask so
  # it is owner-only from the moment it exists (previously it was created
  # under the default umask and only chmod'ed afterwards).
  (
    umask 077
    cat > "${credentials_file}" <<EOF
# LetsBe LOCAL_MODE Credentials
# Generated by local_bootstrap.sh at $(date -u '+%Y-%m-%dT%H:%M:%SZ')
#
# Agent Registration: Uses LOCAL_AGENT_KEY from docker-compose.yml
# NOT stored here to minimize secrets at rest
TENANT_ID=${tenant_id}
INSTANCE_ID=${INSTANCE_ID}
EOF
  )

  chmod 600 "${credentials_file}"
  log_success "Credentials written"
}
|
||||
|
||||
# Write the admin-only credentials file (for admin scripts - NOT the agent).
write_admin_credentials() {
  local admin_creds="${CREDENTIALS_DIR}/admin-credentials.env"

  log_info "Writing admin credentials to ${admin_creds}..."

  mkdir -p "${CREDENTIALS_DIR}"

  # SECURITY: create the file with a restrictive umask so the API key is
  # never briefly world-readable before the chmod/chown below.
  (
    umask 077
    cat > "${admin_creds}" <<EOF
# LetsBe Admin Credentials (root only)
# Generated by local_bootstrap.sh at $(date -u '+%Y-%m-%dT%H:%M:%SZ')
ADMIN_API_KEY=${ADMIN_API_KEY}
EOF
  )

  chmod 600 "${admin_creds}"
  # chown is best-effort: it fails harmlessly when not running as root.
  chown root:root "${admin_creds}" 2>/dev/null || true
  log_success "Admin credentials written (root-only access)"
}
|
||||
|
||||
# ============ MAIN ============

# Bootstrap sequence. Runs under set -euo pipefail, so any failing step
# (license rejection, orchestrator never becoming healthy, missing tenant id)
# aborts the whole script.
main() {
  log_info "Starting local orchestrator bootstrap for: ${CUSTOMER}"
  log_info "Instance ID: ${INSTANCE_ID}"

  # STEP 1: License validation (REQUIRED for official installations)
  # This is the gating step - if license fails, nothing else runs
  validate_license

  # STEP 2: Wait for orchestrator (migrations run on container startup)
  wait_for_orchestrator

  # STEP 3: Get tenant ID (for verification)
  # Declaration and assignment are separate so the function's exit status is
  # not masked by 'local'.
  local tenant_id
  tenant_id=$(get_local_tenant_id)

  # STEP 4: Write credentials
  # NOTE: No registration token creation - LOCAL_MODE uses LOCAL_AGENT_KEY
  write_credentials "${tenant_id}"
  write_admin_credentials

  log_success "Bootstrap complete!"
  log_info "Instance '${INSTANCE_ID}' is now licensed and activated"
  log_info ""
  log_info "Agent registration: Uses LOCAL_AGENT_KEY from docker-compose.yml"
  log_info "Agent should register with orchestrator within 30 seconds"
}

main "$@"
|
||||
511
letsbe-ansible-runner/scripts/restore.sh
Normal file
511
letsbe-ansible-runner/scripts/restore.sh
Normal file
@@ -0,0 +1,511 @@
|
||||
#!/bin/bash
|
||||
# =============================================================================
|
||||
# LetsBe Restore Script
|
||||
# =============================================================================
|
||||
#
|
||||
# Restores backups created by backups.sh.
|
||||
#
|
||||
# Usage:
|
||||
# restore.sh list List available local backups
|
||||
# restore.sh list-remote List available remote backups
|
||||
# restore.sh download <DATE> Download a remote backup set locally
|
||||
# restore.sh postgres <TOOL> <FILE> Restore a PostgreSQL database
|
||||
# restore.sh mysql <TOOL> <FILE> Restore a MySQL/MariaDB database
|
||||
# restore.sh mongo <TOOL> <FILE> Restore a MongoDB database
|
||||
# restore.sh env <FILE> Restore env files
|
||||
# restore.sh configs <FILE> Restore config files
|
||||
# restore.sh nginx <FILE> Restore nginx configs
|
||||
# restore.sh full <DATE> Full restore from a backup date
|
||||
#
|
||||
# Examples:
|
||||
# restore.sh list
|
||||
# restore.sh postgres chatwoot /tmp/letsbe-backups/pg_chatwoot_20260207_020000.sql.gz
|
||||
# restore.sh env /tmp/letsbe-backups/dir_env-files_20260207_020000.tar.gz
|
||||
# restore.sh download 20260207_020000
|
||||
# restore.sh full 20260207_020000
|
||||
#
|
||||
# IMPORTANT:
|
||||
# - Always stop the tool's application containers before restoring its database.
|
||||
# - Database containers must remain running during restore.
|
||||
# - After restore, restart the full tool stack.
|
||||
#
|
||||
# =============================================================================
|
||||
|
||||
# Fail on unset variables and broken pipelines. Deliberately NOT 'set -e':
# each step below controls its own failure handling via die / || true.
set -uo pipefail

LETSBE_BASE="/opt/letsbe"         # installation root laid down by setup.sh
BACKUP_DIR="/tmp/letsbe-backups"  # local staging area for backup archives
RCLONE_REMOTE="backup"            # rclone remote name holding offsite backups
|
||||
|
||||
# =============================================================================
|
||||
# HELPERS
|
||||
# =============================================================================
|
||||
|
||||
# Print a tagged status line to stdout.
log() {
    printf '[RESTORE] %s\n' "$*"
}
|
||||
|
||||
# Print a tagged error to stderr and abort the script with status 1.
die() {
    printf '[RESTORE ERROR] %s\n' "$*" >&2
    exit 1
}
|
||||
|
||||
# Abort unless the given path exists and is a regular file.
require_file() {
    local path=$1
    if [[ ! -f "$path" ]]; then
        die "File not found: $path"
    fi
}
|
||||
|
||||
# Resolve a running container whose name ends with the given suffix
# (either exactly, or preceded by a '-'). Prints the first match, or
# nothing when no container matches.
find_container() {
    local suffix=$1
    docker ps --format '{{.Names}}' | grep -E "(^|-)${suffix}$" | head -n 1
}
|
||||
|
||||
# =============================================================================
|
||||
# COMMANDS
|
||||
# =============================================================================
|
||||
|
||||
# List backup archives staged locally, largest first.
cmd_list() {
    log "Available local backups in ${BACKUP_DIR}:"
    echo ""
    if [[ ! -d "$BACKUP_DIR" ]]; then
        echo "  No backup directory found."
        return
    fi
    ls -lhS "$BACKUP_DIR"/ 2>/dev/null || echo "  (empty)"
}
|
||||
|
||||
# List backup sets available on the configured rclone remote,
# split into daily (top-level date dirs) and weekly (weekly/ subdir).
cmd_list_remote() {
    command -v rclone &> /dev/null || die "rclone not installed"
    rclone listremotes 2>/dev/null | grep -q "^${RCLONE_REMOTE}:" \
        || die "rclone remote '${RCLONE_REMOTE}' not configured"

    log "Available remote backups:"
    echo ""
    echo "Daily:"
    rclone lsd "${RCLONE_REMOTE}:letsbe-backups/" 2>/dev/null | grep -v "weekly" | awk '{print "  " $NF}'
    echo ""
    echo "Weekly:"
    rclone lsd "${RCLONE_REMOTE}:letsbe-backups/weekly/" 2>/dev/null | awk '{print "  " $NF}'
}
|
||||
|
||||
# Download one dated backup set from the rclone remote into BACKUP_DIR.
# Arguments: $1 - backup date string (e.g. 20260207_020000)
cmd_download() {
    local date_str=$1
    command -v rclone &> /dev/null || die "rclone not installed"

    local remote_path="${RCLONE_REMOTE}:letsbe-backups/${date_str}/"
    log "Downloading backup from ${remote_path}..."
    mkdir -p "$BACKUP_DIR"
    # BUGFIX: the script runs without 'set -e', so a failed copy used to fall
    # through and still print "Download complete"; abort loudly instead.
    rclone copy "$remote_path" "$BACKUP_DIR/" --progress \
        || die "rclone copy failed from ${remote_path}"
    log "Download complete. Files in ${BACKUP_DIR}/"
}
|
||||
|
||||
# Drop and recreate a tool's PostgreSQL database, then load a SQL dump
# (optionally gzip-compressed) into it. Prompts for confirmation before
# touching anything. The database container must be running.
cmd_restore_postgres() {
    local tool=$1
    local file=$2
    require_file "$file"

    # Map tool name to container suffix, db name, and user
    local container db_name db_user
    case "$tool" in
        chatwoot)     container="chatwoot-postgres";     db_name="chatwoot_production"; db_user="chatwoot" ;;
        nextcloud)    container="nextcloud-postgres";    db_name="nextcloud";           db_user="nextcloud" ;;
        keycloak)     container="keycloak-db";           db_name="keycloak";            db_user="keycloak" ;;
        n8n)          container="n8n-postgres";          db_name="n8n";                 db_user="postgres" ;;
        calcom)       container="calcom-postgres";       db_name="calcom";              db_user="postgres" ;;
        umami)        container="umami-db";              db_name="umami";               db_user="postgres" ;;
        nocodb)       container="nocodb-postgres";       db_name="nocodb";              db_user="postgres" ;;
        typebot)      container="typebot-db";            db_name="typebot";             db_user="postgres" ;;
        windmill)     container="windmill-db";           db_name="windmill";            db_user="postgres" ;;
        glitchtip)    container="glitchtip-postgres";    db_name="postgres";            db_user="postgres" ;;
        penpot)       container="penpot-postgres";       db_name="penpot";              db_user="postgres" ;;
        gitea)        container="gitea-db";              db_name="gitea";               db_user="postgres" ;;
        odoo)         container="odoo-postgres";         db_name="postgres";            db_user="postgres" ;;
        listmonk)     container="listmonk-db";           db_name="listmonk";            db_user="postgres" ;;
        documenso)    container="documenso-db";          db_name="documenso_db";        db_user="postgres" ;;
        redash)       container="redash-postgres";       db_name="postgres";            db_user="postgres" ;;
        activepieces) container="activepieces-postgres"; db_name="activepieces";        db_user="postgres" ;;
        orchestrator) container="orchestrator-db";       db_name="orchestrator";        db_user="orchestrator" ;;
        *) die "Unknown PostgreSQL tool: $tool. Use one of: chatwoot, nextcloud, keycloak, n8n, calcom, umami, nocodb, typebot, windmill, glitchtip, penpot, gitea, odoo, listmonk, documenso, redash, activepieces, orchestrator" ;;
    esac

    local actual_container
    actual_container=$(find_container "$container")
    if [[ -z "$actual_container" ]]; then
        die "Container matching '$container' not found. Is it running?"
    fi

    log "Restoring PostgreSQL: $tool"
    log "  Container: $actual_container"
    log "  Database: $db_name"
    log "  File: $file"
    echo ""
    read -p "WARNING: This will DROP and recreate database '$db_name'. Continue? (yes/no): " confirm
    if [[ "$confirm" != "yes" ]]; then
        die "Restore cancelled."
    fi

    # Kick out live sessions first, otherwise DROP DATABASE fails.
    log "Dropping and recreating database..."
    docker exec "$actual_container" psql -U "$db_user" -c "SELECT pg_terminate_backend(pid) FROM pg_stat_activity WHERE datname = '$db_name' AND pid <> pg_backend_pid();" postgres 2>/dev/null || true
    docker exec "$actual_container" psql -U "$db_user" -c "DROP DATABASE IF EXISTS \"$db_name\";" postgres
    docker exec "$actual_container" psql -U "$db_user" -c "CREATE DATABASE \"$db_name\";" postgres

    log "Restoring from backup..."
    case "$file" in
        *.gz) gunzip -c "$file" | docker exec -i "$actual_container" psql -U "$db_user" "$db_name" ;;
        *)    docker exec -i "$actual_container" psql -U "$db_user" "$db_name" < "$file" ;;
    esac

    log "PostgreSQL restore complete for $tool."
    log "Restart the $tool application containers to reconnect."
}
|
||||
|
||||
# Load a MySQL/MariaDB dump (optionally gzip-compressed) into a tool's
# database, authenticating with the root credentials stored in
# credentials.env. Prompts for confirmation before writing.
cmd_restore_mysql() {
    local tool=$1
    local file=$2
    require_file "$file"

    local container db_name db_user
    case "$tool" in
        wordpress) container="wordpress-mysql"; db_name="wordpress"; db_user="root" ;;
        ghost)     container="ghost-db";        db_name="ghost";     db_user="root" ;;
        *) die "Unknown MySQL tool: $tool. Use one of: wordpress, ghost" ;;
    esac

    local actual_container
    actual_container=$(find_container "$container")
    if [[ -z "$actual_container" ]]; then
        die "Container matching '$container' not found. Is it running?"
    fi

    log "Restoring MySQL: $tool"
    log "  Container: $actual_container"
    log "  Database: $db_name"
    log "  File: $file"
    echo ""
    read -p "WARNING: This will overwrite database '$db_name'. Continue? (yes/no): " confirm
    if [[ "$confirm" != "yes" ]]; then
        die "Restore cancelled."
    fi

    log "Restoring from backup..."
    # Read root password from credentials
    # NOTE(review): for ghost the password comes from GHOST_MYSQL_PASSWORD while
    # the login user is root — confirm that variable holds the root password.
    local creds_file="${LETSBE_BASE}/env/credentials.env"
    local pass_key=""
    case "$tool" in
        wordpress) pass_key="WORDPRESS_MARIADB_ROOT_PASSWORD" ;;
        ghost)     pass_key="GHOST_MYSQL_PASSWORD" ;;
    esac
    local db_pass
    db_pass=$(grep "^${pass_key}=" "$creds_file" 2>/dev/null | cut -d'=' -f2-)
    if [[ -z "$db_pass" ]]; then
        die "Could not read database password from $creds_file"
    fi

    case "$file" in
        *.gz) gunzip -c "$file" | docker exec -i "$actual_container" mysql -u"$db_user" -p"$db_pass" "$db_name" ;;
        *)    docker exec -i "$actual_container" mysql -u"$db_user" -p"$db_pass" "$db_name" < "$file" ;;
    esac

    log "MySQL restore complete for $tool."
    log "Restart the $tool application containers to reconnect."
}
|
||||
|
||||
# Load a mongodump archive (optionally gzip-compressed) into a tool's
# MongoDB, dropping existing collections first. Prompts for confirmation.
cmd_restore_mongo() {
    local tool=$1
    local file=$2
    require_file "$file"

    local container db_name
    case "$tool" in
        librechat) container="librechat-mongodb"; db_name="LibreChat" ;;
        *) die "Unknown MongoDB tool: $tool. Use: librechat" ;;
    esac

    local actual_container
    actual_container=$(find_container "$container")
    if [[ -z "$actual_container" ]]; then
        die "Container matching '$container' not found. Is it running?"
    fi

    log "Restoring MongoDB: $tool"
    log "  Container: $actual_container"
    log "  Database: $db_name"
    log "  File: $file"
    echo ""
    read -p "WARNING: This will drop and restore database '$db_name'. Continue? (yes/no): " confirm
    if [[ "$confirm" != "yes" ]]; then
        die "Restore cancelled."
    fi

    log "Restoring from backup..."
    case "$file" in
        *.gz) gunzip -c "$file" | docker exec -i "$actual_container" mongorestore --db "$db_name" --drop --archive ;;
        *)    docker exec -i "$actual_container" mongorestore --db "$db_name" --drop --archive < "$file" ;;
    esac

    log "MongoDB restore complete for $tool."
    log "Restart the $tool application containers to reconnect."
}
|
||||
|
||||
# Restore ${LETSBE_BASE}/env from a tarball, keeping a timestamped safety
# copy of the current directory first, then re-tighten secret-file modes.
# Arguments: $1 - path to dir_env-files_<date>.tar.gz
cmd_restore_env() {
    local file=$1
    require_file "$file"

    log "Restoring env files from: $file"
    echo ""
    read -p "WARNING: This will overwrite files in ${LETSBE_BASE}/env/. Continue? (yes/no): " confirm
    [[ "$confirm" == "yes" ]] || die "Restore cancelled."

    # Backup current env files first
    local timestamp
    timestamp=$(date +%Y%m%d_%H%M%S)
    if [[ -d "${LETSBE_BASE}/env" ]]; then
        log "Backing up current env files to ${LETSBE_BASE}/env.pre-restore.${timestamp}..."
        # BUGFIX: a failed safety copy used to be ignored (no 'set -e');
        # don't proceed with a destructive extract without the fallback.
        cp -a "${LETSBE_BASE}/env" "${LETSBE_BASE}/env.pre-restore.${timestamp}" \
            || die "Safety copy of current env files failed"
    fi

    log "Extracting..."
    # BUGFIX: tar failures were silently reported as success; abort loudly.
    tar xzf "$file" -C "${LETSBE_BASE}/" || die "Extraction failed: $file"
    # Env files hold secrets; keep them owner-only readable.
    chmod 600 "${LETSBE_BASE}/env/"*.env 2>/dev/null || true
    log "Env files restored."
}
|
||||
|
||||
# Restore ${LETSBE_BASE}/config from a tarball, keeping a timestamped
# safety copy of the current directory first.
# Arguments: $1 - path to dir_letsbe-config_<date>.tar.gz
cmd_restore_configs() {
    local file=$1
    require_file "$file"

    log "Restoring config files from: $file"
    echo ""
    read -p "WARNING: This will overwrite files in ${LETSBE_BASE}/config/. Continue? (yes/no): " confirm
    [[ "$confirm" == "yes" ]] || die "Restore cancelled."

    local timestamp
    timestamp=$(date +%Y%m%d_%H%M%S)
    if [[ -d "${LETSBE_BASE}/config" ]]; then
        # BUGFIX: a failed safety copy used to be ignored (no 'set -e').
        cp -a "${LETSBE_BASE}/config" "${LETSBE_BASE}/config.pre-restore.${timestamp}" \
            || die "Safety copy of current config files failed"
    fi

    # BUGFIX: tar failures were silently reported as success; abort loudly.
    tar xzf "$file" -C "${LETSBE_BASE}/" || die "Extraction failed: $file"
    log "Config files restored."
}
|
||||
|
||||
# Restore ${LETSBE_BASE}/nginx from a tarball, keeping a timestamped safety
# copy of the current directory first. nginx must be restarted afterwards.
# Arguments: $1 - path to dir_nginx-configs_<date>.tar.gz
cmd_restore_nginx() {
    local file=$1
    require_file "$file"

    log "Restoring nginx configs from: $file"
    echo ""
    read -p "WARNING: This will overwrite files in ${LETSBE_BASE}/nginx/. Continue? (yes/no): " confirm
    [[ "$confirm" == "yes" ]] || die "Restore cancelled."

    local timestamp
    timestamp=$(date +%Y%m%d_%H%M%S)
    if [[ -d "${LETSBE_BASE}/nginx" ]]; then
        # BUGFIX: a failed safety copy used to be ignored (no 'set -e').
        cp -a "${LETSBE_BASE}/nginx" "${LETSBE_BASE}/nginx.pre-restore.${timestamp}" \
            || die "Safety copy of current nginx configs failed"
    fi

    # BUGFIX: tar failures were silently reported as success; abort loudly.
    tar xzf "$file" -C "${LETSBE_BASE}/" || die "Extraction failed: $file"
    log "Nginx configs restored."
    log "Run: systemctl restart nginx"
}
|
||||
|
||||
# Restore an entire backup set (env files, configs, nginx, and every database
# dump found) for one backup date. Asks for a single confirmation up front and
# then runs non-interactively; the per-type restore logic is inlined here
# (rather than calling cmd_restore_*) precisely to avoid repeated prompts.
# NOTE(review): the tool -> container/db/user mapping duplicates the table in
# cmd_restore_postgres — the two must be kept in sync manually.
cmd_full_restore() {
    local date_str=$1
    local backup_path="$BACKUP_DIR"

    log "=== Full System Restore for date: $date_str ==="
    echo ""
    echo "This will restore ALL databases and configuration files from the backup."
    echo "Make sure all tool containers are stopped (except database containers)."
    echo ""
    read -p "Continue with full restore? (yes/no): " confirm
    [[ "$confirm" == "yes" ]] || die "Full restore cancelled."

    # Check if files exist locally, download if not
    local pg_count
    pg_count=$(ls "${backup_path}"/pg_*"${date_str}"* 2>/dev/null | wc -l)
    if [[ "$pg_count" -eq 0 ]]; then
        log "Backup files not found locally. Attempting remote download..."
        cmd_download "$date_str"
    fi

    # Restore env files
    local env_file="${backup_path}/dir_env-files_${date_str}.tar.gz"
    if [[ -f "$env_file" ]]; then
        log "Restoring env files..."
        # Non-interactive for full restore (already confirmed above)
        local timestamp
        timestamp=$(date +%Y%m%d_%H%M%S)
        [[ -d "${LETSBE_BASE}/env" ]] && cp -a "${LETSBE_BASE}/env" "${LETSBE_BASE}/env.pre-restore.${timestamp}"
        tar xzf "$env_file" -C "${LETSBE_BASE}/"
        chmod 600 "${LETSBE_BASE}/env/"*.env 2>/dev/null || true
        log "  Env files restored."
    fi

    # Restore configs
    local cfg_file="${backup_path}/dir_letsbe-config_${date_str}.tar.gz"
    if [[ -f "$cfg_file" ]]; then
        log "Restoring config files..."
        tar xzf "$cfg_file" -C "${LETSBE_BASE}/"
        log "  Config files restored."
    fi

    # Restore nginx configs
    local nginx_file="${backup_path}/dir_nginx-configs_${date_str}.tar.gz"
    if [[ -f "$nginx_file" ]]; then
        log "Restoring nginx configs..."
        tar xzf "$nginx_file" -C "${LETSBE_BASE}/"
        log "  Nginx configs restored."
    fi

    # Restore all PostgreSQL databases found for this date.
    # The glob is not nullglob-guarded; the [[ -f ]] check skips the literal
    # pattern when nothing matches.
    log "Restoring PostgreSQL databases..."
    for pg_file in "${backup_path}"/pg_*"${date_str}"*.sql.gz; do
        [[ -f "$pg_file" ]] || continue
        # Extract tool name from filename: pg_<tool>_<date>.sql.gz
        local tool_name
        tool_name=$(basename "$pg_file" | sed "s/^pg_//;s/_${date_str}.*//")
        log "  Restoring PostgreSQL: $tool_name"

        # Find container and restore without interactive prompt
        local container db_name db_user
        case "$tool_name" in
            chatwoot) container="chatwoot-postgres"; db_name="chatwoot_production"; db_user="chatwoot" ;;
            nextcloud) container="nextcloud-postgres"; db_name="nextcloud"; db_user="nextcloud" ;;
            keycloak) container="keycloak-db"; db_name="keycloak"; db_user="keycloak" ;;
            n8n) container="n8n-postgres"; db_name="n8n"; db_user="postgres" ;;
            calcom) container="calcom-postgres"; db_name="calcom"; db_user="postgres" ;;
            umami) container="umami-db"; db_name="umami"; db_user="postgres" ;;
            nocodb) container="nocodb-postgres"; db_name="nocodb"; db_user="postgres" ;;
            typebot) container="typebot-db"; db_name="typebot"; db_user="postgres" ;;
            windmill) container="windmill-db"; db_name="windmill"; db_user="postgres" ;;
            glitchtip) container="glitchtip-postgres"; db_name="postgres"; db_user="postgres" ;;
            penpot) container="penpot-postgres"; db_name="penpot"; db_user="postgres" ;;
            gitea) container="gitea-db"; db_name="gitea"; db_user="postgres" ;;
            odoo) container="odoo-postgres"; db_name="postgres"; db_user="postgres" ;;
            listmonk) container="listmonk-db"; db_name="listmonk"; db_user="postgres" ;;
            documenso) container="documenso-db"; db_name="documenso_db"; db_user="postgres" ;;
            redash) container="redash-postgres"; db_name="postgres"; db_user="postgres" ;;
            activepieces) container="activepieces-postgres"; db_name="activepieces"; db_user="postgres" ;;
            orchestrator) container="orchestrator-db"; db_name="orchestrator"; db_user="orchestrator" ;;
            *) log "  Skipping unknown tool: $tool_name"; continue ;;
        esac

        local actual_container
        actual_container=$(find_container "$container")
        if [[ -z "$actual_container" ]]; then
            log "  WARNING: Container '$container' not running, skipping $tool_name"
            continue
        fi

        # Terminate live sessions so DROP DATABASE can succeed.
        docker exec "$actual_container" psql -U "$db_user" -c "SELECT pg_terminate_backend(pid) FROM pg_stat_activity WHERE datname = '$db_name' AND pid <> pg_backend_pid();" postgres 2>/dev/null || true
        docker exec "$actual_container" psql -U "$db_user" -c "DROP DATABASE IF EXISTS \"$db_name\";" postgres 2>/dev/null || true
        docker exec "$actual_container" psql -U "$db_user" -c "CREATE DATABASE \"$db_name\";" postgres 2>/dev/null || true
        # NOTE(review): restore errors are swallowed (> /dev/null 2>&1) and
        # "OK" is reported unconditionally — verify restores afterwards.
        gunzip -c "$pg_file" | docker exec -i "$actual_container" psql -U "$db_user" "$db_name" > /dev/null 2>&1
        log "  OK: $tool_name"
    done

    # Restore MySQL databases
    log "Restoring MySQL databases..."
    for mysql_file in "${backup_path}"/mysql_*"${date_str}"*.sql.gz; do
        [[ -f "$mysql_file" ]] || continue
        local tool_name
        tool_name=$(basename "$mysql_file" | sed "s/^mysql_//;s/_${date_str}.*//")
        log "  Restoring MySQL: $tool_name"

        local container db_name db_pass
        case "$tool_name" in
            wordpress)
                container="wordpress-mysql"; db_name="wordpress"
                db_pass=$(grep "^WORDPRESS_MARIADB_ROOT_PASSWORD=" "${LETSBE_BASE}/env/credentials.env" 2>/dev/null | cut -d'=' -f2-)
                ;;
            ghost)
                container="ghost-db"; db_name="ghost"
                # NOTE(review): logs in as root but reads GHOST_MYSQL_PASSWORD —
                # confirm that variable holds the root password.
                db_pass=$(grep "^GHOST_MYSQL_PASSWORD=" "${LETSBE_BASE}/env/credentials.env" 2>/dev/null | cut -d'=' -f2-)
                ;;
            *) log "  Skipping unknown MySQL tool: $tool_name"; continue ;;
        esac

        local actual_container
        actual_container=$(find_container "$container")
        if [[ -z "$actual_container" ]]; then
            log "  WARNING: Container '$container' not running, skipping $tool_name"
            continue
        fi

        if [[ -n "$db_pass" ]]; then
            gunzip -c "$mysql_file" | docker exec -i "$actual_container" mysql -uroot -p"$db_pass" "$db_name" 2>/dev/null
            log "  OK: $tool_name"
        else
            log "  SKIP: No password found for $tool_name"
        fi
    done

    # Restore MongoDB databases
    log "Restoring MongoDB databases..."
    for mongo_file in "${backup_path}"/mongo_*"${date_str}"*.archive.gz; do
        [[ -f "$mongo_file" ]] || continue
        local tool_name
        tool_name=$(basename "$mongo_file" | sed "s/^mongo_//;s/_${date_str}.*//")
        log "  Restoring MongoDB: $tool_name"

        case "$tool_name" in
            librechat)
                local actual_container
                actual_container=$(find_container "librechat-mongodb")
                if [[ -n "$actual_container" ]]; then
                    gunzip -c "$mongo_file" | docker exec -i "$actual_container" mongorestore --db LibreChat --drop --archive 2>/dev/null
                    log "  OK: $tool_name"
                else
                    log "  WARNING: Container not running, skipping"
                fi
                ;;
            *) log "  Skipping unknown MongoDB tool: $tool_name" ;;
        esac
    done

    log ""
    log "=== Full Restore Complete ==="
    log "Now restart all tool stacks:"
    log "  for stack in ${LETSBE_BASE}/stacks/*/docker-compose.yml; do"
    log "    docker-compose -f \"\$stack\" restart"
    log "  done"
    log "  systemctl restart nginx"
}
|
||||
|
||||
# =============================================================================
|
||||
# MAIN
|
||||
# =============================================================================
|
||||
|
||||
# No subcommand supplied: print the usage summary and exit non-zero.
if [[ $# -lt 1 ]]; then
    echo "LetsBe Restore Tool"
    echo ""
    echo "Usage:"
    echo "  $0 list                      List local backups"
    echo "  $0 list-remote               List remote backups"
    echo "  $0 download <DATE>           Download remote backup"
    echo "  $0 postgres <TOOL> <FILE>    Restore PostgreSQL database"
    echo "  $0 mysql <TOOL> <FILE>       Restore MySQL database"
    echo "  $0 mongo <TOOL> <FILE>       Restore MongoDB database"
    echo "  $0 env <FILE>                Restore env files"
    echo "  $0 configs <FILE>            Restore config files"
    echo "  $0 nginx <FILE>              Restore nginx configs"
    echo "  $0 full <DATE>               Full system restore"
    echo ""
    echo "PostgreSQL tools: chatwoot, nextcloud, keycloak, n8n, calcom, umami,"
    echo "  nocodb, typebot, windmill, glitchtip, penpot, gitea, odoo, listmonk,"
    echo "  documenso, redash, activepieces, orchestrator"
    echo ""
    echo "MySQL tools: wordpress, ghost"
    echo ""
    echo "MongoDB tools: librechat"
    exit 1
fi
|
||||
|
||||
# Subcommand dispatch. Each arm validates its argument count up front so the
# user gets a usage message instead of an unbound-variable error (set -u).
case "$1" in
    list) cmd_list ;;
    list-remote) cmd_list_remote ;;
    download) [[ $# -ge 2 ]] || die "Usage: $0 download <DATE>"; cmd_download "$2" ;;
    postgres) [[ $# -ge 3 ]] || die "Usage: $0 postgres <TOOL> <FILE>"; cmd_restore_postgres "$2" "$3" ;;
    mysql) [[ $# -ge 3 ]] || die "Usage: $0 mysql <TOOL> <FILE>"; cmd_restore_mysql "$2" "$3" ;;
    mongo) [[ $# -ge 3 ]] || die "Usage: $0 mongo <TOOL> <FILE>"; cmd_restore_mongo "$2" "$3" ;;
    env) [[ $# -ge 2 ]] || die "Usage: $0 env <FILE>"; cmd_restore_env "$2" ;;
    configs) [[ $# -ge 2 ]] || die "Usage: $0 configs <FILE>"; cmd_restore_configs "$2" ;;
    nginx) [[ $# -ge 2 ]] || die "Usage: $0 nginx <FILE>"; cmd_restore_nginx "$2" ;;
    full) [[ $# -ge 2 ]] || die "Usage: $0 full <DATE>"; cmd_full_restore "$2" ;;
    *) die "Unknown command: $1. Run '$0' for usage." ;;
esac
|
||||
831
letsbe-ansible-runner/scripts/setup.sh
Normal file
831
letsbe-ansible-runner/scripts/setup.sh
Normal file
@@ -0,0 +1,831 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# LetsBe Server Setup Script
|
||||
# This script sets up the server and deploys selected tools.
|
||||
#
|
||||
# Usage:
|
||||
# ./setup.sh --tools "all" --domain "example.com"
|
||||
# ./setup.sh --tools "portainer,n8n,baserow" --domain "example.com"
|
||||
# ./setup.sh --tools "1,2,3"
|
||||
# ./setup.sh # Foundation only, no tools deployed
|
||||
#
|
||||
# Arguments:
|
||||
# --tools Comma-separated list of tools to deploy, "all", or tool numbers
|
||||
# --domain Domain name for SSL email (administrator@domain)
|
||||
# --skip-ssl Skip SSL certificate setup (useful for testing)
|
||||
# --admin-user Admin username to create with SSH key access
|
||||
# --admin-ssh-key Public SSH key for the admin user
|
||||
#
|
||||
|
||||
set -euo pipefail

# Prevent interactive prompts during apt install
export DEBIAN_FRONTEND=noninteractive

# =============================================================================
# ARGUMENT PARSING
# =============================================================================

TOOLS_TO_DEPLOY=""   # comma-separated tool names/numbers, or "all"; empty = foundation only
SKIP_SSL=false       # --skip-ssl: skip certbot entirely (useful for testing)
ROOT_SSL=false       # --root-ssl: include the bare root domain in the certificate
DOMAIN=""            # --domain: used for the SSL contact email (administrator@DOMAIN)

# Docker registry authentication (optional)
DOCKER_USER=""
DOCKER_TOKEN=""
DOCKER_REGISTRY=""

# Gitea registry authentication (for private images from code.letsbe.solutions)
GITEA_REGISTRY=""
GITEA_USER=""
GITEA_TOKEN=""

# Admin user setup (optional - replaces hardcoded user)
ADMIN_USER=""
ADMIN_SSH_KEY=""

# Hand-rolled long-option parser; options taking a value consume two argv
# slots (shift 2), flags consume one.
# NOTE(review): the --gitea-* flags are accepted here but are missing from
# the --help options list below — confirm and update the help text.
while [[ $# -gt 0 ]]; do
    case $1 in
        --tools)
            TOOLS_TO_DEPLOY="$2"
            shift 2
            ;;
        --domain)
            DOMAIN="$2"
            shift 2
            ;;
        --skip-ssl)
            SKIP_SSL=true
            shift
            ;;
        --root-ssl)
            ROOT_SSL=true
            shift
            ;;
        --docker-user)
            DOCKER_USER="$2"
            shift 2
            ;;
        --docker-token)
            DOCKER_TOKEN="$2"
            shift 2
            ;;
        --docker-registry)
            DOCKER_REGISTRY="$2"
            shift 2
            ;;
        --gitea-registry)
            GITEA_REGISTRY="$2"
            shift 2
            ;;
        --gitea-user)
            GITEA_USER="$2"
            shift 2
            ;;
        --gitea-token)
            GITEA_TOKEN="$2"
            shift 2
            ;;
        --admin-user)
            ADMIN_USER="$2"
            shift 2
            ;;
        --admin-ssh-key)
            ADMIN_SSH_KEY="$2"
            shift 2
            ;;
        --help|-h)
            echo "Usage: $0 [--tools \"tool1,tool2,...\"|\"all\"] [--domain DOMAIN] [--skip-ssl] [--root-ssl]"
            echo ""
            echo "Options:"
            echo "  --tools            Comma-separated list of tools, 'all', or tool numbers"
            echo "  --domain           Domain name for SSL email (administrator@domain)"
            echo "  --skip-ssl         Skip SSL certificate setup"
            echo "  --root-ssl         Include root domain in SSL certificate"
            echo "  --docker-user      Docker registry username (optional)"
            echo "  --docker-token     Docker registry password/token (optional)"
            echo "  --docker-registry  Docker registry URL (optional, defaults to Docker Hub)"
            echo "  --admin-user       Admin username to create with SSH key access"
            echo "  --admin-ssh-key    Public SSH key for the admin user"
            echo ""
            echo "Examples:"
            echo "  $0 --tools \"all\" --domain \"example.com\""
            echo "  $0 --tools \"portainer,n8n,baserow\""
            echo "  $0 --tools \"1,5,10\""
            echo "  $0                # Foundation only"
            exit 0
            ;;
        *)
            echo "Unknown option: $1"
            echo "Use --help for usage information"
            exit 1
            ;;
    esac
done
|
||||
|
||||
echo "=== LetsBe Server Setup ==="
echo ""

# =============================================================================
# PACKAGE INSTALLATION
# =============================================================================

echo "[1/10] Installing system packages..."
sudo apt update && sudo apt upgrade -y
# BUGFIX: 'htop' was listed twice in this package list; deduplicated
# (apt tolerates duplicates, but the repetition was clearly unintentional).
sudo apt install -y build-essential net-tools tree wget jq nano curl htop ufw fail2ban unattended-upgrades apt-listchanges apticron git gnupg ca-certificates apache2-utils acl certbot python3-certbot-nginx rsync rclone s3cmd zip sudo iptables dstat openssl
|
||||
|
||||
# =============================================================================
|
||||
# DOCKER INSTALLATION
|
||||
# =============================================================================
|
||||
|
||||
echo "[2/10] Installing Docker..."
sudo install -m 0755 -d /etc/apt/keyrings
# Use --batch and --yes for non-interactive gpg (required for nohup/background execution)
sudo curl -fsSL https://download.docker.com/linux/debian/gpg -o /tmp/docker.gpg
sudo gpg --batch --yes --dearmor -o /etc/apt/keyrings/docker.gpg /tmp/docker.gpg
rm -f /tmp/docker.gpg
sudo chmod a+r /etc/apt/keyrings/docker.gpg
# BUGFIX: was 'sudo echo ... | sudo tee ...' — sudo on echo grants nothing
# (the privileged write happens in tee); the leading sudo was misleading
# (ShellCheck SC2024) and has been dropped.
echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/debian $(. /etc/os-release && echo $VERSION_CODENAME) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
sudo apt update
sudo apt install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
sudo systemctl enable docker

# Standalone docker-compose binary (in addition to the compose plugin) for
# tooling that still shells out to 'docker-compose'.
sudo curl -L "https://github.com/docker/compose/releases/latest/download/docker-compose-linux-$(uname -m)" -o /usr/local/bin/docker-compose
sudo chmod 755 /usr/local/bin/docker-compose

# Docker registry login (optional - for private registries or to bypass rate limits)
# Token is passed via --password-stdin so it never appears in argv/ps output.
if [[ -n "$DOCKER_USER" && -n "$DOCKER_TOKEN" ]]; then
    if [[ -n "$DOCKER_REGISTRY" ]]; then
        echo "Logging into Docker registry: $DOCKER_REGISTRY..."
        echo "$DOCKER_TOKEN" | docker login -u "$DOCKER_USER" --password-stdin "$DOCKER_REGISTRY"
    else
        echo "Logging into Docker Hub..."
        echo "$DOCKER_TOKEN" | docker login -u "$DOCKER_USER" --password-stdin
    fi
fi

# Gitea registry login (for private images from code.letsbe.solutions)
if [[ -n "$GITEA_REGISTRY" && -n "$GITEA_USER" && -n "$GITEA_TOKEN" ]]; then
    echo "Logging into Gitea registry: $GITEA_REGISTRY..."
    echo "$GITEA_TOKEN" | docker login -u "$GITEA_USER" --password-stdin "$GITEA_REGISTRY"
fi
|
||||
|
||||
# =============================================================================
|
||||
# DISABLE CONFLICTING SERVICES
|
||||
# =============================================================================
|
||||
|
||||
echo "[3/10] Disabling conflicting services..."
# exim4 would conflict with a deployed mail stack; apache2 with nginx.
# Failures are ignored: the services may simply not be installed.
for svc in exim4 apache2; do
    sudo systemctl stop "$svc" 2>/dev/null || true
    sudo systemctl disable "$svc" 2>/dev/null || true
done
sudo apt remove -y apache2 2>/dev/null || true
||||
|
||||
# =============================================================================
|
||||
# NGINX INSTALLATION & CONFIGURATION
|
||||
# =============================================================================
|
||||
|
||||
echo "[4/10] Installing and configuring nginx..."
sudo apt install -y nginx
sudo systemctl enable nginx

sudo rm -f /etc/nginx/sites-enabled/default

# Self-signed placeholder cert so the HTTPS fallback server can start before
# certbot runs.
# BUGFIX: this step wrote into /etc/nginx without sudo and silently failed
# when the script is run by a non-root sudoer (set -e aborts the script);
# use sudo consistently like every other privileged step here.
sudo openssl req -new -newkey rsa:2048 -days 365 -nodes -x509 -subj "/C=US/ST=Denial/L=Springfield/O=Dis/CN=www.example.com" -keyout /etc/nginx/placeholder.key -out /etc/nginx/placeholder.crt

# Catch-all vhost: drop connections (444) for any unknown server name on
# both HTTP and HTTPS.
sudo tee /etc/nginx/conf.d/fallback.conf > /dev/null <<EOF
server {
    listen 80 default_server;
    listen [::]:80 default_server;
    server_name _;
    return 444;
}

server {
    listen 443 ssl default_server;
    server_name _;
    return 444;

    ssl_certificate /etc/nginx/placeholder.crt;
    ssl_certificate_key /etc/nginx/placeholder.key;
}
EOF

sudo systemctl restart nginx
|
||||
|
||||
# =============================================================================
|
||||
# FIREWALL CONFIGURATION
|
||||
# =============================================================================
|
||||
|
||||
echo "[5/10] Configuring UFW firewall..."
# Both SSH ports stay open: 22 for the initial connection, 22022 for the
# hardened sshd configured later in this script.
for port in 22 22022 80 443; do
    ufw allow "$port"
done

# Open mail ports only if Poste mail server is being deployed
if [[ "$TOOLS_TO_DEPLOY" == *"poste"* || "$TOOLS_TO_DEPLOY" == "all" ]]; then
    echo "Opening mail ports for Poste..."
    for port in 25 587 143 110 4190 465 993 995; do
        ufw allow "$port"
    done
fi

ufw --force enable
|
||||
|
||||
# =============================================================================
|
||||
# ADMIN USER SETUP
|
||||
# =============================================================================
|
||||
|
||||
# Optional admin account: created only when both --admin-user and
# --admin-ssh-key were supplied. The user gets key-based SSH access and
# membership in the docker group.
if [[ -n "$ADMIN_USER" && -n "$ADMIN_SSH_KEY" ]]; then
    echo "[6/10] Configuring admin user '$ADMIN_USER'..."

    if ! id -u "$ADMIN_USER" > /dev/null 2>&1; then
        echo "User $ADMIN_USER does not exist, will be created."
        useradd -m -s /bin/bash "$ADMIN_USER"
    fi

    # BUGFIX: every $ADMIN_USER expansion below was unquoted; quoted now so
    # unusual usernames cannot break the paths (ShellCheck SC2086).
    mkdir -p "/home/$ADMIN_USER/.ssh"
    chmod 700 "/home/$ADMIN_USER/.ssh"

    # BUGFIX: the key was appended unconditionally, duplicating the entry on
    # every re-run of setup; only append when it is not already present.
    touch "/home/$ADMIN_USER/.ssh/authorized_keys"
    grep -qxF "$ADMIN_SSH_KEY" "/home/$ADMIN_USER/.ssh/authorized_keys" \
        || echo "$ADMIN_SSH_KEY" >> "/home/$ADMIN_USER/.ssh/authorized_keys"
    chmod 600 "/home/$ADMIN_USER/.ssh/authorized_keys"
    chown -R "$ADMIN_USER:$ADMIN_USER" "/home/$ADMIN_USER/.ssh"

    # docker group membership == effective root; intentional for the admin.
    usermod -aG docker "$ADMIN_USER"

    echo "Public key was added for user $ADMIN_USER."
else
    echo "[6/10] Skipping admin user setup (no --admin-user and --admin-ssh-key provided)"
fi
|
||||
|
||||
# =============================================================================
|
||||
# SSH SECURITY HARDENING
|
||||
# =============================================================================
|
||||
|
||||
echo "[7/10] Hardening SSH configuration..."
# Replaces sshd_config wholesale: SSH moves to port 22022, authentication is
# public-key only (AuthenticationMethods publickey, PasswordAuthentication no),
# and root may log in with a key only (prohibit-password).
# NOTE(review): 'UsePrivilegeSeparation' was deprecated in OpenSSH 7.5 —
# confirm the target distro's sshd still accepts this keyword before the
# restart at the end of the script, or sshd may warn/fail.
# NOTE(review): 'ChallengeResponseAuthentication' is the legacy spelling of
# 'KbdInteractiveAuthentication'; modern sshd accepts it as an alias.
cat <<EOF > /etc/ssh/sshd_config
Include /etc/ssh/sshd_config.d/*.conf

Port 22022
#AddressFamily any
#ListenAddress 0.0.0.0
#ListenAddress ::

#HostKey /etc/ssh/ssh_host_rsa_key
#HostKey /etc/ssh/ssh_host_ecdsa_key
#HostKey /etc/ssh/ssh_host_ed25519_key

SyslogFacility AUTH
LogLevel VERBOSE

LoginGraceTime 2m
PermitRootLogin prohibit-password
#StrictModes yes
MaxAuthTries 6
MaxSessions 10

PasswordAuthentication no
PermitEmptyPasswords no

ChallengeResponseAuthentication no

UsePAM yes

X11Forwarding yes
PrintMotd no
PrintLastLog yes

AcceptEnv LANG LC_*

Subsystem sftp /usr/lib/openssh/sftp-server

UsePrivilegeSeparation sandbox
AuthenticationMethods publickey
EOF
|
||||
|
||||
# NOTE: SSH restart moved to end of script to keep connection alive
|
||||
|
||||
# =============================================================================
# AUTOMATIC SECURITY UPDATES
# =============================================================================
# Writes a single apt.conf.d file that both enables the periodic apt jobs and
# configures unattended-upgrades to install Debian security updates only.
# NOTE(review): conventionally APT::Periodic lives in 20auto-upgrades and the
# Unattended-Upgrade::* settings in 50unattended-upgrades; combining them in
# one file works because apt merges all conf.d fragments — verify no packaged
# 50unattended-upgrades overrides these values.
# NOTE(review): the Origins-Pattern targets Debian security archives — confirm
# the target image is Debian, not Ubuntu (which needs a different origin).
# NOTE(review): report mail goes to a hardcoded administrator@letsbe.biz,
# unlike the SSL email which is derived from --domain — TODO confirm intended.

echo "[8/10] Configuring automatic security updates..."
cat <<EOF > /etc/apt/apt.conf.d/20auto-upgrades
// Enable the update/upgrade script (0=disable)
APT::Periodic::Enable "1";

// Do "apt-get update" automatically every n-days (0=disable)
APT::Periodic::Update-Package-Lists "1";

// Do "apt-get upgrade --download-only" every n-days (0=disable)
APT::Periodic::Download-Upgradeable-Packages "1";

// Do "apt-get autoclean" every n-days (0=disable)
APT::Periodic::AutocleanInterval "7";

// Send report mail to root
// 0: no report (or null string)
// 1: progress report (actually any string)
// 2: + command outputs (remove -qq, remove 2>/dev/null, add -d)
APT::Periodic::Unattended-Upgrade "1";

// Automatically upgrade packages from these
Unattended-Upgrade::Origins-Pattern {
// "o=Debian,a=stable";
// "o=Debian,a=stable-updates";
"origin=Debian,codename=\${distro_codename},label=Debian-Security";
};

// You can specify your own packages to NOT automatically upgrade here
Unattended-Upgrade::Package-Blacklist {
};

// Run dpkg --force-confold --configure -a if a unclean dpkg state is detected to true to ensure that updates get installed even when the system got interrupted during a previous run
Unattended-Upgrade::AutoFixInterruptedDpkg "true";

// Perform the upgrade when the machine is running because we wont be shutting our server down often
Unattended-Upgrade::InstallOnShutdown "false";

// Send an email to this address with information about the packages upgraded.
Unattended-Upgrade::Mail "administrator@letsbe.biz";

// Always send an e-mail
Unattended-Upgrade::MailOnlyOnError "true";

// Remove all unused dependencies after the upgrade has finished
Unattended-Upgrade::Remove-Unused-Dependencies "true";

// Remove any new unused dependencies after the upgrade has finished
Unattended-Upgrade::Remove-New-Unused-Dependencies "true";

// Automatically reboot WITHOUT CONFIRMATION if the file /var/run/reboot-required is found after the upgrade.
Unattended-Upgrade::Automatic-Reboot "false";

// Automatically reboot even if users are logged in.
Unattended-Upgrade::Automatic-Reboot-WithUsers "false";
EOF
|
||||
|
||||
# =============================================================================
# BACKUP SCRIPT & CRON
# =============================================================================
# Makes the backup helpers executable and installs the daily backup job.

echo "Setting up backup script and cron..."

# Restrict the helper scripts to owner/group; tolerate them being absent.
for helper in backups.sh restore.sh; do
    chmod 750 "/opt/letsbe/scripts/${helper}" 2>/dev/null || true
done

mkdir -p /root/.config/rclone /opt/letsbe/logs

# Install backup cron non-interactively (daily at 2am); any previous
# backups.sh entry is filtered out first so the job is never duplicated.
BACKUP_CRON="0 2 * * * /bin/bash /opt/letsbe/scripts/backups.sh >> /opt/letsbe/logs/backup.log 2>&1"
( crontab -l 2>/dev/null | grep -v "backups.sh"; echo "$BACKUP_CRON" ) | crontab -
echo "Backup cron installed (daily at 2:00 AM)"
|
||||
|
||||
# =============================================================================
# TOOL DEPLOYMENT
# =============================================================================
# Deploys the docker-compose stacks selected via $TOOLS_TO_DEPLOY (a comma-
# separated list of tool names and/or 1-based indexes, or "all"/"a"). For each
# tool: copies its .env to the central env dir, runs tool-specific pre/post
# hooks (Nextcloud dirs, Portainer endpoint, Chatwoot DB init), brings the
# stack up, and links its nginx vhost. Tools that got an nginx config are
# recorded in DEPLOYED_TOOLS for the later SSL step.

echo "[9/10] Deploying tools..."

# Get list of available tools: each directory under /opt/letsbe/stacks/ that
# contains a docker-compose.yml. (NOTE(review): parses `ls` output — fine for
# the controlled stack names here, but fragile if a path ever contains spaces.)
mapfile -t available_tools < <(ls /opt/letsbe/stacks/*/docker-compose.yml 2>/dev/null | xargs -I {} dirname {} | xargs -I {} basename {})

if [[ -z "$TOOLS_TO_DEPLOY" ]]; then
    # Nothing requested: report what exists and move on.
    echo "No tools specified. Skipping tool deployment."
    echo "Available tools: ${available_tools[*]}"
    echo "Use --tools to deploy tools later."
else
    # Determine which tools to deploy
    declare -a tools_list=()

    if [[ "$TOOLS_TO_DEPLOY" == "all" || "$TOOLS_TO_DEPLOY" == "a" ]]; then
        tools_list=("${available_tools[@]}")
    else
        # Parse comma-separated list
        IFS=',' read -ra requested_tools <<< "$TOOLS_TO_DEPLOY"
        for tool in "${requested_tools[@]}"; do
            tool=$(echo "$tool" | xargs) # Trim whitespace

            # Check if it's a number (index)
            if [[ "$tool" =~ ^[0-9]+$ ]]; then
                # 1-based user-facing index -> 0-based array index.
                idx=$((tool - 1))
                if [[ $idx -ge 0 && $idx -lt ${#available_tools[@]} ]]; then
                    tools_list+=("${available_tools[$idx]}")
                else
                    echo "Warning: Tool index $tool out of range, skipping."
                fi
            else
                # It's a tool name; membership test via substring match on the
                # space-joined list (names contain no spaces).
                if [[ " ${available_tools[*]} " =~ " ${tool} " ]]; then
                    tools_list+=("$tool")
                else
                    echo "Warning: Tool '$tool' not found, skipping."
                fi
            fi
        done
    fi

    # Ensure orchestrator is FIRST (creates network that sysadmin needs)
    if [[ -f "/opt/letsbe/stacks/orchestrator/docker-compose.yml" ]]; then
        # Remove orchestrator from current position if present
        declare -a new_list=()
        for tool in "${tools_list[@]}"; do
            if [[ "$tool" != "orchestrator" ]]; then
                new_list+=("$tool")
            fi
        done
        # Prepend orchestrator to front
        tools_list=("orchestrator" "${new_list[@]}")
        echo "Orchestrator moved to front (creates network for sysadmin)"
    fi

    echo "Deploying tools: ${tools_list[*]}"

    # Track deployed tools for SSL setup
    DEPLOYED_TOOLS=()

    for tool_name in "${tools_list[@]}"; do
        compose_file="/opt/letsbe/stacks/${tool_name}/docker-compose.yml"

        if [[ -f "$compose_file" ]]; then
            # Copy .env file to centralized env directory if it exists
            stack_env="/opt/letsbe/stacks/${tool_name}/.env"
            central_env="/opt/letsbe/env/${tool_name}.env"
            if [[ -f "$stack_env" ]]; then
                cp "$stack_env" "$central_env"
                # Env files hold secrets — readable by root only.
                chmod 600 "$central_env"
                echo "Copied env file for $tool_name"
            fi

            # Tool-specific pre-deployment setup
            if [[ "$tool_name" == "nextcloud" ]]; then
                echo "Creating Nextcloud bind mount directories..."
                mkdir -p /opt/letsbe/config/nextcloud
                mkdir -p /opt/letsbe/data/nextcloud
                # Set appropriate ownership for www-data (uid 33 in Nextcloud container)
                chown -R 33:33 /opt/letsbe/config/nextcloud
                chown -R 33:33 /opt/letsbe/data/nextcloud
            fi

            if [[ "$tool_name" == "sysadmin" ]]; then
                echo " Pulling latest sysadmin agent image..."
                docker-compose -f "$compose_file" pull
            fi

            # NOTE(review): uses the docker-compose v1 binary throughout —
            # confirm it (or a compatibility shim) is installed, since newer
            # hosts ship only the `docker compose` plugin.
            echo "Starting $tool_name..."
            docker-compose -f "$compose_file" up -d

            # Tool-specific post-deployment initialization
            if [[ "$tool_name" == "portainer" ]]; then
                echo "Configuring Portainer local Docker endpoint..."

                # Get Portainer container name
                # NOTE(review): `grep portainer | head -1` picks the first
                # match — could select the wrong container if another name
                # contains "portainer".
                PORTAINER_CONTAINER=$(docker ps --format '{{.Names}}' | grep portainer | head -1)

                if [[ -n "$PORTAINER_CONTAINER" ]]; then
                    # Wait for Portainer to be ready (up to ~60s: 30 x 2s).
                    echo " Waiting for Portainer to be ready..."
                    for i in {1..30}; do
                        if curl -ks https://localhost:9443/api/system/status >/dev/null 2>&1; then
                            echo " Portainer is ready."
                            break
                        fi
                        sleep 2
                    done

                    # Read admin password from file
                    PORTAINER_PASSWORD=$(cat /opt/letsbe/env/portainer_admin_password.txt 2>/dev/null)

                    if [[ -n "$PORTAINER_PASSWORD" ]]; then
                        # Authenticate and get JWT token
                        echo " Authenticating with Portainer..."
                        JWT_RESPONSE=$(curl -ks -X POST https://localhost:9443/api/auth \
                            -H "Content-Type: application/json" \
                            -d "{\"username\":\"admin\",\"password\":\"${PORTAINER_PASSWORD}\"}" 2>/dev/null)

                        # Crude JSON field extraction; assumes password has no
                        # characters that break the inline JSON above.
                        JWT=$(echo "$JWT_RESPONSE" | grep -o '"jwt":"[^"]*"' | cut -d'"' -f4)

                        if [[ -n "$JWT" ]]; then
                            echo " Creating local Docker endpoint..."
                            # Create local Docker socket endpoint
                            # (EndpointCreationType=1 = local Docker environment).
                            ENDPOINT_RESPONSE=$(curl -ks -X POST https://localhost:9443/api/endpoints \
                                -H "Authorization: Bearer $JWT" \
                                -H "Content-Type: multipart/form-data" \
                                -F "Name=local" \
                                -F "EndpointCreationType=1" 2>/dev/null)

                            if echo "$ENDPOINT_RESPONSE" | grep -q '"Id"'; then
                                echo " Local Docker endpoint created successfully."
                            else
                                echo " Warning: Endpoint creation response: $ENDPOINT_RESPONSE"
                            fi
                        else
                            echo " Warning: Could not authenticate with Portainer"
                        fi
                    else
                        echo " Warning: Could not read Portainer password file"
                    fi
                else
                    echo " Warning: Could not find Portainer container"
                fi
            fi

            if [[ "$tool_name" == "chatwoot" ]]; then
                echo "Initializing Chatwoot database (pgvector + migrations)..."

                # Get the customer prefix from the container name
                CHATWOOT_POSTGRES=$(docker ps --format '{{.Names}}' | grep chatwoot-postgres | head -1)
                CHATWOOT_RAILS=$(docker ps --format '{{.Names}}' | grep chatwoot-rails | head -1)

                if [[ -n "$CHATWOOT_POSTGRES" && -n "$CHATWOOT_RAILS" ]]; then
                    # Wait for Postgres to be ready (up to ~60s: 30 x 2s).
                    echo " Waiting for Postgres to be ready..."
                    for i in {1..30}; do
                        if docker exec "$CHATWOOT_POSTGRES" pg_isready -U chatwoot -d chatwoot_production >/dev/null 2>&1; then
                            echo " Postgres is ready."
                            break
                        fi
                        sleep 2
                    done

                    # Create pgvector extension (idempotent; errors ignored
                    # deliberately — extension may already exist).
                    echo " Creating pgvector extension..."
                    docker exec "$CHATWOOT_POSTGRES" psql -U chatwoot -d chatwoot_production -c "CREATE EXTENSION IF NOT EXISTS vector;" 2>/dev/null || true

                    # Wait for Rails container to be fully up
                    # (fixed sleep; no health probe available here).
                    echo " Waiting for Rails container..."
                    sleep 10

                    # Run database migrations
                    echo " Running Chatwoot database prepare..."
                    docker exec "$CHATWOOT_RAILS" bundle exec rails db:chatwoot_prepare 2>&1 || echo " Note: db:chatwoot_prepare may have already run"

                    echo " Chatwoot initialization complete."
                else
                    echo " Warning: Could not find Chatwoot containers for initialization"
                fi
            fi

            # Link nginx config if exists
            nginx_conf="/opt/letsbe/nginx/${tool_name}.conf"
            if [[ -f "$nginx_conf" ]]; then
                cp "$nginx_conf" /etc/nginx/sites-available/
                ln -sf /etc/nginx/sites-available/${tool_name}.conf /etc/nginx/sites-enabled/
                echo "Nginx config linked for $tool_name"
                # Only tools with an nginx vhost participate in SSL setup.
                DEPLOYED_TOOLS+=("$tool_name")
            else
                echo "No nginx config for $tool_name (may not need one)"
            fi
        else
            echo "Warning: docker-compose.yml not found for $tool_name"
        fi
    done

    # Restart nginx to apply new configs
    systemctl restart nginx
fi
|
||||
|
||||
# =============================================================================
# SYSADMIN AGENT (Always deployed)
# =============================================================================
# The sysadmin agent stack is deployed unconditionally, independent of the
# --tools selection, whenever its compose file is present.

echo "[9.5/10] Deploying sysadmin agent..."

SYSADMIN_COMPOSE="/opt/letsbe/stacks/sysadmin/docker-compose.yml"
if [[ ! -f "$SYSADMIN_COMPOSE" ]]; then
    echo "Warning: Sysadmin docker-compose.yml not found at $SYSADMIN_COMPOSE"
else
    # Mention when an agent container is already up; compose updates it in place.
    if docker ps --format '{{.Names}}' | grep -q "agent$"; then
        echo " Sysadmin agent already running, updating..."
    fi

    echo " Pulling latest sysadmin agent image..."
    docker-compose -f "$SYSADMIN_COMPOSE" pull

    echo " Starting sysadmin agent..."
    docker-compose -f "$SYSADMIN_COMPOSE" up -d

    echo " Sysadmin agent deployed successfully."
fi
|
||||
|
||||
# =============================================================================
# LOCAL ORCHESTRATOR BOOTSTRAP (License Validation + Agent Registration)
# =============================================================================
# Validates the license against the Hub and registers the agent with the local
# orchestrator. Requires both the bootstrap script and the credentials file
# (generated by env_setup.sh). A failure warns loudly but does not abort the
# remaining setup steps.

echo "[9.6/10] Running local orchestrator bootstrap..."

BOOTSTRAP_SCRIPT="/opt/letsbe/scripts/local_bootstrap.sh"
CREDENTIALS_FILE="/opt/letsbe/env/credentials.env"

if [[ -f "$BOOTSTRAP_SCRIPT" && -f "$CREDENTIALS_FILE" ]]; then
    # Source credentials to get required variables
    # (HUB_URL, LICENSE_KEY, INSTANCE_ID, ADMIN_API_KEY).
    source "$CREDENTIALS_FILE"

    echo " Validating license and setting up local orchestrator..."
    echo " Instance ID: ${INSTANCE_ID:-unknown}"
    echo " Hub URL: ${HUB_URL:-unknown}"

    # Run bootstrap script with required environment variables.
    # FIX: CUSTOMER now derived with parameter expansion instead of the old
    # unquoted `echo ${INSTANCE_ID} | sed 's/-orchestrator$//'` (same result,
    # no word splitting, no subprocesses).
    # FIX: capture the exit status inside the `if` so the failure branch also
    # works if this script ever runs under `set -e` (where a bare failing
    # command would abort before the old `BOOTSTRAP_EXIT=$?` line).
    if HUB_URL="${HUB_URL}" \
        LICENSE_KEY="${LICENSE_KEY}" \
        INSTANCE_ID="${INSTANCE_ID}" \
        ORCHESTRATOR_URL="http://localhost:8100" \
        ADMIN_API_KEY="${ADMIN_API_KEY}" \
        CUSTOMER="${INSTANCE_ID%-orchestrator}" \
        CREDENTIALS_DIR="/opt/letsbe/env" \
        bash "$BOOTSTRAP_SCRIPT"; then
        BOOTSTRAP_EXIT=0
    else
        BOOTSTRAP_EXIT=$?
    fi

    if [[ $BOOTSTRAP_EXIT -ne 0 ]]; then
        echo ""
        echo "=============================================="
        echo " BOOTSTRAP FAILED"
        echo "=============================================="
        echo ""
        echo "License validation or agent registration failed."
        echo "Check the error messages above for details."
        echo ""
        echo "Common issues:"
        echo " - Invalid license_key in config.json"
        echo " - Network connectivity to Hub (${HUB_URL})"
        echo " - Instance not registered in LetsBe Hub"
        echo ""
        echo "The stack has been deployed but is NOT properly configured."
        echo "Please fix the issue and re-run: bash /opt/letsbe/scripts/local_bootstrap.sh"
        echo "=============================================="
        # Don't exit - let the rest of setup complete, but warn
    else
        echo " Bootstrap completed successfully!"
        echo " Agent should register with local orchestrator within 30 seconds."
    fi
else
    if [[ ! -f "$BOOTSTRAP_SCRIPT" ]]; then
        echo "Warning: Bootstrap script not found at $BOOTSTRAP_SCRIPT"
    fi
    if [[ ! -f "$CREDENTIALS_FILE" ]]; then
        echo "Warning: Credentials file not found at $CREDENTIALS_FILE"
        echo "Run env_setup.sh first to generate credentials."
    fi
fi
|
||||
|
||||
# Collect domains from deployed tools' nginx configs (for SSL).
# Reads every server_name directive in each deployed tool's enabled vhost and
# accumulates the names in SSL_DOMAINS, skipping the catch-all "_" and any
# unreplaced {{template}} placeholders.
SSL_DOMAINS=()
if [[ ${#DEPLOYED_TOOLS[@]} -gt 0 ]]; then
    for tool_name in "${DEPLOYED_TOOLS[@]}"; do
        tool_conf="/etc/nginx/sites-enabled/${tool_name}.conf"
        if [[ -f "$tool_conf" ]]; then
            # Extract server_name values (excluding placeholders and _).
            # FIX: the old awk '{print $2}' kept only the FIRST name of a
            # multi-domain directive ("server_name a.com b.com;"); now every
            # field after the keyword is emitted.
            while IFS= read -r domain; do
                if [[ -n "$domain" && "$domain" != "_" && ! "$domain" =~ \{\{ ]]; then
                    SSL_DOMAINS+=("$domain")
                fi
            done < <(grep -h "server_name" "$tool_conf" 2>/dev/null \
                | awk '{for (i = 2; i <= NF; i++) print $i}' \
                | tr -d ';' | sort -u)
        fi
    done
fi
|
||||
|
||||
# =============================================================================
# SSL CERTIFICATE SETUP
# =============================================================================
# Requests Let's Encrypt certificates (via certbot --nginx) for every domain
# collected from the deployed tools' vhosts, unless --skip-ssl was given.
# Renewal is handled by certbot's own systemd timer.

echo "[10/10] Setting up SSL certificates..."

# NOTE: Certbot cron disabled - crontab hangs in non-interactive mode
# Certbot installs its own systemd timer, so manual cron not needed
echo "Certbot renewal handled by systemd timer (certbot.timer)"

if [[ "$SKIP_SSL" == "true" ]]; then
    echo "Skipping SSL setup (--skip-ssl flag set)"
elif [[ ${#SSL_DOMAINS[@]} -eq 0 ]]; then
    echo "No deployed tools with valid domains found."
    echo "Skipping SSL setup. Either:"
    echo " - No tools were deployed, or"
    echo " - Templates not replaced (run env_setup.sh first with --domain parameter)"
    echo "To manually setup SSL later: certbot --nginx -d yourdomain.com"
else
    # Remove duplicates from SSL_DOMAINS.
    # FIX: mapfile replaces the old unquoted `SSL_DOMAINS=($(printf ...))`,
    # which was subject to word splitting and pathname expansion.
    mapfile -t SSL_DOMAINS < <(printf '%s\n' "${SSL_DOMAINS[@]}" | sort -u)

    # Add root domain if --root-ssl flag is set
    if [[ "$ROOT_SSL" == "true" && -n "$DOMAIN" ]]; then
        # Check if root domain is not already in the list
        if [[ ! " ${SSL_DOMAINS[*]} " =~ " ${DOMAIN} " ]]; then
            SSL_DOMAINS+=("$DOMAIN")
            echo "Including root domain: $DOMAIN"
        fi
    fi

    echo "----"
    echo "Setting up SSL certificates for deployed tools:"
    for domain in "${SSL_DOMAINS[@]}"; do
        echo " - $domain"
    done
    echo ""
    echo "Make sure DNS entries point to this server IP before proceeding."

    # Derive email from domain parameter or use default
    if [[ -n "$DOMAIN" ]]; then
        SSL_EMAIL="administrator@${DOMAIN}"
    else
        # Try to extract base domain from first SSL domain
        FIRST_DOMAIN="${SSL_DOMAINS[0]}"
        # Extract base domain (remove subdomain): keep the last two labels
        # when there are more than two.
        BASE_DOMAIN=$(echo "$FIRST_DOMAIN" | awk -F. '{if(NF>2) print $(NF-1)"."$NF; else print $0}')
        SSL_EMAIL="administrator@${BASE_DOMAIN}"
    fi

    echo "Using email: $SSL_EMAIL"

    # Build domain arguments for certbot.
    # FIX: use an array instead of the old unquoted $DOMAIN_ARGS string so
    # each "-d domain" pair survives expansion intact.
    DOMAIN_ARGS=()
    for domain in "${SSL_DOMAINS[@]}"; do
        DOMAIN_ARGS+=(-d "$domain")
    done

    # Run certbot non-interactively with specific domains
    sudo certbot --nginx \
        --non-interactive \
        --agree-tos \
        --email "$SSL_EMAIL" \
        --redirect \
        "${DOMAIN_ARGS[@]}" \
        || echo "Certbot completed (some domains may have failed - check DNS)"
fi
|
||||
|
||||
# =============================================================================
# COMPLETION SUMMARY
# =============================================================================
# Prints the configured vhost domains and final connection details.

echo ""
echo "----"
echo "Configured domains:"
for conf_file in /etc/nginx/sites-enabled/*.conf; do
    [[ -f "$conf_file" ]] || continue
    # List each non-catch-all server_name declared in this vhost.
    while IFS= read -r server_name; do
        if [[ "$server_name" != "_" ]]; then
            echo " - $server_name ($conf_file)"
        fi
    done < <(grep -E "^\s*server_name\s+" "$conf_file" 2>/dev/null | awk '{print $2}' | tr -d ';' | sort -u)
done

# Public IPv4 of this host, as seen from the outside.
SERVER_IP=$(curl -4 -s ifconfig.co)

echo ""
echo "=============================================="
echo " LetsBe Server Setup Complete"
echo "=============================================="
echo ""
echo "Server IP: $SERVER_IP"
echo "SSH Port: 22022"
if [[ -n "$ADMIN_USER" ]]; then
    echo "SSH User: $ADMIN_USER (key-based auth only)"
else
    echo "SSH User: root (key-based auth only, no admin user configured)"
fi
echo ""
echo "Portainer (if deployed): https://$SERVER_IP:9443"
echo ""
echo "Important:"
echo " - Configure rclone for backups: rclone config"
echo " - SSH port changed to 22022"
if [[ -n "$ADMIN_USER" ]]; then
    echo " - User '$ADMIN_USER' has Docker access (key in /home/$ADMIN_USER/.ssh/)"
fi
echo ""
echo "=============================================="
|
||||
|
||||
# =============================================================================
# MARK SETUP AS COMPLETE (before SSH restart)
# =============================================================================
# The marker is written before the restart so a dropped connection cannot
# leave the server looking un-provisioned.

touch /opt/letsbe/.setup_installed
echo "Setup marked as complete."

# =============================================================================
# RESTART SSH (MUST BE LAST - This will disconnect the session!)
# =============================================================================

echo ""
echo "Restarting SSH on port 22022... (connection will drop)"
if [[ -n "$ADMIN_USER" ]]; then
    echo "Reconnect with: ssh -i id_ed25519 -p 22022 $ADMIN_USER@$SERVER_IP"
else
    echo "Reconnect with: ssh -p 22022 root@$SERVER_IP"
fi
echo ""

# Small delay to ensure output is sent before disconnect
sleep 2

# FIX: on Debian/Ubuntu the OpenSSH service unit is named "ssh" ("sshd" is
# only an alias on some releases), so fall back if the first name is unknown —
# otherwise the port change would silently never take effect.
systemctl restart sshd 2>/dev/null || systemctl restart ssh
|
||||
# (Removed stray Gitea web-UI residue — "Reference in New Issue" / "Block a
# user" — which is not part of the script and would break shell parsing.)