Include full contents of all nested repositories
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
parent
14ff8fd54c
commit
2401ed446f
|
|
@ -1 +0,0 @@
|
|||
Subproject commit 0b11b45e074793a36967a4c68fca98a9bb498409
|
||||
|
|
@ -0,0 +1,32 @@
|
|||
# Git
.git
.gitignore

# IDE
.idea/
.vscode/
*.swp
*.swo

# Serena
.serena/

# Logs and runtime files
*.log
logs/
jobs/
config.json

# Docker
docker-compose.yml
docker-compose.override.yml
Dockerfile.dev

# Documentation
README.md
CHANGELOG.md
docs/

# Test files
tests/
*.test.*
|
||||
|
|
@ -0,0 +1,55 @@
|
|||
name: Build and Push Docker Image

on:
  push:
    branches:
      - main
      - master
    tags:
      - 'v*'
  pull_request:
    branches:
      - main
      - master

env:
  REGISTRY: code.letsbe.solutions
  IMAGE_NAME: letsbe/ansible-runner

jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      # Registry credentials are only needed (and only available) on non-PR builds
      - name: Log in to Gitea Container Registry
        if: github.event_name != 'pull_request'
        uses: docker/login-action@v3
        with:
          registry: ${{ env.REGISTRY }}
          username: ${{ gitea.actor }}
          password: ${{ secrets.REGISTRY_TOKEN }}

      - name: Extract metadata (tags, labels)
        id: meta
        uses: docker/metadata-action@v5
        with:
          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
          tags: |
            type=ref,event=branch
            type=ref,event=pr
            type=semver,pattern={{version}}
            type=semver,pattern={{major}}.{{minor}}
            type=raw,value=latest,enable={{is_default_branch}}

      # PRs build without pushing, which validates the Dockerfile
      - name: Build and push Docker image
        uses: docker/build-push-action@v5
        with:
          context: .
          push: ${{ github.event_name != 'pull_request' }}
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
||||
|
|
@ -0,0 +1,23 @@
|
|||
# Python
__pycache__/
*.pyc
.venv/

# IDE
.idea/
.vscode/

# Serena
.serena/

# Logs
*.log
logs/

# Job files (created at runtime)
jobs/
config.json
*.json

# Docker artifacts
docker-compose.override.yml
|
||||
|
|
@ -0,0 +1,42 @@
|
|||
# syntax=docker/dockerfile:1
FROM debian:bookworm-slim

LABEL maintainer="LetsBe Cloud <hello@letsbe.solutions>"
LABEL description="LetsBe Ansible Runner - Containerized provisioning for Hub-triggered jobs"

# Runtime tooling for the provisioning entrypoint:
# ssh/sshpass for remote access, jq for config parsing, curl for Hub API calls.
# Packages sorted alphabetically; apt lists removed in the same layer so they
# never persist in the image.
RUN apt-get update && apt-get install -y --no-install-recommends \
        ca-certificates \
        curl \
        jq \
        openssh-client \
        openssl \
        sshpass \
        unzip \
        zip \
    && rm -rf /var/lib/apt/lists/*

# Directory layout used by the entrypoint and the Hub-mounted volumes
RUN mkdir -p /workspace /job /logs /output

# Provisioning assets from the Ansible Setup Script
COPY scripts/ /workspace/scripts/
COPY stacks/ /workspace/stacks/
COPY nginx/ /workspace/nginx/

# Entrypoint
COPY entrypoint.sh /workspace/entrypoint.sh

# Make scripts executable and strip CRLF line endings (sources may come from
# Windows checkouts; a stray \r breaks the shebang line)
RUN chmod +x /workspace/entrypoint.sh /workspace/scripts/*.sh && \
    find /workspace -type f -name "*.sh" -exec sed -i 's/\r$//' {} \;

WORKDIR /workspace

# Runtime configuration injected by the Hub when it spawns a job container
ENV HUB_API_URL="https://hub.letsbe.solutions" \
    JOB_ID="" \
    RUNNER_TOKEN="" \
    JOB_CONFIG_PATH="/job/config.json"

# Exec-form entrypoint so the script runs as PID 1 and receives SIGTERM.
# NOTE(review): the container runs as root by design — the runner needs to
# manage SSH material and write /logs; confirm no USER directive is wanted.
ENTRYPOINT ["/workspace/entrypoint.sh"]
|
||||
|
|
@ -0,0 +1,68 @@
|
|||
# LetsBe Ansible Runner - Docker Compose for Hub Integration
#
# This file is used by the Hub to spawn provisioning containers.
# Each provisioning job gets its own container instance.
#
# Usage (by Hub):
#   JOB_ID="abc123" HUB_API_URL="https://hub.letsbe.solutions" \
#   RUNNER_TOKEN="..." CONFIG_PATH="/path/to/config.json" \
#   docker compose -p letsbe-runner-abc123 up -d
#
# The container will:
#   1. Load config from /job/config.json
#   2. SSH to target server
#   3. Upload and execute provisioning scripts
#   4. Stream logs back to Hub API
#   5. Exit when complete (success or failure)

services:
  runner:
    build: .
    image: ${RUNNER_IMAGE:-code.letsbe.solutions/letsbe/ansible-runner:latest}
    container_name: letsbe-runner-${JOB_ID:-manual}

    # Job-specific config (read-only) plus a host-visible log directory
    volumes:
      - ${CONFIG_PATH:-./config.json}:/job/config.json:ro
      - ${LOGS_PATH:-./logs}:/logs

    # Environment variables for Hub communication
    environment:
      - HUB_API_URL=${HUB_API_URL:-https://hub.letsbe.solutions}
      - JOB_ID=${JOB_ID}
      - RUNNER_TOKEN=${RUNNER_TOKEN}
      - JOB_CONFIG_PATH=/job/config.json

    # Network configuration
    # Development: use 'host' or rely on the host.docker.internal mapping below
    # Production: use the shared 'letsbe-network'
    network_mode: ${NETWORK_MODE:-bridge}
    extra_hosts:
      - "host.docker.internal:host-gateway"

    # Resource limits to prevent runaway provisioning jobs
    deploy:
      resources:
        limits:
          cpus: '1.0'
          memory: 512M
        reservations:
          cpus: '0.25'
          memory: 128M

    # The container exits when provisioning completes; the Hub polls job
    # status via the API, so no restart policy is wanted.
    restart: "no"

    # Health check (optional - container is short-lived)
    # healthcheck:
    #   test: ["CMD", "curl", "-f", "http://localhost/health"]
    #   interval: 30s
    #   timeout: 10s
    #   retries: 3

# Optional: persistent network for production deployments
networks:
  default:
    name: ${DOCKER_NETWORK:-letsbe-runner-network}
    driver: bridge
||||
|
|
@ -0,0 +1,322 @@
|
|||
#!/bin/bash
#
# LetsBe Ansible Runner - Hub-Integrated Entrypoint
#
# Entry point for the containerized Ansible runner: streams provisioning
# logs to the Hub API and reports job status on completion or failure.
#

set -euo pipefail

# --- Runtime configuration (injected by the Hub via environment) ---
HUB_API_URL="${HUB_API_URL:-https://hub.letsbe.solutions}"
JOB_ID="${JOB_ID:-}"
RUNNER_TOKEN="${RUNNER_TOKEN:-}"
JOB_CONFIG_PATH="${JOB_CONFIG_PATH:-/job/config.json}"
CURRENT_STEP="init"
COMPLETION_MARKED=false

# Portainer credentials, populated later by read_portainer_credentials
PORTAINER_USER=""
PORTAINER_PASS=""

# Result JSON is handed between steps via a file to avoid bash
# variable-expansion/quoting issues with arbitrary JSON content
RESULT_FILE="/tmp/job_result.json"
||||
# Logging to Hub API
# Prints the message locally, then (when job credentials are present) POSTs a
# structured log entry to the Hub. The payload is built with jq so that
# quotes, backslashes and newlines inside $message cannot break the JSON —
# the previous string-concatenation produced invalid payloads for such input.
# $1=level $2=message $3=step (default CURRENT_STEP) $4=progress (optional int)
log_to_hub() {
    local level="$1" message="$2" step="${3:-$CURRENT_STEP}" progress="${4:-}"
    echo "[$(date '+%H:%M:%S')] [$level] [$step] $message"
    if [[ -n "$JOB_ID" && -n "$RUNNER_TOKEN" ]]; then
        local payload
        payload=$(jq -n -c --arg level "$level" --arg message "$message" --arg step "$step" \
            '{level: $level, message: $message, step: $step}')
        if [[ -n "$progress" ]]; then
            # --argjson keeps progress numeric; fall back to the bare payload
            # if the caller passed something that is not valid JSON
            payload=$(jq -c --argjson progress "$progress" '. + {progress: $progress}' \
                <<<"$payload" 2>/dev/null || echo "$payload")
        fi
        # Best-effort: a Hub outage must never break the provisioning run
        curl -s -X POST "${HUB_API_URL}/api/v1/jobs/${JOB_ID}/logs" \
            -H "X-Runner-Token: ${RUNNER_TOKEN}" \
            -H "Content-Type: application/json" \
            -d "$payload" > /dev/null 2>&1 || true
    fi
}
|
||||
|
||||
# Severity-level wrappers around log_to_hub: $1=message $2=step $3=progress.
log_info()  { log_to_hub "info"  "$1" "${2:-$CURRENT_STEP}" "${3:-}"; }
log_warn()  { log_to_hub "warn"  "$1" "${2:-$CURRENT_STEP}" "${3:-}"; }
log_error() { log_to_hub "error" "$1" "${2:-$CURRENT_STEP}" "${3:-}"; }
|
||||
|
||||
# Report job status (plus optional result JSON or error text) to the Hub.
# PATCHes /api/v1/jobs/$JOB_ID; the payload is assembled with jq into a
# PID-scoped temp file so arbitrary result content survives quoting intact.
# $1=status  $2=error message (optional)
update_job_status() {
    local status="$1" error="${2:-}"
    if [[ -n "$JOB_ID" && -n "$RUNNER_TOKEN" ]]; then
        local payload_file="/tmp/payload_$$.json"
        # PID-scoped like the payload file — the previous fixed name
        # /tmp/hub_response.txt collided between concurrent runner processes
        local response_file="/tmp/hub_response_$$.txt"

        log_info "update_job_status called with status=$status" "debug"

        # Prefer the prepared result file when it exists, is non-empty, and
        # contains valid JSON; otherwise fall back to error / status-only.
        if [[ -f "$RESULT_FILE" && -s "$RESULT_FILE" ]]; then
            log_info "Result file exists, size: $(wc -c < "$RESULT_FILE") bytes" "debug"
            if jq -e . "$RESULT_FILE" > /dev/null 2>&1; then
                log_info "Result file contains valid JSON" "debug"
                jq -n --arg status "$status" --slurpfile result "$RESULT_FILE" \
                    '{status: $status, result: $result[0]}' > "$payload_file"
            else
                log_error "Result file is not valid JSON" "debug"
                jq -n --arg status "$status" '{status: $status}' > "$payload_file"
            fi
        elif [[ -n "$error" ]]; then
            log_info "Building error payload" "debug"
            jq -n --arg status "$status" --arg error "$error" \
                '{status: $status, error: $error}' > "$payload_file"
        else
            log_info "Building status-only payload" "debug"
            jq -n --arg status "$status" '{status: $status}' > "$payload_file"
        fi

        # Log size only — the payload may contain Portainer credentials
        log_info "Payload size: $(wc -c < "$payload_file") bytes" "debug"

        # Send to Hub (best-effort; never abort the run on a network error)
        local http_code
        http_code=$(curl -s -w "%{http_code}" -o "$response_file" -X PATCH "${HUB_API_URL}/api/v1/jobs/${JOB_ID}" \
            -H "X-Runner-Token: ${RUNNER_TOKEN}" \
            -H "Content-Type: application/json" \
            -d "@$payload_file") || true

        log_info "Hub responded with HTTP $http_code" "debug"

        rm -f "$payload_file" "$response_file"
    fi
}
|
||||
|
||||
# Mark the job completed exactly once; subsequent calls are no-ops thanks to
# the COMPLETION_MARKED latch shared with mark_failed.
mark_completed() {
    log_info "mark_completed called" "complete"

    if [[ "$COMPLETION_MARKED" == "true" ]]; then
        log_info "Completion already marked, skipping" "complete"
        return
    fi
    COMPLETION_MARKED=true

    log_info "Job completed" "complete" "100"

    # Report whether step_finalize managed to prepare a result payload
    if [[ -f "$RESULT_FILE" ]]; then
        log_info "Result file ready: $(wc -c < "$RESULT_FILE") bytes" "complete"
    else
        log_warn "No result file found" "complete"
    fi

    log_info "Calling update_job_status" "complete"
    update_job_status "completed"
    log_info "update_job_status returned" "complete"
}
|
||||
|
||||
# Mark the job failed; a no-op when a completion/failure was already reported
# (first caller wins the COMPLETION_MARKED latch). $1 = failure reason.
mark_failed() {
    if [[ "$COMPLETION_MARKED" == "true" ]]; then
        log_info "Completion already marked, skipping failure" "failed"
        return
    fi
    COMPLETION_MARKED=true

    log_error "Job failed: $1" "failed"
    update_job_status "failed" "$1"
}
|
||||
|
||||
# EXIT trap: if the script dies (set -e, signal, explicit exit) before any
# status was reported, tell the Hub the job failed with the exit code.
cleanup() {
    local exit_code=$?
    if [[ $exit_code -ne 0 && "$COMPLETION_MARKED" != "true" ]]; then
        mark_failed "Script exited with code $exit_code"
    fi
}
trap cleanup EXIT
|
||||
|
||||
# SSH helpers
# SSH_OPTS is deliberately left unquoted at call sites so the options
# word-split into separate arguments. Host-key checking is disabled because
# targets are freshly provisioned servers with unknown keys.
SSH_OPTS="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o ConnectTimeout=30"

# Run a command on the target server as root.
ssh_run() {
    sshpass -p "$SERVER_PASSWORD" ssh $SSH_OPTS -p "$SERVER_PORT" root@"$SERVER_IP" "$@"
}

# Upload a single file ($1) to a remote path ($2).
scp_upload() {
    sshpass -p "$SERVER_PASSWORD" scp $SSH_OPTS -P "$SERVER_PORT" "$1" root@"$SERVER_IP":"$2"
}

# Recursively upload a directory ($1) to a remote path ($2).
scp_upload_dir() {
    sshpass -p "$SERVER_PASSWORD" scp -r $SSH_OPTS -P "$SERVER_PORT" "$1" root@"$SERVER_IP":"$2"
}
|
||||
|
||||
# Docker registry login on target server
# Registry tokens are piped over SSH stdin into `docker login --password-stdin`
# so they never appear on the remote command line (visible in `ps` output and
# shell history with the previous inline-'$token' form).
step_docker_login() {
    CURRENT_STEP="docker-login"

    # Docker Hub credentials from config (empty string when absent)
    local dockerhub_user dockerhub_token dockerhub_registry
    dockerhub_user=$(jq -r '.dockerHub.username // empty' "$JOB_CONFIG_PATH")
    dockerhub_token=$(jq -r '.dockerHub.token // empty' "$JOB_CONFIG_PATH")
    dockerhub_registry=$(jq -r '.dockerHub.registry // empty' "$JOB_CONFIG_PATH")

    # Gitea registry credentials from config
    local gitea_registry gitea_user gitea_token
    gitea_registry=$(jq -r '.gitea.registry // empty' "$JOB_CONFIG_PATH")
    gitea_user=$(jq -r '.gitea.username // empty' "$JOB_CONFIG_PATH")
    gitea_token=$(jq -r '.gitea.token // empty' "$JOB_CONFIG_PATH")

    # Login to Docker Hub if credentials provided (best-effort)
    if [[ -n "$dockerhub_user" && -n "$dockerhub_token" ]]; then
        log_info "Logging into Docker Hub" "docker-login" "12"
        if [[ -n "$dockerhub_registry" ]]; then
            printf '%s' "$dockerhub_token" | ssh_run "docker login -u '$dockerhub_user' --password-stdin '$dockerhub_registry'" 2>&1 || log_warn "Docker Hub login failed"
        else
            printf '%s' "$dockerhub_token" | ssh_run "docker login -u '$dockerhub_user' --password-stdin" 2>&1 || log_warn "Docker Hub login failed"
        fi
        log_info "Docker Hub login complete" "docker-login"
    else
        log_info "No Docker Hub credentials, skipping login" "docker-login"
    fi

    # Login to Gitea registry if credentials provided (best-effort)
    if [[ -n "$gitea_registry" && -n "$gitea_user" && -n "$gitea_token" ]]; then
        log_info "Logging into Gitea registry: $gitea_registry" "docker-login" "14"
        printf '%s' "$gitea_token" | ssh_run "docker login -u '$gitea_user' --password-stdin '$gitea_registry'" 2>&1 || log_warn "Gitea registry login failed"
        log_info "Gitea registry login complete" "docker-login"
    else
        log_info "No Gitea credentials, skipping registry login" "docker-login"
    fi
}
|
||||
|
||||
# Load config
# Reads the job parameters from $JOB_CONFIG_PATH into globals. Fails fast
# when required connection fields are missing: `jq -r` prints the literal
# string "null" for absent keys, which would otherwise surface much later as
# a hung or failed SSH connection.
load_config() {
    CURRENT_STEP="config"
    log_info "Loading job configuration"
    [[ ! -f "$JOB_CONFIG_PATH" ]] && { log_error "Config not found"; exit 1; }
    SERVER_IP=$(jq -r '.server.ip' "$JOB_CONFIG_PATH")
    SERVER_PORT=$(jq -r '.server.port // 22' "$JOB_CONFIG_PATH")
    SERVER_PASSWORD=$(jq -r '.server.rootPassword' "$JOB_CONFIG_PATH")
    CUSTOMER=$(jq -r '.customer' "$JOB_CONFIG_PATH")
    DOMAIN=$(jq -r '.domain' "$JOB_CONFIG_PATH")
    COMPANY_NAME=$(jq -r '.companyName' "$JOB_CONFIG_PATH")
    LICENSE_KEY=$(jq -r '.licenseKey' "$JOB_CONFIG_PATH")
    DASHBOARD_TIER=$(jq -r '.dashboardTier' "$JOB_CONFIG_PATH")
    TOOLS=$(jq -r '.tools | join(",")' "$JOB_CONFIG_PATH")
    # Validate the fields we cannot provision without
    local field
    for field in SERVER_IP SERVER_PASSWORD DOMAIN; do
        if [[ -z "${!field}" || "${!field}" == "null" ]]; then
            log_error "Config missing required field: $field"
            exit 1
        fi
    done
    log_info "Config: $DOMAIN on $SERVER_IP" "config" "5"
}
|
||||
|
||||
# Provisioning steps
# Create the /opt/letsbe directory layout on the target server.
step_prepare() {
    CURRENT_STEP="prepare"
    log_info "Creating directories" "prepare" "10"
    ssh_run "mkdir -p /opt/letsbe/{scripts,env,stacks,nginx,config}"
}
|
||||
|
||||
# Upload provisioning scripts, nginx templates and stack definitions
# to the target server and make the scripts executable.
step_upload() {
    CURRENT_STEP="upload"

    log_info "Uploading files" "upload" "20"
    local script
    for script in env_setup.sh setup.sh; do
        scp_upload "/workspace/scripts/$script" "/opt/letsbe/scripts/"
    done
    ssh_run "chmod +x /opt/letsbe/scripts/*.sh"

    log_info "Uploading nginx" "upload" "30"
    scp_upload_dir "/workspace/nginx/." "/opt/letsbe/nginx/"

    log_info "Uploading stacks" "upload" "40"
    scp_upload_dir "/workspace/stacks/." "/opt/letsbe/stacks/"
}
|
||||
|
||||
# Run env_setup.sh on the target with the customer-specific parameters.
# Each value is shell-quoted with `printf %q` so embedded quotes, spaces or
# metacharacters survive the remote shell's re-evaluation of the command
# string — the previous inline single-quoting broke (and allowed command
# injection) for any value containing a single quote.
step_env() {
    CURRENT_STEP="env"
    log_info "Setting up environment" "env" "50"
    ssh_run "bash /opt/letsbe/scripts/env_setup.sh --customer $(printf '%q' "$CUSTOMER") --domain $(printf '%q' "$DOMAIN") --company $(printf '%q' "$COMPANY_NAME") --license-key $(printf '%q' "$LICENSE_KEY")"
}
|
||||
|
||||
# Read Portainer credentials from the remote server
# Parses /opt/letsbe/env/credentials.env into PORTAINER_USER/PORTAINER_PASS.
# `cut -f2-` (not -f2) keeps the full value even when the generated password
# itself contains '=' characters, which the old code silently truncated.
read_portainer_credentials() {
    CURRENT_STEP="credentials"
    log_info "Reading Portainer credentials from server" "credentials" "55"

    # Read credentials.env from the remote server (empty on failure)
    local creds_content
    creds_content=$(ssh_run "cat /opt/letsbe/env/credentials.env 2>/dev/null" || echo "")

    if [[ -n "$creds_content" ]]; then
        # Extract the two Portainer entries, stripping CR/LF
        PORTAINER_USER=$(echo "$creds_content" | grep "^PORTAINER_ADMIN_USER=" | cut -d'=' -f2- | tr -d '\r\n' || echo "")
        PORTAINER_PASS=$(echo "$creds_content" | grep "^PORTAINER_ADMIN_PASSWORD=" | cut -d'=' -f2- | tr -d '\r\n' || echo "")

        log_info "Portainer user available: ${PORTAINER_USER:-none}" "credentials"
        # Never log the password itself — only whether it is present
        log_info "Portainer pass available: $([ -n "$PORTAINER_PASS" ] && echo "yes" || echo "no")" "credentials"
    else
        log_warn "Could not read credentials.env from server" "credentials"
    fi
}
|
||||
|
||||
# Run the main setup.sh on the target and verify it finished.
# setup.sh restarts sshd (moving it to port 22022) at the end, which drops
# the connection — so a nonzero ssh exit is expected and must not abort us.
step_setup() {
    CURRENT_STEP="setup"
    log_info "Running setup (10-15 min)" "setup" "60"

    set +e
    ssh_run "bash /opt/letsbe/scripts/setup.sh --tools '$TOOLS' --domain '$DOMAIN'" 2>&1 | while read -r line; do
        [[ -n "$line" ]] && log_info "$line" "setup"
    done
    # PIPESTATUS[0] is the ssh command's exit code; the previous `$?` captured
    # the `while` loop's status instead (and `local var=$?` is always 0 anyway)
    local setup_exit=${PIPESTATUS[0]}
    set -e
    log_info "setup.sh exit code: $setup_exit (nonzero is expected when SSH restarts)" "setup"

    # Give sshd time to come back up on the new port
    sleep 3

    # Verify setup completed by connecting on the new SSH port (22022)
    log_info "Verifying setup completion on new SSH port..." "setup"
    if sshpass -p "$SERVER_PASSWORD" ssh $SSH_OPTS -p 22022 root@"$SERVER_IP" "echo 'SSH reconnected on port 22022'" 2>/dev/null; then
        log_info "Setup complete - SSH accessible on port 22022" "setup" "90"
    else
        # Fall back to the original port (maybe the SSH restart didn't happen)
        if ssh_run "echo 'SSH still on original port'" 2>/dev/null; then
            log_info "Setup complete - SSH still on original port" "setup" "90"
        else
            log_warn "Could not verify SSH connectivity, but setup logs indicate completion" "setup"
        fi
    fi
}
|
||||
|
||||
# Build the final result JSON in $RESULT_FILE and report completion.
# The JSON is written straight to a file (via jq) to sidestep bash
# variable-expansion problems with arbitrary credential content.
step_finalize() {
    CURRENT_STEP="finalize"
    log_info "Finalizing" "finalize" "95"

    log_info "Portainer user: '${PORTAINER_USER:-}'" "finalize"
    log_info "Portainer pass length: ${#PORTAINER_PASS}" "finalize"

    local dash_url="https://dashboard.${DOMAIN}"
    local port_url="https://${SERVER_IP}:9443"

    # Include Portainer credentials only when both were recovered
    if [[ -n "$PORTAINER_USER" && -n "$PORTAINER_PASS" ]]; then
        log_info "Building result with Portainer credentials" "finalize"
        jq -n -c \
            --arg dashboard_url "$dash_url" \
            --arg portainer_url "$port_url" \
            --arg portainer_username "$PORTAINER_USER" \
            --arg portainer_password "$PORTAINER_PASS" \
            '{dashboard_url:$dashboard_url,portainer_url:$portainer_url,portainer_username:$portainer_username,portainer_password:$portainer_password}' \
            > "$RESULT_FILE"
    else
        log_warn "No Portainer credentials available" "finalize"
        jq -n -c \
            --arg dashboard_url "$dash_url" \
            --arg portainer_url "$port_url" \
            '{dashboard_url:$dashboard_url,portainer_url:$portainer_url}' \
            > "$RESULT_FILE"
    fi

    # Sanity-check the file before update_job_status consumes it
    if [[ -f "$RESULT_FILE" ]]; then
        log_info "Result file created: $(wc -c < "$RESULT_FILE") bytes" "finalize"
        if jq -e . "$RESULT_FILE" > /dev/null 2>&1; then
            log_info "Result file validated as JSON" "finalize"
        else
            log_error "Result file is NOT valid JSON!" "finalize"
        fi
    else
        log_error "Failed to create result file!" "finalize"
    fi

    log_info "Calling mark_completed" "finalize"
    mark_completed
    log_info "mark_completed returned" "finalize"
}
|
||||
|
||||
# Main
# Runs the provisioning pipeline in order; any failure trips the EXIT trap,
# which reports the job as failed to the Hub.
main() {
    log_info "LetsBe Runner starting"
    load_config
    step_prepare
    step_docker_login
    step_upload
    step_env
    read_portainer_credentials
    step_setup
    step_finalize
}

main
|
||||
|
|
@ -0,0 +1,60 @@
|
|||
# HTTP: redirect everything to HTTPS, but keep ACME challenges reachable.
server {
    client_max_body_size 64M;

    listen 80;
    server_name {{ domain_activepieces }};

    location / {
        return 301 https://$host$request_uri;
    }

    location ^~ /.well-known/acme-challenge/ {
        alias /var/www/html/.well-known/acme-challenge/;
        default_type "text/plain";
        allow all;
    }
}

# HTTPS: reverse proxy to the Activepieces container on local port 3056.
server {
    client_max_body_size 64M;
    #large_client_header_buffers 4 16k;

    listen 443 ssl http2;

    server_name {{ domain_activepieces }};

    # Placeholder cert, swapped for a real one during provisioning
    ssl_certificate /etc/nginx/placeholder.crt;
    ssl_certificate_key /etc/nginx/placeholder.key;

    #add_header Strict-Transport-Security "max-age=31536000; includeSubDomains" always;

    #auth_basic "Restricted Content";
    #auth_basic_user_file letsbe-htpasswd;

    location / {
        # 127.0.0.1 instead of 0.0.0.0: the upstream is local, and 0.0.0.0 as
        # a *connect* address is non-portable (Linux-only behavior)
        proxy_pass http://127.0.0.1:3056;
        proxy_redirect off;
        proxy_set_header Host $host;
        proxy_set_header X-Forwarded-Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_set_header Authorization $http_authorization;
        # WebSocket upgrade support
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
        #proxy_buffers 16 4k;
        #proxy_buffer_size 2k;
    }

    location ^~ /.well-known/acme-challenge/ {
        alias /var/www/html/.well-known/acme-challenge/;
        default_type "text/plain";
        allow all;
    }
}
||||
|
|
@ -0,0 +1,39 @@
|
|||
# HTTPS: typebot-builder UI behind TLS with WebSocket upgrade support.
server {
    client_max_body_size 64M;
    server_name {{ domain_botlab }};

    location / {
        proxy_pass http://172.20.1.8:3000; # Backend for typebot-builder
        proxy_http_version 1.1;
        proxy_cache_bypass $http_upgrade;
        proxy_redirect off;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
    }

    location ^~ /.well-known/acme-challenge/ {
        alias /var/www/html/.well-known/acme-challenge/;
        default_type "text/plain";
        allow all;
    }

    listen 443 ssl; # managed by Certbot
    ssl_certificate /etc/nginx/placeholder.crt; # managed by Certbot
    ssl_certificate_key /etc/nginx/placeholder.key; # managed by Certbot
    include /etc/letsencrypt/options-ssl-nginx.conf; # managed by Certbot
    ssl_dhparam /etc/letsencrypt/ssl-dhparams.pem; # managed by Certbot
}

# HTTP: certbot-managed stub — plain-HTTP requests for this host get 404.
server {
    listen 80;
    server_name {{ domain_botlab }};
    return 404; # managed by Certbot
}
||||
|
|
@ -0,0 +1,36 @@
|
|||
# HTTPS: bot-viewer behind TLS with WebSocket upgrade support.
server {
    client_max_body_size 64M;
    server_name {{ domain_typebot }};

    location / {
        proxy_pass http://172.20.1.9:3000; # Backend for bot-viewer
        proxy_redirect off;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
    }

    location ^~ /.well-known/acme-challenge/ {
        alias /var/www/html/.well-known/acme-challenge/;
        default_type "text/plain";
        allow all;
    }

    listen 443 ssl; # managed by Certbot
    ssl_certificate /etc/nginx/placeholder.crt; # managed by Certbot
    ssl_certificate_key /etc/nginx/placeholder.key; # managed by Certbot
    include /etc/letsencrypt/options-ssl-nginx.conf; # managed by Certbot
    ssl_dhparam /etc/letsencrypt/ssl-dhparams.pem; # managed by Certbot
}

# HTTP: certbot-managed stub — plain-HTTP requests for this host get 404.
server {
    listen 80;
    server_name {{ domain_typebot }};
    return 404; # managed by Certbot
}
||||
|
|
@ -0,0 +1,53 @@
|
|||
# HTTP: redirect everything to HTTPS, but keep ACME challenges reachable.
server {
    client_max_body_size 64M;

    listen 80;
    server_name {{ domain_calcom }};

    location / {
        return 301 https://$host$request_uri;
    }

    location ^~ /.well-known/acme-challenge/ {
        alias /var/www/html/.well-known/acme-challenge/;
        default_type "text/plain";
        allow all;
    }
}

# HTTPS: reverse proxy to the Cal.com container on local port 3018.
server {
    client_max_body_size 64M;
    #large_client_header_buffers 4 16k;

    listen 443 ssl http2;

    server_name {{ domain_calcom }};

    ssl_certificate /etc/nginx/placeholder.crt;
    ssl_certificate_key /etc/nginx/placeholder.key;

    #add_header Strict-Transport-Security "max-age=31536000; includeSubDomains" always;

    #auth_basic "Restricted Content";
    #auth_basic_user_file letsbe-htpasswd;

    location / {
        # 127.0.0.1 instead of 0.0.0.0: the upstream is local, and 0.0.0.0 as
        # a *connect* address is non-portable (Linux-only behavior)
        proxy_pass http://127.0.0.1:3018;
        proxy_redirect off;
        proxy_set_header Host $host;
        # Removed the duplicate `proxy_set_header X-Real-IP $http_cf_connecting_ip`:
        # two directives send the header twice. If Cloudflare fronts this
        # server, use the realip module (set_real_ip_from / real_ip_header)
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        # WebSocket upgrade support
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
        #proxy_buffers 16 4k;
        #proxy_buffer_size 2k;
    }

    location ^~ /.well-known/acme-challenge/ {
        alias /var/www/html/.well-known/acme-challenge/;
        default_type "text/plain";
        allow all;
    }
}
||||
|
|
@ -0,0 +1,107 @@
|
|||
# HTTP ({{ domain_chatwoot }}): redirect to HTTPS, keep ACME reachable.
server {
    client_max_body_size 64M;

    listen 80;
    server_name {{ domain_chatwoot }};

    location / {
        return 301 https://$host$request_uri;
    }

    location ^~ /.well-known/acme-challenge/ {
        alias /var/www/html/.well-known/acme-challenge/;
        default_type "text/plain";
        allow all;
    }
}

# HTTPS ({{ domain_chatwoot }}): reverse proxy to Chatwoot on local port 3011.
server {
    client_max_body_size 64M;
    #large_client_header_buffers 4 16k;

    listen 443 ssl http2;

    server_name {{ domain_chatwoot }};

    ssl_certificate /etc/nginx/placeholder.crt;
    ssl_certificate_key /etc/nginx/placeholder.key;

    #auth_basic "Restricted Content";
    #auth_basic_user_file letsbe-htpasswd;

    #add_header Strict-Transport-Security "max-age=31536000; includeSubDomains" always;

    location / {
        # 127.0.0.1 instead of 0.0.0.0: the upstream is local, and 0.0.0.0 as
        # a *connect* address is non-portable (Linux-only behavior)
        proxy_pass http://127.0.0.1:3011;
        proxy_redirect off;
        proxy_set_header Host $host;
        # Removed duplicate X-Real-IP ($http_cf_connecting_ip) — two directives
        # send the header twice; use the realip module if behind Cloudflare
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        # WebSocket upgrade support (Chatwoot ActionCable)
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
        #proxy_buffers 16 4k;
        #proxy_buffer_size 2k;
    }

    location ^~ /.well-known/acme-challenge/ {
        alias /var/www/html/.well-known/acme-challenge/;
        default_type "text/plain";
        allow all;
    }
}

# HTTP ({{ domain_chatwoot_helpdesk }}): redirect to HTTPS, keep ACME reachable.
server {
    client_max_body_size 64M;

    listen 80;
    server_name {{ domain_chatwoot_helpdesk }};

    location / {
        return 301 https://$host$request_uri;
    }

    location ^~ /.well-known/acme-challenge/ {
        alias /var/www/html/.well-known/acme-challenge/;
        default_type "text/plain";
        allow all;
    }
}

# HTTPS ({{ domain_chatwoot_helpdesk }}): same Chatwoot backend, helpdesk alias.
server {
    client_max_body_size 64M;
    #large_client_header_buffers 4 16k;

    listen 443 ssl http2;

    server_name {{ domain_chatwoot_helpdesk }};

    ssl_certificate /etc/nginx/placeholder.crt;
    ssl_certificate_key /etc/nginx/placeholder.key;

    #add_header Strict-Transport-Security "max-age=31536000; includeSubDomains" always;

    #auth_basic "Restricted Content";
    #auth_basic_user_file letsbe-htpasswd;

    location / {
        # See note above: local upstream, single X-Real-IP header
        proxy_pass http://127.0.0.1:3011;
        proxy_redirect off;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
        #proxy_buffers 16 4k;
        #proxy_buffer_size 2k;
    }

    location ^~ /.well-known/acme-challenge/ {
        alias /var/www/html/.well-known/acme-challenge/;
        default_type "text/plain";
        allow all;
    }
}
||||
|
|
@ -0,0 +1,40 @@
|
|||
# HTTP: redirect everything to HTTPS, but keep ACME challenges reachable.
server {
    client_max_body_size 64M;

    listen 80;
    server_name {{ domain_documenso }};

    location / {
        return 301 https://$host$request_uri;
    }

    location ^~ /.well-known/acme-challenge/ {
        alias /var/www/html/.well-known/acme-challenge/;
        default_type "text/plain";
        allow all;
    }
}

# HTTPS: reverse proxy to the Documenso container on local port 3020.
server {
    client_max_body_size 64M;

    listen 443 ssl http2;
    server_name {{ domain_documenso }};

    ssl_certificate /etc/nginx/placeholder.crt;
    ssl_certificate_key /etc/nginx/placeholder.key;

    location / {
        proxy_pass http://127.0.0.1:3020;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
    }

    location ^~ /.well-known/acme-challenge/ {
        alias /var/www/html/.well-known/acme-challenge/;
        default_type "text/plain";
        allow all;
    }
}
||||
|
|
@ -0,0 +1,53 @@
|
|||
# HTTP: redirect everything to HTTPS, but keep ACME challenges reachable.
server {
    client_max_body_size 64M;

    listen 80;
    server_name {{ domain }};

    location / {
        return 301 https://$host$request_uri;
    }

    location ^~ /.well-known/acme-challenge/ {
        alias /var/www/html/.well-known/acme-challenge/;
        default_type "text/plain";
        allow all;
    }
}

# HTTPS: reverse proxy to the dashboard container on local port 3054.
server {
    client_max_body_size 64M;
    #large_client_header_buffers 4 16k;

    listen 443 ssl http2;

    server_name {{ domain }};

    ssl_certificate /etc/nginx/placeholder.crt;
    ssl_certificate_key /etc/nginx/placeholder.key;

    #add_header Strict-Transport-Security "max-age=31536000; includeSubDomains" always;

    #auth_basic "Restricted Content";
    #auth_basic_user_file letsbe-htpasswd;

    location / {
        # 127.0.0.1 instead of 0.0.0.0: the upstream is local, and 0.0.0.0 as
        # a *connect* address is non-portable (Linux-only behavior)
        proxy_pass http://127.0.0.1:3054;
        proxy_redirect off;
        proxy_set_header Host $host;
        # Removed duplicate X-Real-IP ($http_cf_connecting_ip) — two directives
        # send the header twice; use the realip module if behind Cloudflare
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        # WebSocket upgrade support
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
        #proxy_buffers 16 4k;
        #proxy_buffer_size 2k;
    }

    location ^~ /.well-known/acme-challenge/ {
        alias /var/www/html/.well-known/acme-challenge/;
        default_type "text/plain";
        allow all;
    }
}
||||
|
|
@ -0,0 +1,40 @@
|
|||
server {
|
||||
client_max_body_size 64M;
|
||||
|
||||
listen 80;
|
||||
server_name {{ domain_ghost }};
|
||||
|
||||
location / {
|
||||
return 301 https://$host$request_uri;
|
||||
}
|
||||
|
||||
location ^~ /.well-known/acme-challenge/ {
|
||||
alias /var/www/html/.well-known/acme-challenge/;
|
||||
default_type "text/plain";
|
||||
allow all;
|
||||
}
|
||||
}
|
||||
|
||||
server {
|
||||
client_max_body_size 64M;
|
||||
|
||||
listen 443 ssl http2;
|
||||
server_name {{ domain_ghost }};
|
||||
|
||||
ssl_certificate /etc/nginx/placeholder.crt;
|
||||
ssl_certificate_key /etc/nginx/placeholder.key;
|
||||
|
||||
location / {
|
||||
proxy_pass http://127.0.0.1:2368;
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
}
|
||||
|
||||
location ^~ /.well-known/acme-challenge/ {
|
||||
alias /var/www/html/.well-known/acme-challenge/;
|
||||
default_type "text/plain";
|
||||
allow all;
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,53 @@
|
|||
server {
|
||||
client_max_body_size 64M;
|
||||
|
||||
listen 80;
|
||||
server_name {{ domain_gitea_drone }};
|
||||
|
||||
location / {
|
||||
return 301 https://$host$request_uri;
|
||||
}
|
||||
|
||||
location ^~ /.well-known/acme-challenge/ {
|
||||
alias /var/www/html/.well-known/acme-challenge/;
|
||||
default_type "text/plain";
|
||||
allow all;
|
||||
}
|
||||
}
|
||||
|
||||
server {
|
||||
client_max_body_size 64M;
|
||||
#large_client_header_buffers 4 16k;
|
||||
|
||||
listen 443 ssl http2;
|
||||
|
||||
server_name {{ domain_gitea_drone }};
|
||||
|
||||
ssl_certificate /etc/nginx/placeholder.crt;
|
||||
ssl_certificate_key /etc/nginx/placeholder.key;
|
||||
|
||||
#add_header Strict-Transport-Security "max-age=31536000; includeSubDomains" always;
|
||||
|
||||
#auth_basic "Restricted Content";
|
||||
#auth_basic_user_file letsbe-htpasswd;
|
||||
|
||||
location / {
|
||||
proxy_pass http://0.0.0.0:3009;
|
||||
proxy_redirect off;
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Real-IP $http_cf_connecting_ip;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
proxy_set_header Upgrade $http_upgrade;
|
||||
proxy_set_header Connection "upgrade";
|
||||
#proxy_buffers 16 4k;
|
||||
#proxy_buffer_size 2k;
|
||||
}
|
||||
|
||||
location ^~ /.well-known/acme-challenge/ {
|
||||
alias /var/www/html/.well-known/acme-challenge/;
|
||||
default_type "text/plain";
|
||||
allow all;
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,53 @@
|
|||
server {
|
||||
client_max_body_size 64M;
|
||||
|
||||
listen 80;
|
||||
server_name {{ domain_gitea }};
|
||||
|
||||
location / {
|
||||
return 301 https://$host$request_uri;
|
||||
}
|
||||
|
||||
location ^~ /.well-known/acme-challenge/ {
|
||||
alias /var/www/html/.well-known/acme-challenge/;
|
||||
default_type "text/plain";
|
||||
allow all;
|
||||
}
|
||||
}
|
||||
|
||||
server {
|
||||
client_max_body_size 64M;
|
||||
#large_client_header_buffers 4 16k;
|
||||
|
||||
listen 443 ssl http2;
|
||||
|
||||
server_name {{ domain_gitea }};
|
||||
|
||||
ssl_certificate /etc/nginx/placeholder.crt;
|
||||
ssl_certificate_key /etc/nginx/placeholder.key;
|
||||
|
||||
#add_header Strict-Transport-Security "max-age=31536000; includeSubDomains" always;
|
||||
|
||||
#auth_basic "Restricted Content";
|
||||
#auth_basic_user_file letsbe-htpasswd;
|
||||
|
||||
location / {
|
||||
proxy_pass http://0.0.0.0:3007;
|
||||
proxy_redirect off;
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Real-IP $http_cf_connecting_ip;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
proxy_set_header Upgrade $http_upgrade;
|
||||
proxy_set_header Connection "upgrade";
|
||||
#proxy_buffers 16 4k;
|
||||
#proxy_buffer_size 2k;
|
||||
}
|
||||
|
||||
location ^~ /.well-known/acme-challenge/ {
|
||||
alias /var/www/html/.well-known/acme-challenge/;
|
||||
default_type "text/plain";
|
||||
allow all;
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,53 @@
|
|||
server {
|
||||
client_max_body_size 64M;
|
||||
|
||||
listen 80;
|
||||
server_name {{ domain_glitchtip }};
|
||||
|
||||
location / {
|
||||
return 301 https://$host$request_uri;
|
||||
}
|
||||
|
||||
location ^~ /.well-known/acme-challenge/ {
|
||||
alias /var/www/html/.well-known/acme-challenge/;
|
||||
default_type "text/plain";
|
||||
allow all;
|
||||
}
|
||||
}
|
||||
|
||||
server {
|
||||
client_max_body_size 64M;
|
||||
#large_client_header_buffers 4 16k;
|
||||
|
||||
listen 443 ssl http2;
|
||||
|
||||
server_name {{ domain_glitchtip }};
|
||||
|
||||
ssl_certificate /etc/nginx/placeholder.crt;
|
||||
ssl_certificate_key /etc/nginx/placeholder.key;
|
||||
|
||||
#add_header Strict-Transport-Security "max-age=31536000; includeSubDomains" always;
|
||||
|
||||
#auth_basic "Restricted Content";
|
||||
#auth_basic_user_file letsbe-htpasswd;
|
||||
|
||||
location / {
|
||||
proxy_pass http://0.0.0.0:3017;
|
||||
proxy_redirect off;
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Real-IP $http_cf_connecting_ip;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
proxy_set_header Upgrade $http_upgrade;
|
||||
proxy_set_header Connection "upgrade";
|
||||
#proxy_buffers 16 4k;
|
||||
#proxy_buffer_size 2k;
|
||||
}
|
||||
|
||||
location ^~ /.well-known/acme-challenge/ {
|
||||
alias /var/www/html/.well-known/acme-challenge/;
|
||||
default_type "text/plain";
|
||||
allow all;
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,53 @@
|
|||
server {
|
||||
client_max_body_size 64M;
|
||||
|
||||
listen 80;
|
||||
server_name {{ domain_html }};
|
||||
|
||||
location / {
|
||||
return 301 https://$host$request_uri;
|
||||
}
|
||||
|
||||
location ^~ /.well-known/acme-challenge/ {
|
||||
alias /var/www/html/.well-known/acme-challenge/;
|
||||
default_type "text/plain";
|
||||
allow all;
|
||||
}
|
||||
}
|
||||
|
||||
server {
|
||||
client_max_body_size 64M;
|
||||
#large_client_header_buffers 4 16k;
|
||||
|
||||
listen 443 ssl http2;
|
||||
|
||||
server_name {{ domain_html }};
|
||||
|
||||
ssl_certificate /etc/nginx/placeholder.crt;
|
||||
ssl_certificate_key /etc/nginx/placeholder.key;
|
||||
|
||||
#add_header Strict-Transport-Security "max-age=31536000; includeSubDomains" always;
|
||||
|
||||
#auth_basic "Restricted Content";
|
||||
#auth_basic_user_file letsbe-htpasswd;
|
||||
|
||||
location / {
|
||||
proxy_pass http://0.0.0.0:3000;
|
||||
proxy_redirect off;
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Real-IP $http_cf_connecting_ip;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
proxy_set_header Upgrade $http_upgrade;
|
||||
proxy_set_header Connection "upgrade";
|
||||
#proxy_buffers 16 4k;
|
||||
#proxy_buffer_size 2k;
|
||||
}
|
||||
|
||||
location ^~ /.well-known/acme-challenge/ {
|
||||
alias /var/www/html/.well-known/acme-challenge/;
|
||||
default_type "text/plain";
|
||||
allow all;
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,46 @@
|
|||
server {
|
||||
client_max_body_size 64M;
|
||||
|
||||
listen 80;
|
||||
server_name {{ domain_keycloak }};
|
||||
|
||||
location / {
|
||||
return 301 https://$host$request_uri;
|
||||
}
|
||||
|
||||
location ^~ /.well-known/acme-challenge/ {
|
||||
alias /var/www/html/.well-known/acme-challenge/;
|
||||
default_type "text/plain";
|
||||
allow all;
|
||||
}
|
||||
}
|
||||
|
||||
server {
|
||||
client_max_body_size 64M;
|
||||
|
||||
listen 443 ssl http2;
|
||||
server_name {{ domain_keycloak }};
|
||||
|
||||
ssl_certificate /etc/nginx/placeholder.crt;
|
||||
ssl_certificate_key /etc/nginx/placeholder.key;
|
||||
|
||||
location / {
|
||||
proxy_pass http://127.0.0.1:8080;
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
proxy_set_header X-Forwarded-Port 443;
|
||||
|
||||
# WebSocket support
|
||||
proxy_http_version 1.1;
|
||||
proxy_set_header Upgrade $http_upgrade;
|
||||
proxy_set_header Connection "upgrade";
|
||||
}
|
||||
|
||||
location ^~ /.well-known/acme-challenge/ {
|
||||
alias /var/www/html/.well-known/acme-challenge/;
|
||||
default_type "text/plain";
|
||||
allow all;
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,44 @@
|
|||
server {
|
||||
client_max_body_size 64M;
|
||||
|
||||
listen 80;
|
||||
server_name {{ domain_librechat }};
|
||||
|
||||
location / {
|
||||
return 301 https://$host$request_uri;
|
||||
}
|
||||
|
||||
location ^~ /.well-known/acme-challenge/ {
|
||||
alias /var/www/html/.well-known/acme-challenge/;
|
||||
default_type "text/plain";
|
||||
allow all;
|
||||
}
|
||||
}
|
||||
|
||||
server {
|
||||
client_max_body_size 64M;
|
||||
|
||||
listen 443 ssl http2;
|
||||
server_name {{ domain_librechat }};
|
||||
|
||||
ssl_certificate /etc/nginx/placeholder.crt;
|
||||
ssl_certificate_key /etc/nginx/placeholder.key;
|
||||
|
||||
location / {
|
||||
proxy_pass http://0.0.0.0:3080;
|
||||
proxy_http_version 1.1;
|
||||
proxy_set_header Upgrade $http_upgrade;
|
||||
proxy_set_header Connection "upgrade";
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
proxy_cache_bypass $http_upgrade;
|
||||
}
|
||||
|
||||
location ^~ /.well-known/acme-challenge/ {
|
||||
alias /var/www/html/.well-known/acme-challenge/;
|
||||
default_type "text/plain";
|
||||
allow all;
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,49 @@
|
|||
server {
|
||||
client_max_body_size 64M;
|
||||
|
||||
listen 80;
|
||||
server_name {{ domain_listmonk }};
|
||||
|
||||
location / {
|
||||
return 301 https://$host$request_uri;
|
||||
}
|
||||
|
||||
location ^~ /.well-known/acme-challenge/ {
|
||||
alias /var/www/html/.well-known/acme-challenge/;
|
||||
default_type "text/plain";
|
||||
allow all;
|
||||
}
|
||||
}
|
||||
|
||||
server {
|
||||
client_max_body_size 64M;
|
||||
#large_client_header_buffers 4 16k;
|
||||
|
||||
listen 443 ssl http2;
|
||||
|
||||
server_name {{ domain_listmonk }};
|
||||
|
||||
ssl_certificate /etc/nginx/placeholder.crt;
|
||||
ssl_certificate_key /etc/nginx/placeholder.key;
|
||||
|
||||
#add_header Strict-Transport-Security "max-age=31536000; includeSubDomains" always;
|
||||
|
||||
#auth_basic "Restricted Content";
|
||||
#auth_basic_user_file letsbe-htpasswd;
|
||||
|
||||
location / {
|
||||
proxy_pass http://0.0.0.0:3006;
|
||||
proxy_set_header Host $host:$server_port;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
proxy_set_header Upgrade $http_upgrade;
|
||||
proxy_set_header Connection "upgrade";
|
||||
}
|
||||
|
||||
location ^~ /.well-known/acme-challenge/ {
|
||||
alias /var/www/html/.well-known/acme-challenge/;
|
||||
default_type "text/plain";
|
||||
allow all;
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,110 @@
|
|||
server {
|
||||
client_max_body_size 64M;
|
||||
|
||||
listen 80;
|
||||
server_name {{ domain_minio }};
|
||||
|
||||
location / {
|
||||
return 301 https://$host$request_uri;
|
||||
}
|
||||
|
||||
location ^~ /.well-known/acme-challenge/ {
|
||||
alias /var/www/html/.well-known/acme-challenge/;
|
||||
default_type "text/plain";
|
||||
allow all;
|
||||
}
|
||||
}
|
||||
|
||||
server {
|
||||
listen 443 ssl http2;
|
||||
server_name {{ domain_minio }};
|
||||
|
||||
location / {
|
||||
proxy_pass http://172.20.26.2:9001;
|
||||
proxy_redirect off;
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded_Proto $scheme;
|
||||
proxy_set_header Upgrade $http_upgrade;
|
||||
proxy_set_header Connection "upgrade";
|
||||
#proxy_buffers 16 4k;
|
||||
#proxy_buffer_size 2k;
|
||||
|
||||
# Remove existing CORS headers from MinIO to prevent duplicates
|
||||
proxy_hide_header Access-Control-Allow-Origin;
|
||||
|
||||
# CORS Settings
|
||||
add_header 'Access-Control-Allow-Origin' '*' always;
|
||||
add_header 'Access-Control-Allow-Methods' 'GET, POST, PUT, DELETE, OPTIONS' always;
|
||||
add_header 'Access-Control-Allow-Headers' '*' always;
|
||||
add_header 'Access-Control-Expose-Headers' 'ETag' always;
|
||||
|
||||
# Handle CORS preflight requests
|
||||
if ($request_method = 'OPTIONS') {
|
||||
add_header 'Content-Length' 0;
|
||||
add_header 'Content-Type' 'text/plain; charset=utf-8';
|
||||
return 204;
|
||||
}
|
||||
}
|
||||
|
||||
location ^~ /.well-known/acme-challenge/ {
|
||||
alias /var/www/html/.well-known/acme-challenge/;
|
||||
default_type "text/plain";
|
||||
allow all;
|
||||
}
|
||||
|
||||
ssl_protocols TLSv1.2 TLSv1.3;
|
||||
ssl_ciphers "ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-PO";
|
||||
ssl_prefer_server_ciphers on;
|
||||
ssl_certificate /etc/nginx/placeholder.crt;
|
||||
ssl_certificate_key /etc/nginx/placeholder.key;
|
||||
}
|
||||
|
||||
server {
|
||||
client_max_body_size 0;
|
||||
server_name {{ domain_s3 }};
|
||||
|
||||
location / {
|
||||
proxy_pass http://172.20.26.2:9000; # S3-compatible service
|
||||
proxy_set_header Host $http_host; # Essential for S3 bucket ops
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
|
||||
# Remove existing cors headers from MinIO to prevent duplicates
|
||||
proxy_hide_header Access-Control-Allow-Origin;
|
||||
|
||||
# CORS Settings
|
||||
add_header 'Access-Control-Allow-Origin' '*' always;
|
||||
add_header 'Access-Control-Allow-Methods' 'GET, POST, PUT, DELETE, OPTIONS' always;
|
||||
add_header 'Access-Control-Allow-Headers' '*' always;
|
||||
add_header 'Access-Control-Expose-Headers' 'Origin, Content-Type, Content-MD5, Content-Disposition, ETag' always;
|
||||
|
||||
# Handle CORS preflight requests
|
||||
if ($request_method = 'OPTIONS') {
|
||||
add_header 'Content-Length' 0;
|
||||
add_header 'Content-Type' 'text/plain; charset=utf-8';
|
||||
return 204;
|
||||
}
|
||||
}
|
||||
|
||||
# ACME Challenge Location (for Let's Encrypt)
|
||||
location ^~ /.well-known/acme-challenge/ {
|
||||
alias /var/www/html/.well-known/acme-challenge/;
|
||||
default_type 'text/plain';
|
||||
allow all;
|
||||
}
|
||||
|
||||
listen 443 ssl; # managed by Certbot
|
||||
ssl_certificate /etc/nginx/placeholder.crt; # managed by Certbot
|
||||
ssl_certificate_key /etc/nginx/placeholder.key; # managed by Certbot
|
||||
#include /etc/letsencrypt/options-ssl-nginx.conf; # managed by Certbot
|
||||
#ssl_dhparam /etc/letsencrypt/ssl-dhparams.pem; # managed by Certbot
|
||||
}
|
||||
|
||||
server {
|
||||
listen 80;
|
||||
server_name {{ domain_s3 }};
|
||||
return 404; # managed by Certbot
|
||||
}
|
||||
|
|
@ -0,0 +1,53 @@
|
|||
server {
|
||||
client_max_body_size 64M;
|
||||
|
||||
listen 80;
|
||||
server_name {{ domain_n8n }};
|
||||
|
||||
location / {
|
||||
return 301 https://$host$request_uri;
|
||||
}
|
||||
|
||||
location ^~ /.well-known/acme-challenge/ {
|
||||
alias /var/www/html/.well-known/acme-challenge/;
|
||||
default_type "text/plain";
|
||||
allow all;
|
||||
}
|
||||
}
|
||||
|
||||
server {
|
||||
client_max_body_size 64M;
|
||||
#large_client_header_buffers 4 16k;
|
||||
|
||||
listen 443 ssl http2;
|
||||
|
||||
server_name {{ domain_n8n }};
|
||||
|
||||
ssl_certificate /etc/nginx/placeholder.crt;
|
||||
ssl_certificate_key /etc/nginx/placeholder.key;
|
||||
|
||||
#add_header Strict-Transport-Security "max-age=31536000; includeSubDomains" always;
|
||||
|
||||
#auth_basic "Restricted Content";
|
||||
#auth_basic_user_file letsbe-htpasswd;
|
||||
|
||||
location / {
|
||||
proxy_pass http://0.0.0.0:3025;
|
||||
proxy_redirect off;
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Real-IP $http_cf_connecting_ip;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
proxy_set_header Upgrade $http_upgrade;
|
||||
proxy_set_header Connection "upgrade";
|
||||
#proxy_buffers 16 4k;
|
||||
#proxy_buffer_size 2k;
|
||||
}
|
||||
|
||||
location ^~ /.well-known/acme-challenge/ {
|
||||
alias /var/www/html/.well-known/acme-challenge/;
|
||||
default_type "text/plain";
|
||||
allow all;
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,233 @@
|
|||
map $http_upgrade $connection_upgrade {
|
||||
default upgrade;
|
||||
'' close;
|
||||
}
|
||||
|
||||
server {
|
||||
client_max_body_size 64M;
|
||||
|
||||
listen 80;
|
||||
server_name {{ domain_nextcloud }};
|
||||
|
||||
location / {
|
||||
return 301 https://$host$request_uri;
|
||||
}
|
||||
|
||||
location ^~ /.well-known/acme-challenge/ {
|
||||
alias /var/www/html/.well-known/acme-challenge/;
|
||||
default_type "text/plain";
|
||||
allow all;
|
||||
}
|
||||
}
|
||||
|
||||
server {
|
||||
client_max_body_size 500M;
|
||||
|
||||
listen 443 ssl http2;
|
||||
|
||||
server_name {{ domain_nextcloud }};
|
||||
|
||||
ssl_certificate /etc/nginx/placeholder.crt;
|
||||
ssl_certificate_key /etc/nginx/placeholder.key;
|
||||
|
||||
#add_header Strict-Transport-Security "max-age=31536000; includeSubDomains" always;
|
||||
|
||||
#auth_basic "Restricted Content";
|
||||
#auth_basic_user_file letsbe-htpasswd;
|
||||
|
||||
location / {
|
||||
proxy_pass http://0.0.0.0:3023;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded-Port $server_port;
|
||||
proxy_set_header X-Forwarded-Scheme $scheme;
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
#proxy_set_header X-Real-IP $http_cf_connecting_ip;
|
||||
proxy_set_header Accept-Encoding "";
|
||||
proxy_set_header Host $host;
|
||||
|
||||
client_body_buffer_size 512k;
|
||||
proxy_read_timeout 86400s;
|
||||
client_max_body_size 0;
|
||||
|
||||
# Websocket
|
||||
proxy_http_version 1.1;
|
||||
proxy_set_header Upgrade $http_upgrade;
|
||||
proxy_set_header Connection $connection_upgrade;
|
||||
}
|
||||
|
||||
#location /whiteboard/ {
|
||||
# proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
# proxy_set_header Host $host;
|
||||
|
||||
# proxy_pass http://0.0.0.0:3002
|
||||
|
||||
# proxy_http_version 1.1;
|
||||
# proxy_set_header Upgrade $http_upgrade;
|
||||
# proxy_set_header Connection "upgrade";
|
||||
#}
|
||||
|
||||
ssl_session_timeout 1d;
|
||||
ssl_session_cache shared:MozSSL:10m; # about 40000 sessions
|
||||
ssl_session_tickets off;
|
||||
|
||||
ssl_protocols TLSv1.2 TLSv1.3;
|
||||
ssl_ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-RSA-CHACHA20-POLY1305;
|
||||
ssl_prefer_server_ciphers on;
|
||||
|
||||
# Optional settings:
|
||||
|
||||
# OCSP stapling
|
||||
# ssl_stapling on;
|
||||
# ssl_stapling_verify on;
|
||||
# ssl_trusted_certificate /etc/letsencrypt/live/<your-nc-domain>/chain.pem;
|
||||
|
||||
# replace with the IP address of your resolver
|
||||
# resolver 127.0.0.1; # needed for oscp stapling: e.g. use 94.140.15.15 for adguard / 1.1.1.1 for cloudflared or 8.8.8.8 for google - you can use the same nameserver as listed in your /etc/resolv.conf file
|
||||
|
||||
location ^~ /.well-known/acme-challenge/ {
|
||||
alias /var/www/html/.well-known/acme-challenge/;
|
||||
default_type "text/plain";
|
||||
allow all;
|
||||
}
|
||||
location /.well-known/carddav {
|
||||
return 301 $scheme://$host/remote.php/dav;
|
||||
}
|
||||
location /.well-known/caldav {
|
||||
return 301 $scheme://$host/remote.php/dav;
|
||||
}
|
||||
}
|
||||
|
||||
server {
|
||||
client_max_body_size 64M;
|
||||
|
||||
listen 80;
|
||||
server_name {{ domain_collabora }};
|
||||
|
||||
location / {
|
||||
return 301 https://$host$request_uri;
|
||||
}
|
||||
|
||||
location ^~ /.well-known/acme-challenge/ {
|
||||
alias /var/www/html/.well-known/acme-challenge/;
|
||||
default_type "text/plain";
|
||||
allow all;
|
||||
}
|
||||
}
|
||||
|
||||
server {
|
||||
client_max_body_size 64M;
|
||||
#large_client_header_buffers 4 16k;
|
||||
|
||||
listen 443 ssl http2;
|
||||
|
||||
server_name {{ domain_collabora }};
|
||||
|
||||
ssl_certificate /etc/nginx/placeholder.crt;
|
||||
ssl_certificate_key /etc/nginx/placeholder.key;
|
||||
|
||||
#add_header Strict-Transport-Security "max-age=31536000; includeSubDomains" always;
|
||||
|
||||
#auth_basic "Restricted Content";
|
||||
#auth_basic_user_file letsbe-htpasswd;
|
||||
|
||||
location / {
|
||||
proxy_pass https://0.0.0.0:3044;
|
||||
proxy_http_version 1.1;
|
||||
proxy_read_timeout 3600s;
|
||||
proxy_set_header Upgrade $http_upgrade;
|
||||
proxy_set_header Connection "Upgrade";
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Forwarded-For $remote_addr;
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
add_header X-Frontend-Host $host;
|
||||
add_header Strict-Transport-Security "max-age=31536000; includeSubDomains" always;
|
||||
}
|
||||
|
||||
location ^~ /.well-known/acme-challenge/ {
|
||||
alias /var/www/html/.well-known/acme-challenge/;
|
||||
default_type "text/plain";
|
||||
allow all;
|
||||
}
|
||||
}
|
||||
|
||||
server {
|
||||
client_max_body_size 64M;
|
||||
|
||||
listen 80;
|
||||
server_name {{ domain_signaling }};
|
||||
|
||||
location / {
|
||||
return 301 https://$host$request_uri;
|
||||
}
|
||||
|
||||
location ^~ /.well-known/acme-challenge/ {
|
||||
alias /var/www/html/.well-known/acme-challenge/;
|
||||
default_type "text/plain";
|
||||
allow all;
|
||||
}
|
||||
}
|
||||
|
||||
server {
|
||||
client_max_body_size 64M;
|
||||
listen 443 ssl http2;
|
||||
server_name {{ domain_signaling }};
|
||||
|
||||
ssl_certificate /etc/nginx/placeholder.crt;
|
||||
ssl_certificate_key /etc/nginx/placeholder.key;
|
||||
|
||||
location / {
|
||||
proxy_pass http://127.0.0.1:3061;
|
||||
|
||||
proxy_http_version 1.1;
|
||||
proxy_read_timeout 3600s;
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
|
||||
# WebSocket support (required for signaling)
|
||||
proxy_set_header Upgrade $http_upgrade;
|
||||
proxy_set_header Connection "upgrade";
|
||||
|
||||
add_header Strict-Transport-Security "max-age=31536000; includeSubDomains" always;
|
||||
}
|
||||
|
||||
location ^~ /.well-known/acme-challenge/ {
|
||||
alias /var/www/html/.well-known/acme-challenge/;
|
||||
default_type "text/plain";
|
||||
allow all;
|
||||
}
|
||||
}
|
||||
|
||||
server {
|
||||
client_max_body_size 64M;
|
||||
#large_client_header_buffers 4 16k;
|
||||
listen 443 ssl http2;
|
||||
server_name {{ domain_whiteboard }};
|
||||
|
||||
ssl_certificate /etc/nginx/placeholder.crt;
|
||||
ssl_certificate_key /etc/nginx/placeholder.key;
|
||||
|
||||
#add_header Strict-Transport-Security "max-age=31536000; includeSubDomains" always;
|
||||
#auth_basic "Restricted Content";
|
||||
#auth_basic_user_file ;
|
||||
|
||||
location / {
|
||||
proxy_pass http://0.0.0.0:3060;
|
||||
proxy_http_version 1.1;
|
||||
proxy_read_timeout 3600s;
|
||||
proxy_set_header Upgrade $http_upgrade;
|
||||
proxy_set_header Connection "Upgrade";
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Forwarded-For $remote_addr;
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
add_header X-Frontend-Host $host;
|
||||
add_header Strict-Transport-Security "max-age=31536000; includeSubDomains" always;
|
||||
}
|
||||
|
||||
location ^~ /.well-known/acme-challenge/ {
|
||||
alias /var/www/html/.well-known/acme-challenge/;
|
||||
default_type "text/plain";
|
||||
allow all;
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,67 @@
|
|||
server {
|
||||
client_max_body_size 64M;
|
||||
|
||||
listen 80;
|
||||
server_name {{ domain_nocodb }};
|
||||
|
||||
location / {
|
||||
return 301 https://$host$request_uri;
|
||||
}
|
||||
|
||||
location ~ /.well-known/acme-challenge/ {
|
||||
alias /var/www/html/.well-known/acme-challenge/;
|
||||
default_type "text/plain";
|
||||
allow all;
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
}
|
||||
|
||||
server {
|
||||
client_max_body_size 64M;
|
||||
|
||||
listen 443 ssl http2;
|
||||
server_name {{ domain_nocodb }};
|
||||
|
||||
# SSL Certificates (to be updated by Certbot)
|
||||
|
||||
# Uncomment this if you want to enforce HSTS
|
||||
# add_header Strict-Transport-Security "max-age=31536000; includeSubDomains" always;
|
||||
|
||||
# Allow embedding in iframe
|
||||
add_header X-Frame-Options "ALLOWALL";
|
||||
add_header Content-Security-Policy "frame-ancestors *;";
|
||||
|
||||
# CORS Headers
|
||||
add_header 'Access-Control-Allow-Origin' '*';
|
||||
add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS, PUT, DELETE';
|
||||
add_header 'Access-Control-Allow-Headers' 'Authorization, Content-Type';
|
||||
|
||||
location / {
|
||||
proxy_pass http://0.0.0.0:3057;
|
||||
proxy_redirect off;
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
|
||||
# Support WebSocket
|
||||
proxy_http_version 1.1;
|
||||
proxy_set_header Upgrade $http_upgrade;
|
||||
proxy_set_header Connection "upgrade";
|
||||
proxy_buffer_size 128k;
|
||||
proxy_buffers 4 256k;
|
||||
proxy_busy_buffers_size 256k;
|
||||
}
|
||||
|
||||
location ~ /.well-known/acme-challenge/ {
|
||||
alias /var/www/html/.well-known/acme-challenge/;
|
||||
default_type "text/plain";
|
||||
allow all;
|
||||
}
|
||||
|
||||
ssl_certificate /etc/nginx/placeholder.crt; # managed by Certbot
|
||||
ssl_certificate_key /etc/nginx/placeholder.key; # managed by Certbot
|
||||
}
|
||||
|
|
@ -0,0 +1,53 @@
|
|||
server {
|
||||
client_max_body_size 64M;
|
||||
|
||||
listen 80;
|
||||
server_name {{ domain_odoo }};
|
||||
|
||||
location / {
|
||||
return 301 https://$host$request_uri;
|
||||
}
|
||||
|
||||
location ^~ /.well-known/acme-challenge/ {
|
||||
alias /var/www/html/.well-known/acme-challenge/;
|
||||
default_type "text/plain";
|
||||
allow all;
|
||||
}
|
||||
}
|
||||
|
||||
server {
|
||||
client_max_body_size 64M;
|
||||
#large_client_header_buffers 4 16k;
|
||||
|
||||
listen 443 ssl http2;
|
||||
|
||||
server_name {{ domain_odoo }};
|
||||
|
||||
ssl_certificate /etc/nginx/placeholder.crt;
|
||||
ssl_certificate_key /etc/nginx/placeholder.key;
|
||||
|
||||
#add_header Strict-Transport-Security "max-age=31536000; includeSubDomains" always;
|
||||
|
||||
#auth_basic "Restricted Content";
|
||||
#auth_basic_user_file letsbe-htpasswd;
|
||||
|
||||
location / {
|
||||
proxy_pass http://0.0.0.0:3019;
|
||||
proxy_redirect off;
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Real-IP $http_cf_connecting_ip;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
proxy_set_header Upgrade $http_upgrade;
|
||||
proxy_set_header Connection "upgrade";
|
||||
#proxy_buffers 16 4k;
|
||||
#proxy_buffer_size 2k;
|
||||
}
|
||||
|
||||
location ^~ /.well-known/acme-challenge/ {
|
||||
alias /var/www/html/.well-known/acme-challenge/;
|
||||
default_type "text/plain";
|
||||
allow all;
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,53 @@
|
|||
server {
|
||||
client_max_body_size 64M;
|
||||
|
||||
listen 80;
|
||||
server_name {{ domain_penpot }};
|
||||
|
||||
location / {
|
||||
return 301 https://$host$request_uri;
|
||||
}
|
||||
|
||||
location ^~ /.well-known/acme-challenge/ {
|
||||
alias /var/www/html/.well-known/acme-challenge/;
|
||||
default_type "text/plain";
|
||||
allow all;
|
||||
}
|
||||
}
|
||||
|
||||
server {
|
||||
client_max_body_size 64M;
|
||||
#large_client_header_buffers 4 16k;
|
||||
|
||||
listen 443 ssl http2;
|
||||
|
||||
server_name {{ domain_penpot }};
|
||||
|
||||
ssl_certificate /etc/nginx/placeholder.crt;
|
||||
ssl_certificate_key /etc/nginx/placeholder.key;
|
||||
|
||||
#add_header Strict-Transport-Security "max-age=31536000; includeSubDomains" always;
|
||||
|
||||
#auth_basic "Restricted Content";
|
||||
#auth_basic_user_file letsbe-htpasswd;
|
||||
|
||||
location / {
|
||||
proxy_pass http://0.0.0.0:3021;
|
||||
proxy_redirect off;
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Real-IP $http_cf_connecting_ip;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
proxy_set_header Upgrade $http_upgrade;
|
||||
proxy_set_header Connection "upgrade";
|
||||
#proxy_buffers 16 4k;
|
||||
#proxy_buffer_size 2k;
|
||||
}
|
||||
|
||||
location ^~ /.well-known/acme-challenge/ {
|
||||
alias /var/www/html/.well-known/acme-challenge/;
|
||||
default_type "text/plain";
|
||||
allow all;
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,46 @@
|
|||
server {
|
||||
client_max_body_size 64M;
|
||||
|
||||
listen 80;
|
||||
server_name {{ domain_portainer }};
|
||||
|
||||
location / {
|
||||
return 301 https://$host$request_uri;
|
||||
}
|
||||
|
||||
location ^~ /.well-known/acme-challenge/ {
|
||||
alias /var/www/html/.well-known/acme-challenge/;
|
||||
default_type "text/plain";
|
||||
allow all;
|
||||
}
|
||||
}
|
||||
|
||||
server {
|
||||
client_max_body_size 64M;
|
||||
|
||||
listen 443 ssl http2;
|
||||
server_name {{ domain_portainer }};
|
||||
|
||||
ssl_certificate /etc/nginx/placeholder.crt;
|
||||
ssl_certificate_key /etc/nginx/placeholder.key;
|
||||
|
||||
location / {
|
||||
proxy_pass http://127.0.0.1:9000;
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
proxy_set_header X-Forwarded-Port 443;
|
||||
|
||||
# WebSocket support (used by Portainer console/exec)
|
||||
proxy_http_version 1.1;
|
||||
proxy_set_header Upgrade $http_upgrade;
|
||||
proxy_set_header Connection "upgrade";
|
||||
}
|
||||
|
||||
location ^~ /.well-known/acme-challenge/ {
|
||||
alias /var/www/html/.well-known/acme-challenge/;
|
||||
default_type "text/plain";
|
||||
allow all;
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,61 @@
|
|||
# poste.io mail server — Jinja2-templated nginx vhost.

# Port 80: no blanket HTTPS redirect here (presumably poste handles its own
# HTTP behaviour — confirm); only ACME challenges are proxied through to
# poste's own HTTP listener instead of the shared webroot.
server {
    client_max_body_size 64M;

    listen 80;
    server_name {{ domain_poste }};

    location ^~ /.well-known/acme-challenge/ {
        # Fix: upstream was 0.0.0.0:3003 — the unspecified address is not a
        # valid connect target; use loopback explicitly.
        proxy_pass http://127.0.0.1:3003;
        proxy_redirect off;
        proxy_set_header Host $host;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
        default_type "text/plain";
        allow all;
    }
}

# Port 443: TLS termination, proxied to poste's HTTPS listener.
server {
    client_max_body_size 64M;
    #large_client_header_buffers 4 16k;

    listen 443 ssl http2;

    server_name {{ domain_poste }};

    # Placeholder certs; replaced once certbot has issued real ones.
    ssl_certificate /etc/nginx/placeholder.crt;
    ssl_certificate_key /etc/nginx/placeholder.key;

    #add_header Strict-Transport-Security "max-age=31536000; includeSubDomains" always;

    #auth_basic "Restricted Content";
    #auth_basic_user_file letsbe-htpasswd;

    location / {
        # Fix: upstream was https://0.0.0.0:3004 — use loopback explicitly.
        proxy_pass https://127.0.0.1:3004;
        proxy_buffering off;
        proxy_redirect off;
        # Fix: the Upgrade/Connection headers below require HTTP/1.1, but
        # nginx proxies with HTTP/1.0 unless told otherwise.
        proxy_http_version 1.1;
        proxy_set_header Host $host;
        # Fix: X-Real-IP was set twice ($remote_addr, then
        # $http_cf_connecting_ip), which sends two conflicting header lines.
        # Keep the direct peer address; re-enable the Cloudflare variant
        # instead only if all traffic is fronted by Cloudflare.
        proxy_set_header X-Real-IP $remote_addr;
        #proxy_set_header X-Real-IP $http_cf_connecting_ip;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
        #proxy_buffers 16 4k;
        #proxy_buffer_size 2k;
    }

    location ^~ /.well-known/acme-challenge/ {
        # Fix: upstream was 0.0.0.0:3003 — use loopback explicitly.
        proxy_pass http://127.0.0.1:3003;
        proxy_redirect off;
        proxy_set_header Host $host;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
        default_type "text/plain";
        allow all;
    }
}
|
||||
|
|
@ -0,0 +1,51 @@
|
|||
# Redash — Jinja2-templated nginx vhost.

# Port 80: redirect to HTTPS; ACME challenges served from the shared webroot.
server {
    client_max_body_size 64M;

    listen 80;
    server_name {{ domain_redash }};

    location / {
        return 301 https://$host$request_uri;
    }

    location ^~ /.well-known/acme-challenge/ {
        alias /var/www/html/.well-known/acme-challenge/;
        default_type "text/plain";
        allow all;
    }
}

# Port 443: TLS termination + reverse proxy to the redash container.
server {
    client_max_body_size 64M;

    listen 443 ssl http2;
    server_name {{ domain_redash }};

    # Placeholder certs; replaced once certbot has issued real ones.
    ssl_certificate /etc/nginx/placeholder.crt;
    ssl_certificate_key /etc/nginx/placeholder.key;

    #add_header Strict-Transport-Security "max-age=31536000; includeSubDomains" always;

    location / {
        # Fix: upstream was 0.0.0.0:3064 — the unspecified address is not a
        # valid connect target; use loopback explicitly.
        proxy_pass http://127.0.0.1:3064;
        proxy_redirect off;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";

        # Day-long read timeout keeps long-poll/WebSocket query-result
        # connections alive.
        proxy_connect_timeout 30s;
        proxy_read_timeout 86400s;
        proxy_send_timeout 30s;
        proxy_http_version 1.1;
    }

    location ^~ /.well-known/acme-challenge/ {
        alias /var/www/html/.well-known/acme-challenge/;
        default_type "text/plain";
        allow all;
    }
}
|
||||
|
|
@ -0,0 +1,52 @@
|
|||
# S3 / MinIO — Jinja2-templated nginx vhost (certbot-style layout).
server {
    # Unlimited body size: S3 multipart uploads can be arbitrarily large.
    client_max_body_size 0;
    server_name {{ domain_s3 }};

    location / {
        proxy_pass http://127.0.0.1:9000; # Proxy to MinIO or your S3-compatible service
        proxy_set_header Host $http_host; # Essential for S3 bucket operations
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;

        # Remove existing cors headers from MinIO to prevent duplicates
        proxy_hide_header Access-Control-Allow-Origin;

        # CORS Settings
        add_header 'Access-Control-Allow-Origin' '*' always;
        add_header 'Access-Control-Allow-Methods' 'GET, POST, PUT, DELETE, OPTIONS' always;
        add_header 'Access-Control-Allow-Headers' '*' always;
        add_header 'Access-Control-Expose-Headers' 'Origin, Content-Type, Content-MD5, Content-Disposition, ETag' always;

        # Handle CORS preflight requests.
        # Fix: add_header directives inside "if" REPLACE the ones inherited
        # from the location level, so the 204 preflight response previously
        # carried no Access-Control-* headers at all and browser preflights
        # failed. The CORS headers must be repeated inside the if block.
        if ($request_method = 'OPTIONS') {
            add_header 'Access-Control-Allow-Origin' '*' always;
            add_header 'Access-Control-Allow-Methods' 'GET, POST, PUT, DELETE, OPTIONS' always;
            add_header 'Access-Control-Allow-Headers' '*' always;
            add_header 'Content-Length' 0;
            add_header 'Content-Type' 'text/plain; charset=utf-8';
            return 204;
        }
    }

    # ACME Challenge Location (for Let's Encrypt)
    location ^~ /.well-known/acme-challenge/ {
        alias /var/www/html/.well-known/acme-challenge/;
        default_type 'text/plain';
        allow all;
    }

    listen 443 ssl; # managed by Certbot
    ssl_certificate /etc/nginx/placeholder.crt; # managed by Certbot
    ssl_certificate_key /etc/nginx/placeholder.key; # managed by Certbot
    # NOTE(review): these includes assume certbot has already written its
    # option files; other templates in this role comment them out until
    # certs are issued — confirm the playbook ordering.
    include /etc/letsencrypt/options-ssl-nginx.conf; # managed by Certbot
    ssl_dhparam /etc/letsencrypt/ssl-dhparams.pem; # managed by Certbot
}

# Port 80: certbot-managed stub.
server {
    listen 80;
    server_name {{ domain_s3 }};
    return 404; # managed by Certbot
}
|
||||
|
|
@ -0,0 +1,53 @@
|
|||
# Squidex CMS — Jinja2-templated nginx vhost.

# Port 80: redirect to HTTPS; ACME challenges served from the shared webroot.
server {
    client_max_body_size 64M;

    listen 80;
    server_name {{ domain_squidex }};

    location / {
        return 301 https://$host$request_uri;
    }

    location ^~ /.well-known/acme-challenge/ {
        alias /var/www/html/.well-known/acme-challenge/;
        default_type "text/plain";
        allow all;
    }
}

# Port 443: TLS termination + reverse proxy to the squidex container.
server {
    client_max_body_size 64M;
    #large_client_header_buffers 4 16k;

    listen 443 ssl http2;

    server_name {{ domain_squidex }};

    # Placeholder certs; replaced once certbot has issued real ones.
    ssl_certificate /etc/nginx/placeholder.crt;
    ssl_certificate_key /etc/nginx/placeholder.key;

    #add_header Strict-Transport-Security "max-age=31536000; includeSubDomains" always;

    #auth_basic "Restricted Content";
    #auth_basic_user_file letsbe-htpasswd;

    location / {
        # Fix: upstream was 0.0.0.0:3002 — the unspecified address is not a
        # valid connect target; use loopback explicitly.
        proxy_pass http://127.0.0.1:3002;
        proxy_redirect off;
        # Fix: the Upgrade/Connection headers below require HTTP/1.1, but
        # nginx proxies with HTTP/1.0 unless told otherwise.
        proxy_http_version 1.1;
        proxy_set_header Host $host;
        # Fix: X-Real-IP was set twice ($remote_addr, then
        # $http_cf_connecting_ip), sending two conflicting header lines.
        # Keep the direct peer; use the CF variant instead only when all
        # traffic is fronted by Cloudflare.
        proxy_set_header X-Real-IP $remote_addr;
        #proxy_set_header X-Real-IP $http_cf_connecting_ip;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
        #proxy_buffers 16 4k;
        #proxy_buffer_size 2k;
    }

    location ^~ /.well-known/acme-challenge/ {
        alias /var/www/html/.well-known/acme-challenge/;
        default_type "text/plain";
        allow all;
    }
}
|
||||
|
|
@ -0,0 +1,46 @@
|
|||
# PDF service — Jinja2-templated nginx vhost.

# Port 80: redirect to HTTPS; ACME challenges served from the shared webroot.
server {
    client_max_body_size 64M;

    listen 80;
    server_name {{ domain_pdf }};

    location / {
        return 301 https://$host$request_uri;
    }

    location ^~ /.well-known/acme-challenge/ {
        alias /var/www/html/.well-known/acme-challenge/;
        default_type "text/plain";
        allow all;
    }
}

# Port 443: TLS termination + reverse proxy to the PDF container on 8080.
server {
    client_max_body_size 64M;

    listen 443 ssl http2;
    server_name {{ domain_pdf }};

    # Placeholder certs; replaced once certbot has issued real ones.
    ssl_certificate /etc/nginx/placeholder.crt;
    ssl_certificate_key /etc/nginx/placeholder.key;

    location / {
        proxy_pass http://127.0.0.1:8080;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;

        # For websocket support if needed
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
        proxy_read_timeout 86400;
    }

    location ^~ /.well-known/acme-challenge/ {
        alias /var/www/html/.well-known/acme-challenge/;
        default_type "text/plain";
        allow all;
    }
}
|
||||
|
|
@ -0,0 +1,69 @@
|
|||
# Typebot builder + viewer — Jinja2-templated nginx vhosts (certbot layout).

# Builder (HTTPS)
server {
    client_max_body_size 64M;
    server_name {{ domain_botlab }};

    location / {
        proxy_pass http://172.20.25.3:3000; # Backend for typebot-builder
        proxy_http_version 1.1;
        proxy_cache_bypass $http_upgrade;
        proxy_redirect off;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
    }

    location ^~ /.well-known/acme-challenge/ {
        alias /var/www/html/.well-known/acme-challenge/;
        default_type "text/plain";
        allow all;
    }

    listen 443 ssl; # managed by Certbot
    ssl_certificate /etc/nginx/placeholder.crt; # managed by Certbot
    ssl_certificate_key /etc/nginx/placeholder.key; # managed by Certbot
    #include /etc/letsencrypt/options-ssl-nginx.conf; # managed by Certbot
    #ssl_dhparam /etc/letsencrypt/ssl-dhparams.pem; # managed by Certbot
}

# Builder (HTTP): certbot-managed stub.
server {
    listen 80;
    server_name {{ domain_botlab }};
    return 404; # managed by Certbot
}

# Viewer (HTTPS)
server {
    client_max_body_size 64M;
    server_name {{ domain_bot_viewer }};

    location / {
        proxy_pass http://172.20.25.4:3000; # Backend for bot-viewer
        # Fix: the Upgrade/Connection headers below require HTTP/1.1, but
        # this block (unlike the builder's) proxied with the default
        # HTTP/1.0, breaking WebSocket upgrades.
        proxy_http_version 1.1;
        proxy_redirect off;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
    }

    location ^~ /.well-known/acme-challenge/ {
        alias /var/www/html/.well-known/acme-challenge/;
        default_type "text/plain";
        allow all;
    }

    listen 443 ssl; # managed by Certbot
    ssl_certificate /etc/nginx/placeholder.crt; # managed by Certbot
    ssl_certificate_key /etc/nginx/placeholder.key; # managed by Certbot
    #include /etc/letsencrypt/options-ssl-nginx.conf; # managed by Certbot
    #ssl_dhparam /etc/letsencrypt/ssl-dhparams.pem; # managed by Certbot
}

# Viewer (HTTP): certbot-managed stub.
server {
    listen 80;
    server_name {{ domain_bot_viewer }};
    return 404; # managed by Certbot
}
|
||||
|
|
@ -0,0 +1,53 @@
|
|||
# Umami analytics — Jinja2-templated nginx vhost.

# Port 80: redirect to HTTPS; ACME challenges served from the shared webroot.
server {
    client_max_body_size 64M;

    listen 80;
    server_name {{ domain_umami }};

    location / {
        return 301 https://$host$request_uri;
    }

    location ^~ /.well-known/acme-challenge/ {
        alias /var/www/html/.well-known/acme-challenge/;
        default_type "text/plain";
        allow all;
    }
}

# Port 443: TLS termination + reverse proxy to the umami container.
server {
    client_max_body_size 64M;
    #large_client_header_buffers 4 16k;

    listen 443 ssl http2;

    server_name {{ domain_umami }};

    # Placeholder certs; replaced once certbot has issued real ones.
    ssl_certificate /etc/nginx/placeholder.crt;
    ssl_certificate_key /etc/nginx/placeholder.key;

    #add_header Strict-Transport-Security "max-age=31536000; includeSubDomains" always;

    #auth_basic "Restricted Content";
    #auth_basic_user_file letsbe-htpasswd;

    location / {
        # Fix: upstream was 0.0.0.0:3008 — the unspecified address is not a
        # valid connect target; use loopback explicitly.
        proxy_pass http://127.0.0.1:3008;
        proxy_redirect off;
        # Fix: the Upgrade/Connection headers below require HTTP/1.1, but
        # nginx proxies with HTTP/1.0 unless told otherwise.
        proxy_http_version 1.1;
        proxy_set_header Host $host;
        # Fix: X-Real-IP was set twice ($remote_addr, then
        # $http_cf_connecting_ip), sending two conflicting header lines.
        # Keep the direct peer; use the CF variant instead only when all
        # traffic is fronted by Cloudflare.
        proxy_set_header X-Real-IP $remote_addr;
        #proxy_set_header X-Real-IP $http_cf_connecting_ip;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
        #proxy_buffers 16 4k;
        #proxy_buffer_size 2k;
    }

    location ^~ /.well-known/acme-challenge/ {
        alias /var/www/html/.well-known/acme-challenge/;
        default_type "text/plain";
        allow all;
    }
}
|
||||
|
|
@ -0,0 +1,53 @@
|
|||
# Uptime Kuma — Jinja2-templated nginx vhost.

# Port 80: redirect to HTTPS; ACME challenges served from the shared webroot.
server {
    client_max_body_size 64M;

    listen 80;
    server_name {{ domain_uptime_kuma }};

    location / {
        return 301 https://$host$request_uri;
    }

    location ^~ /.well-known/acme-challenge/ {
        alias /var/www/html/.well-known/acme-challenge/;
        default_type "text/plain";
        allow all;
    }
}

# Port 443: TLS termination + reverse proxy to the uptime-kuma container.
server {
    client_max_body_size 64M;
    #large_client_header_buffers 4 16k;

    listen 443 ssl http2;

    server_name {{ domain_uptime_kuma }};

    # Placeholder certs; replaced once certbot has issued real ones.
    ssl_certificate /etc/nginx/placeholder.crt;
    ssl_certificate_key /etc/nginx/placeholder.key;

    #add_header Strict-Transport-Security "max-age=31536000; includeSubDomains" always;

    #auth_basic "Restricted Content";
    #auth_basic_user_file letsbe-htpasswd;

    location / {
        # Fix: upstream was 0.0.0.0:3005 — the unspecified address is not a
        # valid connect target; use loopback explicitly.
        proxy_pass http://127.0.0.1:3005;
        proxy_redirect off;
        # Fix: Uptime Kuma's UI runs over WebSockets; the Upgrade/Connection
        # headers below require HTTP/1.1, but nginx proxied with HTTP/1.0.
        proxy_http_version 1.1;
        proxy_set_header Host $host;
        # Fix: X-Real-IP was set twice ($remote_addr, then
        # $http_cf_connecting_ip), sending two conflicting header lines.
        proxy_set_header X-Real-IP $remote_addr;
        #proxy_set_header X-Real-IP $http_cf_connecting_ip;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
        #proxy_buffers 16 4k;
        #proxy_buffer_size 2k;
    }

    location ^~ /.well-known/acme-challenge/ {
        alias /var/www/html/.well-known/acme-challenge/;
        default_type "text/plain";
        allow all;
    }
}
|
||||
|
|
@ -0,0 +1,68 @@
|
|||
# Vaultwarden — Jinja2-templated nginx vhost.
# 525M body limit accommodates large attachment/Send uploads.

# Port 80: redirect to HTTPS; ACME challenges served from the shared webroot.
server {
    client_max_body_size 525M;

    listen 80;
    server_name vault.{{ domain }};

    location / {
        return 301 https://$host$request_uri;
    }

    location ^~ /.well-known/acme-challenge/ {
        alias /var/www/html/.well-known/acme-challenge/;
        default_type "text/plain";
        allow all;
    }
}

# Port 443: TLS termination; main app on 3071, WebSocket hub on 3072.
server {
    client_max_body_size 525M;

    listen 443 ssl http2;
    server_name vault.{{ domain }};

    # Placeholder certs; replaced once certbot has issued real ones.
    ssl_certificate /etc/nginx/placeholder.crt;
    ssl_certificate_key /etc/nginx/placeholder.key;

    # Main application
    location / {
        proxy_pass http://127.0.0.1:3071;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_set_header X-Forwarded-Port 443;

        # WebSocket support for live sync
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
    }

    # WebSocket notifications endpoint
    location /notifications/hub {
        proxy_pass http://127.0.0.1:3072;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;

        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
    }

    # Negotiate goes to the main app, not the hub.
    location /notifications/hub/negotiate {
        proxy_pass http://127.0.0.1:3071;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
    }

    location ^~ /.well-known/acme-challenge/ {
        alias /var/www/html/.well-known/acme-challenge/;
        default_type "text/plain";
        allow all;
    }
}
|
||||
|
|
@ -0,0 +1,53 @@
|
|||
# Whiteboard — Jinja2-templated nginx vhost.

# Port 80: redirect to HTTPS; ACME challenges served from the shared webroot.
server {
    client_max_body_size 64M;

    # Fix: this vhost had no "listen"/"server_name" at all, so it only
    # matched as an implicit default server. Bind it to the whiteboard
    # domain like every other port-80 template. The certbot-generated
    # "if ($host = ...)" redirect was dropped: it ran before location
    # matching and would have redirected ACME challenges too; "location /"
    # below performs the HTTPS redirect while leaving the challenge path
    # reachable.
    listen 80;
    server_name {{ domain_whiteboard }};

    location / {
        return 301 https://$host$request_uri;
    }

    location ^~ /.well-known/acme-challenge/ {
        alias /var/www/html/.well-known/acme-challenge/;
        default_type "text/plain";
        allow all;
    }
}

# Port 443: TLS termination + reverse proxy to the whiteboard container.
server {
    client_max_body_size 64M;
    #large_client_header_buffers 4 16k;

    listen 443 ssl http2;

    server_name {{ domain_whiteboard }};
    # NOTE(review): unlike the other templates, this vhost hardcodes live
    # certbot paths instead of the placeholder certs — nginx will fail to
    # start on a fresh host until these certs exist. Confirm the playbook
    # issues them before enabling this template.
    ssl_certificate /etc/letsencrypt/live/whiteboard.letsbe.solutions/fullchain.pem;
    ssl_certificate_key /etc/letsencrypt/live/whiteboard.letsbe.solutions/privkey.pem;

    #add_header Strict-Transport-Security "max-age=31536000; includeSubDomains" always;

    #auth_basic "Restricted Content";
    #auth_basic_user_file letsbe-htpasswd;

    location / {
        # Fix: upstream was 0.0.0.0:4014 — the unspecified address is not a
        # valid connect target; use loopback explicitly.
        proxy_pass http://127.0.0.1:4014;
        proxy_http_version 1.1;
        proxy_read_timeout 3600s;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "Upgrade";
        proxy_set_header Host $host;
        # NOTE(review): X-Forwarded-For carries only $remote_addr here
        # (not $proxy_add_x_forwarded_for as elsewhere) — confirm intent.
        proxy_set_header X-Forwarded-For $remote_addr;
        proxy_set_header X-Forwarded-Proto $scheme;
        add_header X-Frontend-Host $host;
        add_header Strict-Transport-Security "max-age=31536000; includeSubDomains" always;
    }

    location ^~ /.well-known/acme-challenge/ {
        alias /var/www/html/.well-known/acme-challenge/;
        default_type "text/plain";
        allow all;
    }
}
|
||||
|
|
@ -0,0 +1,53 @@
|
|||
# Windmill — Jinja2-templated nginx vhost.

# Port 80: redirect to HTTPS; ACME challenges served from the shared webroot.
server {
    client_max_body_size 64M;

    listen 80;
    server_name {{ domain_windmill }};

    location / {
        return 301 https://$host$request_uri;
    }

    location ^~ /.well-known/acme-challenge/ {
        alias /var/www/html/.well-known/acme-challenge/;
        default_type "text/plain";
        allow all;
    }
}

# Port 443: TLS termination + reverse proxy to the windmill container.
server {
    client_max_body_size 64M;
    #large_client_header_buffers 4 16k;

    listen 443 ssl http2;

    server_name {{ domain_windmill }};

    # Placeholder certs; replaced once certbot has issued real ones.
    ssl_certificate /etc/nginx/placeholder.crt;
    ssl_certificate_key /etc/nginx/placeholder.key;

    #add_header Strict-Transport-Security "max-age=31536000; includeSubDomains" always;

    #auth_basic "Restricted Content";
    #auth_basic_user_file letsbe-htpasswd;

    location / {
        # Fix: upstream was 0.0.0.0:3014 — the unspecified address is not a
        # valid connect target; use loopback explicitly.
        proxy_pass http://127.0.0.1:3014;
        proxy_redirect off;
        # Fix: the Upgrade/Connection headers below require HTTP/1.1, but
        # nginx proxies with HTTP/1.0 unless told otherwise.
        proxy_http_version 1.1;
        proxy_set_header Host $host;
        # Fix: X-Real-IP was set twice ($remote_addr, then
        # $http_cf_connecting_ip), sending two conflicting header lines.
        proxy_set_header X-Real-IP $remote_addr;
        #proxy_set_header X-Real-IP $http_cf_connecting_ip;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
        #proxy_buffers 16 4k;
        #proxy_buffer_size 2k;
    }

    location ^~ /.well-known/acme-challenge/ {
        alias /var/www/html/.well-known/acme-challenge/;
        default_type "text/plain";
        allow all;
    }
}
|
||||
|
|
@ -0,0 +1,53 @@
|
|||
# WordPress — Jinja2-templated nginx vhost.

# Port 80: redirect to HTTPS; ACME challenges served from the shared webroot.
server {
    client_max_body_size 64M;

    listen 80;
    server_name {{ domain_wordpress }};

    location / {
        return 301 https://$host$request_uri;
    }

    location ^~ /.well-known/acme-challenge/ {
        alias /var/www/html/.well-known/acme-challenge/;
        default_type "text/plain";
        allow all;
    }
}

# Port 443: TLS termination + reverse proxy to the wordpress container.
server {
    client_max_body_size 64M;
    #large_client_header_buffers 4 16k;

    listen 443 ssl http2;

    server_name {{ domain_wordpress }};

    # Placeholder certs; replaced once certbot has issued real ones.
    ssl_certificate /etc/nginx/placeholder.crt;
    ssl_certificate_key /etc/nginx/placeholder.key;

    #add_header Strict-Transport-Security "max-age=31536000; includeSubDomains" always;

    #auth_basic "Restricted Content";
    #auth_basic_user_file letsbe-htpasswd;

    location / {
        # Fix: upstream was 0.0.0.0:3001 — the unspecified address is not a
        # valid connect target; use loopback explicitly.
        proxy_pass http://127.0.0.1:3001;
        proxy_redirect off;
        # Fix: the Upgrade/Connection headers below require HTTP/1.1, but
        # nginx proxies with HTTP/1.0 unless told otherwise.
        proxy_http_version 1.1;
        proxy_set_header Host $host;
        # Fix: X-Real-IP was set twice ($remote_addr, then
        # $http_cf_connecting_ip), sending two conflicting header lines.
        proxy_set_header X-Real-IP $remote_addr;
        #proxy_set_header X-Real-IP $http_cf_connecting_ip;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
        #proxy_buffers 16 4k;
        #proxy_buffer_size 2k;
    }

    location ^~ /.well-known/acme-challenge/ {
        alias /var/www/html/.well-known/acme-challenge/;
        default_type "text/plain";
        allow all;
    }
}
|
||||
|
|
@ -0,0 +1,472 @@
|
|||
#!/bin/bash
# =============================================================================
# LetsBe Backup Script
# =============================================================================
# Backs up databases, env files, nginx configs, and tool configs.
# Uploads to rclone remote if configured.
# Rotates: 7 daily + 4 weekly backups.
#
# Usage:
#   /opt/letsbe/scripts/backups.sh
#
# Cron (installed by setup.sh):
#   0 2 * * * /bin/bash /opt/letsbe/scripts/backups.sh >> /opt/letsbe/logs/backup.log 2>&1
# =============================================================================

# -u: abort on unset variables; pipefail: a failing dump inside "cmd | gzip"
# must surface. Deliberately NOT -e: individual backup failures are collected
# in ERRORS and the run continues with the remaining services.
set -uo pipefail

# =============================================================================
# CONFIGURATION
# =============================================================================

LETSBE_BASE="/opt/letsbe"
BACKUP_DIR="/tmp/letsbe-backups"          # local staging area for artifacts
DATE=$(date +%Y%m%d_%H%M%S)               # timestamp shared by all artifacts
DAY_OF_WEEK=$(date +%u)                   # 1=Monday, 7=Sunday
RCLONE_REMOTE="backup"
LOG_FILE="${LETSBE_BASE}/logs/backup.log"
STATUS_FILE="${LETSBE_BASE}/config/backup-status.json"

# Ensure the staging and bookkeeping directories exist.
mkdir -p "$BACKUP_DIR" "${LETSBE_BASE}/logs" "${LETSBE_BASE}/config"

# Run-wide tracking: collected error messages and a count of artifacts.
ERRORS=()
FILES_BACKED_UP=0
|
||||
|
||||
# Timestamped logger to stdout.
log() {
    printf '%s\n' "[$(date '+%Y-%m-%d %H:%M:%S')] $*"
}

# Timestamped logger to stderr; also records the message in the run-wide
# ERRORS array for the end-of-run summary.
log_error() {
    local message="$*"
    printf '%s\n' "[$(date '+%Y-%m-%d %H:%M:%S')] ERROR: $message" >&2
    ERRORS+=("$message")
}
|
||||
|
||||
# =============================================================================
|
||||
# BACKUP FUNCTIONS
|
||||
# =============================================================================
|
||||
|
||||
# Backup a PostgreSQL database from a running container
|
||||
# Dump one PostgreSQL database from a running container into a gzipped SQL
# file under $BACKUP_DIR. Skips silently when no matching container is
# running; records failures via log_error.
#   $1 container name (suffix)  $2 database name
#   $3 db user (default: postgres)  $4 label used in the output filename
backup_postgres() {
    local target=$1 database=$2 role=${3:-postgres} tag=$4

    # Resolve the real container name: deployments may prefix the base name
    # (e.g. "<customer>-chatwoot-postgres"), so match on the suffix.
    local running
    running=$(docker ps --format '{{.Names}}' | grep -E "(^|-)${target}$" | head -1)
    [[ -z "$running" ]] && return 0  # service not deployed here — skip

    log "Backing up PostgreSQL: $tag ($running -> $database)..."
    local dump="${BACKUP_DIR}/pg_${tag}_${DATE}.sql.gz"

    if ! docker exec "$running" pg_dump -U "$role" "$database" 2>/dev/null | gzip > "$dump"; then
        rm -f "$dump"
        log_error "PostgreSQL dump failed for $tag ($running)"
        return
    fi

    # A bare gzip header is ~20 bytes; under 100 bytes means the dump
    # produced no real content (GNU stat first, BSD stat as fallback).
    local bytes
    bytes=$(stat -c%s "$dump" 2>/dev/null || stat -f%z "$dump" 2>/dev/null)
    if [[ $bytes -gt 100 ]]; then
        FILES_BACKED_UP=$((FILES_BACKED_UP + 1))
        log " OK: $dump"
    else
        rm -f "$dump"
        log_error "PostgreSQL dump empty for $tag ($running)"
    fi
}
|
||||
|
||||
# Backup a MySQL/MariaDB database from a running container
|
||||
# Dump one MySQL/MariaDB database from a running container into a gzipped
# SQL file under $BACKUP_DIR. Skips silently when no matching container is
# running; records failures via log_error.
#   $1 container name (suffix)  $2 database name
#   $3 db user (default: root)  $4 password  $5 label for the filename
backup_mysql() {
    local target=$1 database=$2 role=${3:-root} secret=$4 tag=$5

    # Resolve the real container name (deployments may prefix the base name).
    local running
    running=$(docker ps --format '{{.Names}}' | grep -E "(^|-)${target}$" | head -1)
    [[ -z "$running" ]] && return 0  # service not deployed here — skip

    log "Backing up MySQL: $tag ($running -> $database)..."
    local dump="${BACKUP_DIR}/mysql_${tag}_${DATE}.sql.gz"

    # --single-transaction: consistent snapshot without locking InnoDB tables.
    if ! docker exec "$running" mysqldump -u"$role" -p"$secret" --single-transaction "$database" 2>/dev/null | gzip > "$dump"; then
        rm -f "$dump"
        log_error "MySQL dump failed for $tag ($running)"
        return
    fi

    # Under 100 bytes = gzip header only, i.e. an empty dump.
    local bytes
    bytes=$(stat -c%s "$dump" 2>/dev/null || stat -f%z "$dump" 2>/dev/null)
    if [[ $bytes -gt 100 ]]; then
        FILES_BACKED_UP=$((FILES_BACKED_UP + 1))
        log " OK: $dump"
    else
        rm -f "$dump"
        log_error "MySQL dump empty for $tag ($running)"
    fi
}
|
||||
|
||||
# Backup a MongoDB database from a running container
|
||||
# Dump one MongoDB database from a running container into a gzipped archive
# under $BACKUP_DIR. Skips silently when no matching container is running;
# records failures via log_error.
#   $1 container name (suffix)  $2 database name  $3 label for the filename
backup_mongo() {
    local target=$1 database=$2 tag=$3

    # Resolve the real container name (deployments may prefix the base name).
    local running
    running=$(docker ps --format '{{.Names}}' | grep -E "(^|-)${target}$" | head -1)
    [[ -z "$running" ]] && return 0  # service not deployed here — skip

    log "Backing up MongoDB: $tag ($running -> $database)..."
    local dump="${BACKUP_DIR}/mongo_${tag}_${DATE}.archive.gz"

    if ! docker exec "$running" mongodump --db "$database" --archive 2>/dev/null | gzip > "$dump"; then
        rm -f "$dump"
        log_error "MongoDB dump failed for $tag ($running)"
        return
    fi

    # Under 100 bytes = gzip header only, i.e. an empty dump.
    local bytes
    bytes=$(stat -c%s "$dump" 2>/dev/null || stat -f%z "$dump" 2>/dev/null)
    if [[ $bytes -gt 100 ]]; then
        FILES_BACKED_UP=$((FILES_BACKED_UP + 1))
        log " OK: $dump"
    else
        rm -f "$dump"
        log_error "MongoDB dump empty for $tag ($running)"
    fi
}
|
||||
|
||||
# Backup a directory as a tarball
|
||||
# Archive a directory as a gzipped tarball under $BACKUP_DIR. Skips silently
# when the directory does not exist; records failures via log_error.
#   $1 source directory  $2 label used in the output filename
backup_directory() {
    local source=$1 tag=$2

    [[ -d "$source" ]] || return 0  # nothing to back up — skip

    log "Backing up directory: $tag ($source)..."
    local archive="${BACKUP_DIR}/dir_${tag}_${DATE}.tar.gz"

    # -C to the parent so the tarball contains a single top-level entry.
    if ! tar czf "$archive" -C "$(dirname "$source")" "$(basename "$source")" 2>/dev/null; then
        rm -f "$archive"
        log_error "Directory backup failed for $tag ($source)"
        return
    fi

    FILES_BACKED_UP=$((FILES_BACKED_UP + 1))
    log " OK: $archive"
}
|
||||
|
||||
# =============================================================================
|
||||
# HELPER: Read credentials from env files
|
||||
# =============================================================================
|
||||
|
||||
# Read a variable from an env file
|
||||
# Read a variable's value from an env file.
# Takes the first "NAME=value" line matching var_name, strips one leading and
# one trailing quote (single or double), and echoes the result. Echoes the
# default when the file is missing, the variable is absent, or its value is
# empty after stripping.
#   $1 env file path  $2 variable name  $3 default (optional, defaults to "")
read_env_var() {
    local env_file=$1 key=$2 fallback=${3:-}

    if [[ -f "$env_file" ]]; then
        local line val
        line=$(grep -E "^${key}=" "$env_file" 2>/dev/null | head -1)
        if [[ -n "$line" ]]; then
            val=${line#*=}          # everything after the first '='
            val=${val#[\"\']}       # strip one leading quote, if any
            val=${val%[\"\']}       # strip one trailing quote, if any
            if [[ -n "$val" ]]; then
                echo "$val"
                return
            fi
        fi
    fi
    echo "$fallback"
}
|
||||
|
||||
# =============================================================================
|
||||
# START BACKUP
|
||||
# =============================================================================
|
||||
|
||||
log "=== LetsBe Backup Started - $DATE ==="
|
||||
|
||||
# =============================================================================
|
||||
# 1. POSTGRESQL DATABASE BACKUPS
|
||||
# =============================================================================
|
||||
|
||||
log "--- PostgreSQL Databases ---"
|
||||
|
||||
# Read credentials from env files where needed
|
||||
CREDS_FILE="${LETSBE_BASE}/env/credentials.env"
|
||||
|
||||
# Chatwoot (user from credentials or default)
|
||||
CHATWOOT_USER=$(read_env_var "$CREDS_FILE" "CHATWOOT_POSTGRES_USERNAME" "chatwoot")
|
||||
backup_postgres "chatwoot-postgres" "chatwoot_production" "$CHATWOOT_USER" "chatwoot"
|
||||
|
||||
# Nextcloud
|
||||
NC_USER=$(read_env_var "$CREDS_FILE" "NEXTCLOUD_POSTGRES_USER" "nextcloud")
|
||||
backup_postgres "nextcloud-postgres" "nextcloud" "$NC_USER" "nextcloud"
|
||||
|
||||
# Keycloak
|
||||
backup_postgres "keycloak-db" "keycloak" "keycloak" "keycloak"
|
||||
|
||||
# n8n
|
||||
N8N_USER=$(read_env_var "${LETSBE_BASE}/env/n8n.env" "POSTGRES_USER" "postgres")
|
||||
backup_postgres "n8n-postgres" "n8n" "$N8N_USER" "n8n"
|
||||
|
||||
# Cal.com
|
||||
CALCOM_USER=$(read_env_var "${LETSBE_BASE}/env/calcom.env" "POSTGRES_USER" "postgres")
|
||||
backup_postgres "calcom-postgres" "calcom" "$CALCOM_USER" "calcom"
|
||||
|
||||
# Umami
|
||||
UMAMI_USER=$(read_env_var "$CREDS_FILE" "UMAMI_POSTGRES_USER" "postgres")
|
||||
backup_postgres "umami-db" "umami" "$UMAMI_USER" "umami"
|
||||
|
||||
# NocoDB
|
||||
backup_postgres "nocodb-postgres" "nocodb" "postgres" "nocodb"
|
||||
|
||||
# Typebot
|
||||
backup_postgres "typebot-db" "typebot" "postgres" "typebot"
|
||||
|
||||
# Windmill
|
||||
backup_postgres "windmill-db" "windmill" "postgres" "windmill"
|
||||
|
||||
# GlitchTip
|
||||
backup_postgres "glitchtip-postgres" "postgres" "postgres" "glitchtip"
|
||||
|
||||
# Penpot
|
||||
PENPOT_USER=$(read_env_var "$CREDS_FILE" "PENPOT_DB_USER" "postgres")
|
||||
backup_postgres "penpot-postgres" "penpot" "$PENPOT_USER" "penpot"
|
||||
|
||||
# Gitea
|
||||
GITEA_USER=$(read_env_var "$CREDS_FILE" "GITEA_POSTGRES_USER" "postgres")
|
||||
backup_postgres "gitea-db" "gitea" "$GITEA_USER" "gitea"
|
||||
|
||||
# Odoo
|
||||
ODOO_USER=$(read_env_var "$CREDS_FILE" "ODOO_POSTGRES_USER" "postgres")
|
||||
backup_postgres "odoo-postgres" "postgres" "$ODOO_USER" "odoo"
|
||||
|
||||
# Listmonk
|
||||
LISTMONK_USER=$(read_env_var "$CREDS_FILE" "LISTMONK_DB_USER" "postgres")
|
||||
backup_postgres "listmonk-db" "listmonk" "$LISTMONK_USER" "listmonk"
|
||||
|
||||
# Documenso
|
||||
DOCUMENSO_USER=$(read_env_var "$CREDS_FILE" "DOCUMENSO_POSTGRES_USER" "postgres")
|
||||
backup_postgres "documenso-db" "documenso_db" "$DOCUMENSO_USER" "documenso"
|
||||
|
||||
# Redash (container name may not have customer prefix)
|
||||
REDASH_USER=$(read_env_var "${LETSBE_BASE}/env/redash.env" "POSTGRES_USER" "postgres")
|
||||
backup_postgres "redash-postgres" "postgres" "$REDASH_USER" "redash"
|
||||
|
||||
# Activepieces (container name may not have customer prefix)
|
||||
ACTIVEPIECES_USER=$(read_env_var "${LETSBE_BASE}/env/activepieces.env" "AP_POSTGRES_USERNAME" "postgres")
|
||||
ACTIVEPIECES_DB=$(read_env_var "${LETSBE_BASE}/env/activepieces.env" "AP_POSTGRES_DATABASE" "activepieces")
|
||||
backup_postgres "activepieces-postgres" "$ACTIVEPIECES_DB" "$ACTIVEPIECES_USER" "activepieces"
|
||||
|
||||
# LibreChat vectordb (pgvector)
|
||||
LIBRECHAT_PG_USER=$(read_env_var "$CREDS_FILE" "LIBRECHAT_POSTGRES_USER" "postgres")
|
||||
backup_postgres "librechat-vectordb" "librechat" "$LIBRECHAT_PG_USER" "librechat-vectordb"
|
||||
# Also try the generic volume-based container name
|
||||
backup_postgres "vectordb" "librechat" "$LIBRECHAT_PG_USER" "librechat-vectordb"
|
||||
|
||||
# Orchestrator
|
||||
backup_postgres "orchestrator-db" "orchestrator" "orchestrator" "orchestrator"
|
||||
|
||||
# =============================================================================
|
||||
# 2. MYSQL / MARIADB DATABASE BACKUPS
|
||||
# =============================================================================
|
||||
|
||||
log "--- MySQL/MariaDB Databases ---"
|
||||
|
||||
# WordPress (MariaDB)
|
||||
WP_USER=$(read_env_var "$CREDS_FILE" "WORDPRESS_DB_USER" "root")
|
||||
WP_PASS=$(read_env_var "$CREDS_FILE" "WORDPRESS_DB_PASSWORD" "")
|
||||
WP_ROOT_PASS=$(read_env_var "$CREDS_FILE" "WORDPRESS_MARIADB_ROOT_PASSWORD" "$WP_PASS")
|
||||
if [[ -n "$WP_ROOT_PASS" ]]; then
|
||||
backup_mysql "wordpress-mysql" "wordpress" "root" "$WP_ROOT_PASS" "wordpress"
|
||||
fi
|
||||
|
||||
# Ghost (MySQL)
|
||||
GHOST_PASS=$(read_env_var "$CREDS_FILE" "GHOST_MYSQL_PASSWORD" "")
|
||||
if [[ -n "$GHOST_PASS" ]]; then
|
||||
backup_mysql "ghost-db" "ghost" "root" "$GHOST_PASS" "ghost"
|
||||
fi
|
||||
|
||||
# =============================================================================
# 3. MONGODB BACKUPS
# =============================================================================

log "--- MongoDB Databases ---"

# LibreChat MongoDB
backup_mongo "librechat-mongodb" "LibreChat" "librechat"
|
||||
|
||||
# =============================================================================
# 4. ENV FILES BACKUP
# =============================================================================

log "--- Configuration Backups ---"

backup_directory "${LETSBE_BASE}/env" "env-files"

# =============================================================================
# 5. NGINX CONFIGS BACKUP
# =============================================================================

backup_directory "${LETSBE_BASE}/nginx" "nginx-configs"

# Also backup active nginx sites
if [[ -d "/etc/nginx/sites-enabled" ]]; then
    backup_directory "/etc/nginx/sites-enabled" "nginx-sites-enabled"
fi

# =============================================================================
# 6. TOOL CONFIGS BACKUP
# =============================================================================

backup_directory "${LETSBE_BASE}/config" "letsbe-config"

# Backup rclone config if it exists (it holds remote credentials, so it lands
# in the local backup dir alongside everything else)
if [[ -f "/root/.config/rclone/rclone.conf" ]]; then
    log "Backing up rclone config..."
    cp "/root/.config/rclone/rclone.conf" "${BACKUP_DIR}/rclone_conf_${DATE}.conf"
    FILES_BACKED_UP=$((FILES_BACKED_UP + 1))
fi

# Backup crontab — `crontab -l` exits non-zero when no crontab is installed,
# so tolerate failure and only count the file on success
log "Backing up crontab..."
crontab -l > "${BACKUP_DIR}/crontab_${DATE}.txt" 2>/dev/null && FILES_BACKED_UP=$((FILES_BACKED_UP + 1)) || true
|
||||
|
||||
# =============================================================================
# 7. UPLOAD TO RCLONE REMOTE
# =============================================================================

log "--- Remote Upload ---"

if command -v rclone &> /dev/null; then
    # Only upload when the configured remote actually exists
    if rclone listremotes 2>/dev/null | grep -q "^${RCLONE_REMOTE}:"; then
        log "Uploading backups to ${RCLONE_REMOTE}:letsbe-backups/${DATE}/..."
        if rclone copy "$BACKUP_DIR" "${RCLONE_REMOTE}:letsbe-backups/${DATE}/" --quiet 2>&1; then
            log "Upload complete."
        else
            # Recorded as an error but the script continues: local copies remain
            log_error "rclone upload failed"
        fi
    else
        log "WARNING: rclone remote '${RCLONE_REMOTE}' not configured. Backups stored locally only."
    fi
else
    log "WARNING: rclone not installed. Backups stored locally only."
fi
|
||||
|
||||
# =============================================================================
# 8. ROTATION: Keep 7 daily + 4 weekly
# =============================================================================

log "--- Backup Rotation ---"

# Daily cleanup: remove local backup files older than 7 days
find "$BACKUP_DIR" -maxdepth 1 -type f -mtime +7 -delete 2>/dev/null || true
log "Local daily rotation applied (7 days)."

# Weekly rotation on remote (keep 4 weeks)
if command -v rclone &> /dev/null && rclone listremotes 2>/dev/null | grep -q "^${RCLONE_REMOTE}:"; then
    # If today is Sunday (day 7), copy today's backup as a weekly backup
    if [[ "$DAY_OF_WEEK" -eq 7 ]]; then
        WEEK_NUM=$(date +%Y-W%V)
        log "Creating weekly backup: ${WEEK_NUM}"
        rclone copy "${RCLONE_REMOTE}:letsbe-backups/${DATE}/" "${RCLONE_REMOTE}:letsbe-backups/weekly/${WEEK_NUM}/" --quiet 2>/dev/null || true
    fi

    # Remove daily remote backups older than 7 days.
    # The cutoff is loop-invariant, so compute it once up front.
    cutoff_epoch=$(date -d "7 days ago" +%s 2>/dev/null || echo "0")
    rclone lsd "${RCLONE_REMOTE}:letsbe-backups/" 2>/dev/null | while read -r _ _ _ dirname; do
        # Skip 'weekly' directory
        [[ "$dirname" == "weekly" ]] && continue
        # Parse date from directory name (format: YYYYMMDD_HHMMSS)
        dir_date=$(echo "$dirname" | cut -c1-8)
        if [[ "$dir_date" =~ ^[0-9]{8}$ ]]; then
            dir_epoch=$(date -d "${dir_date:0:4}-${dir_date:4:2}-${dir_date:6:2}" +%s 2>/dev/null || echo "0")
            # Both epochs must have parsed (non-zero) before comparing
            if [[ "$dir_epoch" -gt 0 && "$cutoff_epoch" -gt 0 && "$dir_epoch" -lt "$cutoff_epoch" ]]; then
                log "Removing old remote daily: $dirname"
                rclone purge "${RCLONE_REMOTE}:letsbe-backups/${dirname}/" --quiet 2>/dev/null || true
            fi
        fi
    done

    # Remove weekly backups older than 4 weeks
    rclone lsd "${RCLONE_REMOTE}:letsbe-backups/weekly/" 2>/dev/null | while read -r _ _ _ dirname; do
        # Parse week from directory name (format: YYYY-WNN)
        week_year=$(echo "$dirname" | cut -d'-' -f1)
        week_num=$(echo "$dirname" | sed 's/.*W//')
        if [[ "$week_year" =~ ^[0-9]{4}$ && "$week_num" =~ ^[0-9]+$ ]]; then
            current_year=$(date +%Y)
            current_week=$(date +%V)
            # Calculate approximate age in weeks.
            # Force base-10 with 10#: %V yields zero-padded values ("08", "09")
            # which bash arithmetic would otherwise reject as invalid octal.
            age_weeks=$(( (current_year - week_year) * 52 + (10#$current_week - 10#$week_num) ))
            if [[ "$age_weeks" -gt 4 ]]; then
                log "Removing old remote weekly: $dirname"
                rclone purge "${RCLONE_REMOTE}:letsbe-backups/weekly/${dirname}/" --quiet 2>/dev/null || true
            fi
        fi
    done
fi
|
||||
|
||||
# =============================================================================
# 9. STATUS FILE
# =============================================================================

# Calculate total backup size
TOTAL_SIZE_BYTES=$(du -sb "$BACKUP_DIR" 2>/dev/null | cut -f1 || echo "0")
TOTAL_SIZE_MB=$(( TOTAL_SIZE_BYTES / 1048576 ))

# Determine status: any recorded error downgrades the run to "partial"
if [[ ${#ERRORS[@]} -eq 0 ]]; then
    STATUS="success"
else
    STATUS="partial"
fi

# Build errors JSON array
ERRORS_JSON="[]"
if [[ ${#ERRORS[@]} -gt 0 ]]; then
    ERRORS_JSON="["
    for i in "${!ERRORS[@]}"; do
        # Escape backslashes first, then quotes, so each message embeds safely
        # in a JSON string (quote-escaping alone produces invalid JSON when a
        # message contains a backslash). printf avoids echo's option parsing.
        escaped=$(printf '%s' "${ERRORS[$i]}" | sed 's/\\/\\\\/g; s/"/\\"/g')
        if [[ $i -gt 0 ]]; then
            ERRORS_JSON+=","
        fi
        ERRORS_JSON+="\"${escaped}\""
    done
    ERRORS_JSON+="]"
fi

# Machine-readable summary consumed by monitoring/orchestration
cat > "$STATUS_FILE" <<EOF
{
  "last_run": "$(date -u '+%Y-%m-%dT%H:%M:%SZ')",
  "status": "${STATUS}",
  "size_mb": ${TOTAL_SIZE_MB},
  "files_backed_up": ${FILES_BACKED_UP},
  "errors": ${ERRORS_JSON}
}
EOF
|
||||
|
||||
# =============================================================================
# DONE
# =============================================================================

log "=== Backup Complete ==="
log "Status: ${STATUS}"
log "Files backed up: ${FILES_BACKED_UP}"
log "Total size: ${TOTAL_SIZE_MB} MB"
log "Local backups: ${BACKUP_DIR}"
if [[ ${#ERRORS[@]} -gt 0 ]]; then
    log "Errors (${#ERRORS[@]}):"
    for err in "${ERRORS[@]}"; do
        log "  - $err"
    done
fi
|
||||
|
|
@ -0,0 +1,677 @@
|
|||
#!/bin/bash
#
# LetsBe Cloud Environment Setup Script
# Non-interactive version for Orchestrator/SysAdmin Agent integration
#
# Usage:
#   ./env_setup.sh --customer "acme" --domain "acme.com" --company "Acme Corp"
#   ./env_setup.sh --json '{"customer":"acme","domain":"acme.com","company_name":"Acme Corp"}'
#   ./env_setup.sh --config /path/to/config.json
#

# Abort on any error, undefined variable, or failed pipeline stage
set -euo pipefail

# ============================================================================
# CONFIGURATION
# ============================================================================

# Root of the LetsBe installation; every other path hangs off this.
LETSBE_BASE="/opt/letsbe"
STACKS_DIR="${LETSBE_BASE}/stacks"
NGINX_DIR="${LETSBE_BASE}/nginx"
ENV_DIR="${LETSBE_BASE}/env"
SCRIPTS_DIR="${LETSBE_BASE}/scripts"
|
||||
|
||||
# ============================================================================
# HELPER FUNCTIONS
# ============================================================================

# Print usage information and exit with status 1.
usage() {
    cat <<EOF
Usage: $0 [OPTIONS]

Required (one of):
  --customer NAME       Customer name (lowercase, no spaces/hyphens/numbers)
  --domain DOMAIN       Main domain without subdomains (lowercase)
  --company NAME        Company name (can include spaces)

Or provide all via JSON:
  --json JSON_STRING    JSON object with customer, domain, company_name
  --config FILE         Path to JSON config file

Optional:
  --docker-user NAME    Docker user name
  --license-key KEY     LetsBe Hub license key (lb_inst_...)
  --hub-url URL         LetsBe Hub URL (default: https://hub.letsbe.biz)
  --hub-api-key KEY     LetsBe Hub API key
  --hub-telemetry BOOL  Enable Hub telemetry (default: true)

Example:
  $0 --customer acme --domain acme.com --company "Acme Corporation"
  $0 --json '{"customer":"acme","domain":"acme.com","company_name":"Acme Corp"}'
  $0 --config /opt/letsbe/config/setup.json

EOF
    exit 1
}
|
||||
|
||||
# Print an informational message to stdout.
log_info() {
    echo "[INFO] $*"
}

# Print an error message to stderr.
log_error() {
    echo "[ERROR] $*" >&2
}

# Print an error message and abort the script with exit code 1.
die() {
    log_error "$*"
    exit 1
}
|
||||
|
||||
# Generate a random alphanumeric string of the specified length.
# $1: number of characters to produce.
# Output: the random string on stdout, followed by a newline.
generate_random_string() {
    local length=$1
    # head(1) closes the pipe once it has enough bytes, which makes tr exit
    # on SIGPIPE (status 141). Under `set -euo pipefail` that non-zero
    # pipeline status would abort the whole script, so neutralize it here.
    tr -dc A-Za-z0-9 </dev/urandom | head -c "${length}" || true
    echo ''
}
|
||||
|
||||
# Abort the script unless the given variable has a non-empty value.
# $1: variable name (used only in the error message)
# $2: variable value to check
validate_required() {
    local var_name=$1
    local var_value=$2
    if [[ -z "${var_value}" ]]; then
        die "Required variable '${var_name}' is not set"
    fi
}
|
||||
|
||||
# Parse a JSON configuration object and populate the corresponding globals:
# customer, domain, company_name, license_key, hub_url,
# hub_telemetry_enabled, and (when present) server_ip_override.
# $1: JSON string
parse_json() {
    local json_input=$1

    if ! command -v jq &> /dev/null; then
        die "jq is required for JSON parsing. Install with: apt-get install jq"
    fi

    customer=$(echo "${json_input}" | jq -r '.customer // empty')
    domain=$(echo "${json_input}" | jq -r '.domain // empty')
    company_name=$(echo "${json_input}" | jq -r '.company_name // empty')

    # Hub / Licensing configuration
    # NOTE(review): hub_api_key is not read from JSON — only the
    # --hub-api-key CLI flag sets it; confirm this is intended.
    license_key=$(echo "${json_input}" | jq -r '.license_key // empty')
    hub_url=$(echo "${json_input}" | jq -r '.hub_url // "https://hub.letsbe.biz"')
    hub_telemetry_enabled=$(echo "${json_input}" | jq -r '.hub_telemetry_enabled // true')

    # Optional server IP override (auto-detected later if not provided)
    local json_server_ip
    json_server_ip=$(echo "${json_input}" | jq -r '.server_ip // empty')
    if [[ -n "${json_server_ip}" ]]; then
        server_ip_override="${json_server_ip}"
    fi
}
|
||||
|
||||
# ============================================================================
# ARGUMENT PARSING
# ============================================================================

# Defaults; CLI flags and/or JSON config override these below.
customer=""
domain=""
company_name=""
docker_user=""
server_ip_override=""

# Hub / Licensing configuration
license_key=""
hub_api_key=""
hub_url="https://hub.letsbe.biz"
hub_telemetry_enabled="true"

while [[ $# -gt 0 ]]; do
    case $1 in
        --customer)
            customer="$2"
            shift 2
            ;;
        --domain)
            domain="$2"
            shift 2
            ;;
        --company)
            company_name="$2"
            shift 2
            ;;
        --docker-user)
            docker_user="$2"
            shift 2
            ;;
        --license-key)
            license_key="$2"
            shift 2
            ;;
        --hub-url)
            hub_url="$2"
            shift 2
            ;;
        --hub-api-key)
            hub_api_key="$2"
            shift 2
            ;;
        --hub-telemetry)
            hub_telemetry_enabled="$2"
            shift 2
            ;;
        --json)
            # Values from the JSON override any flags parsed before it
            parse_json "$2"
            shift 2
            ;;
        --config)
            if [[ ! -f "$2" ]]; then
                die "Config file not found: $2"
            fi
            parse_json "$(cat "$2")"
            shift 2
            ;;
        --help|-h)
            usage
            ;;
        *)
            log_error "Unknown option: $1"
            usage
            ;;
    esac
done
|
||||
|
||||
# ============================================================================
# VALIDATION
# ============================================================================

validate_required "customer" "${customer}"
validate_required "domain" "${domain}"
validate_required "company_name" "${company_name}"

# Validate customer format: lowercase letters only — the name is embedded in
# derived identifiers and sed replacements, so keep it strict
if [[ ! "${customer}" =~ ^[a-z]+$ ]]; then
    die "Customer name must be lowercase letters only, no spaces/hyphens/numbers: ${customer}"
fi

# Validate domain format (e.g. example.com, sub.example.co.uk)
if [[ ! "${domain}" =~ ^[a-z0-9.-]+\.[a-z]{2,}$ ]]; then
    die "Invalid domain format: ${domain}"
fi

log_info "Configuration validated"
log_info "  Customer: ${customer}"
log_info "  Domain: ${domain}"
log_info "  Company: ${company_name}"
|
||||
|
||||
# ============================================================================
# DERIVED VARIABLES
# ============================================================================

# Email for Let's Encrypt
letsencrypt_email="postmaster@${domain}"

# Subdomain assigned to each tool
domain_html="html.${domain}"
domain_wordpress="${domain}"
domain_squidex="contenthub.${domain}"
domain_chatwoot="support.${domain}"
domain_chatwoot_helpdesk="helpdesk.${domain}"
domain_gitea="code.${domain}"
domain_gitea_drone="ci.${domain}"
domain_glitchtip="debug.${domain}"
domain_listmonk="newsletters.${domain}"
domain_n8n="n8n.${domain}"
domain_nextcloud="cloud.${domain}"
domain_penpot="design.${domain}"
domain_poste="mail.${domain}"
domain_umami="analytics.${domain}"
domain_uptime_kuma="uptime.${domain}"
domain_windmill="flows.${domain}"
domain_calcom="bookings.${domain}"
domain_odoo="crm.${domain}"
domain_collabora="collabora.${domain}"
domain_whiteboard="whiteboard.${domain}"
domain_signaling="signaling.${domain}"
domain_activepieces="automation.${domain}"
domain_minio="minio.${domain}"
domain_s3="s3.${domain}"
domain_librechat="ai.${domain}"
domain_bot_viewer="bots.${domain}"
domain_botlab="botlab.${domain}"
domain_nocodb="database.${domain}"
domain_redash="data.${domain}"
domain_documenso="signatures.${domain}"
domain_keycloak="auth.${domain}"
domain_pdf="pdf.${domain}"
domain_portainer="portainer.${domain}"
# NOTE(review): domain_wordpress and domain_ghost both resolve to the apex
# domain — confirm only one of the two stacks is deployed per customer.
domain_ghost="${domain}"
|
||||
|
||||
# ============================================================================
# GENERATED SECRETS
# ============================================================================

log_info "Generating secrets and credentials..."

# WordPress
# NOTE: "wordpresss" (triple s) is misspelled, but it matches the
# {{ wordpresss_mariadb_root_password }} placeholder used in the templates —
# do not rename one without the other.
wordpresss_mariadb_root_password=$(generate_random_string 20)
wordpress_db_user=$(generate_random_string 10 | tr '[:upper:]' '[:lower:]')
wordpress_db_password=$(generate_random_string 20)

# Squidex
squidex_adminemail="postmaster@${domain}"
squidex_adminpassword=$(generate_random_string 20)

# Listmonk
listmonk_admin_username=$(generate_random_string 10 | tr '[:upper:]' '[:lower:]')
listmonk_admin_password=$(generate_random_string 20)
listmonk_db_user=$(generate_random_string 10 | tr '[:upper:]' '[:lower:]')
listmonk_db_password=$(generate_random_string 20)

# Gitea
gitea_postgres_user=$(generate_random_string 10 | tr '[:upper:]' '[:lower:]')
gitea_postgres_password=$(generate_random_string 20)

# Umami
umami_app_secret=$(generate_random_string 32)
umami_postgres_user=$(generate_random_string 10 | tr '[:upper:]' '[:lower:]')
umami_postgres_password=$(generate_random_string 20)

# Drone/Gitea
drone_gitea_rpc_secret=$(generate_random_string 32)

# Windmill
windmill_database_password=$(generate_random_string 20)

# Glitchtip
glitchtip_database_password=$(generate_random_string 20)
glitchtip_secret_key=$(generate_random_string 32)

# Penpot
penpot_secret_key=$(generate_random_string 32)
penpot_db_user=$(generate_random_string 10 | tr '[:upper:]' '[:lower:]')
penpot_db_password=$(generate_random_string 20)

# Nextcloud
nextcloud_postgres_user=$(generate_random_string 10 | tr '[:upper:]' '[:lower:]')
nextcloud_postgres_password=$(generate_random_string 20)
nextcloud_jwt_secret=$(generate_random_string 64 | tr '[:upper:]' '[:lower:]')
nextcloud_admin_password=$(generate_random_string 20)

# Collabora
collabora_password=$(generate_random_string 20)
collabora_user=$(generate_random_string 10 | tr '[:upper:]' '[:lower:]')

# Nextcloud Talk HPB / TURN
turn_secret=$(openssl rand -hex 32)
signaling_secret=$(openssl rand -hex 32)
internal_secret=$(openssl rand -hex 32)

# Server public IP (needed for coturn external-ip)
if [[ -n "${server_ip_override}" ]]; then
    server_ip="${server_ip_override}"
    log_info "Using provided server IP: ${server_ip}"
else
    # Two independent detection services; fall back to a placeholder the
    # operator must replace manually if both are unreachable.
    server_ip=$(curl -4 -s --max-time 10 ifconfig.co || curl -4 -s --max-time 10 icanhazip.com || echo "REPLACE_WITH_SERVER_IP")
    log_info "Auto-detected server IP: ${server_ip}"
fi

# Chatwoot
chatwoot_secret_key_base=$(generate_random_string 32)
chatwoot_redis_password=$(generate_random_string 20)
chatwoot_postgres_username=$(generate_random_string 10 | tr '[:upper:]' '[:lower:]')
chatwoot_postgres_password=$(generate_random_string 20)
chatwoot_rails_inbound_email_password=$(generate_random_string 20)

# N8N
n8n_postgres_user=$(generate_random_string 10 | tr '[:upper:]' '[:lower:]')
n8n_postgres_password=$(generate_random_string 20)

# Cal.com
calcom_nextauth_secret=$(generate_random_string 32)
calcom_postgres_user=$(generate_random_string 10 | tr '[:upper:]' '[:lower:]')
calcom_postgres_password=$(generate_random_string 20)

# Odoo
odoo_postgres_user=$(generate_random_string 10 | tr '[:upper:]' '[:lower:]')
odoo_postgres_password=$(generate_random_string 20)

# Activepieces
activepieces_api_key=$(generate_random_string 32)
activepieces_encryption_key=$(generate_random_string 32 | tr '[:upper:]' '[:lower:]')
activepieces_jwt_secret=$(generate_random_string 64 | tr '[:upper:]' '[:lower:]')
activepieces_postgres_password=$(generate_random_string 32)

# MinIO
minio_root_user=$(generate_random_string 16)
minio_root_password=$(generate_random_string 32)

# Typebot
typebot_encryption_secret=$(generate_random_string 32)
typebot_postgres_password=$(generate_random_string 20)

# NocoDB
nocodb_postgres_password=$(generate_random_string 32)

# LibreChat
librechat_postgres_password=$(generate_random_string 20)
librechat_postgres_user=$(generate_random_string 10 | tr '[:upper:]' '[:lower:]')
librechat_jwt_secret=$(generate_random_string 64 | tr '[:upper:]' '[:lower:]')
librechat_jwt_refresh_secret=$(generate_random_string 64 | tr '[:upper:]' '[:lower:]')

# Redash
redash_secret_key=$(generate_random_string 32)
redash_cookie_secret=$(generate_random_string 32)
redash_postgres_password=$(generate_random_string 20)
redash_postgres_user=$(generate_random_string 10 | tr '[:upper:]' '[:lower:]')

# Documenso
documenso_postgres_user=$(generate_random_string 10 | tr '[:upper:]' '[:lower:]')
documenso_postgres_password=$(generate_random_string 40)
documenso_nextauth_secret=$(generate_random_string 32)
documenso_encryption_key=$(generate_random_string 64 | tr '[:upper:]' '[:lower:]')
documenso_encryption_secondary_key=$(generate_random_string 64 | tr '[:upper:]' '[:lower:]')

# Ghost
ghost_mysql_password=$(generate_random_string 40)
ghost_s3_access_key=$(generate_random_string 20)
ghost_s3_secret_key=$(generate_random_string 40)

# Keycloak
keycloak_postgres_password=$(generate_random_string 40)
keycloak_admin_password=$(generate_random_string 40)
keycloak_grafana_password=$(generate_random_string 40)

# Portainer (admin credentials for automated setup)
# Note: --admin-password-file expects PLAIN TEXT password (not bcrypt hash)
# The username is always "admin" - cannot be changed via CLI
portainer_admin_user="admin"
portainer_admin_password=$(generate_random_string 24)

# StirlingPDF
stirlingpdf_postgres_user=$(generate_random_string 10 | tr '[:upper:]' '[:lower:]')
stirlingpdf_postgres_password=$(generate_random_string 40)
stirlingpdf_api_key=$(generate_random_string 40)

# Sysadmin Agent
# Registration token is now auto-generated by local_bootstrap.sh after license validation
# It calls the local orchestrator API to create a token
# Legacy: can still be provided via SYSADMIN_REGISTRATION_TOKEN env var for backwards compatibility
sysadmin_registration_token="${SYSADMIN_REGISTRATION_TOKEN:-PLACEHOLDER_GENERATED_BY_BOOTSTRAP}"
# Legacy token (deprecated, kept for backward compatibility)
sysadmin_agent_token=$(generate_random_string 64)
|
||||
|
||||
# ============================================================================
# ORCHESTRATOR / HUB CONFIGURATION
# ============================================================================

# Orchestrator database password (for local orchestrator)
orchestrator_db_password=$(generate_random_string 40)

# Admin API key for orchestrator management
admin_api_key=$(generate_random_string 64)

# Instance ID (must match what's registered in Hub)
instance_id="${customer}-orchestrator"

# Orchestrator URL for sysadmin agent.
# LOCAL_MODE uses host.docker.internal to reach the orchestrator from the
# agent container.
orchestrator_url="http://host.docker.internal:8100"

# Local agent key for LOCAL_MODE authentication —
# shared secret between orchestrator and sysadmin agent
local_agent_key=$(generate_random_string 64)

# Validate license key (required for official installations); the sample
# placeholder value is rejected the same as an empty key.
if [[ -z "${license_key}" || "${license_key}" == "lb_inst_YOUR_LICENSE_KEY_HERE" ]]; then
    log_error "=============================================="
    log_error "LICENSE KEY REQUIRED"
    log_error "=============================================="
    log_error ""
    log_error "A valid license key is required for official LetsBe installations."
    log_error ""
    log_error "To obtain a license key:"
    log_error "1. Contact LetsBe to create a client account in Hub"
    log_error "2. Request an instance to be created for your deployment"
    log_error "3. Add the license_key to your config.json:"
    log_error "   {\"license_key\": \"lb_inst_...\"}"
    log_error ""
    log_error "=============================================="
    die "Missing or invalid license_key in config.json"
fi

# Only the key prefix is logged to avoid leaking the full secret
log_info "Hub configuration:"
log_info "  License Key: ${license_key:0:12}..."
log_info "  Hub URL: ${hub_url}"
log_info "  Instance ID: ${instance_id}"
log_info "  Telemetry: ${hub_telemetry_enabled}"
|
||||
|
||||
# ============================================================================
|
||||
# TEMPLATE REPLACEMENT
|
||||
# ============================================================================
|
||||
|
||||
log_info "Replacing placeholders in template files..."
|
||||
|
||||
# Process all template files
|
||||
for file in "${STACKS_DIR}"/*/* "${STACKS_DIR}"/*/.* "${NGINX_DIR}"/* "${SCRIPTS_DIR}"/backups.sh; do
|
||||
if [[ -f "${file}" ]]; then
|
||||
# Core variables
|
||||
sed -i "s/{{ customer }}/${customer}/g" "${file}"
|
||||
sed -i "s/{{ domain }}/${domain}/g" "${file}"
|
||||
sed -i "s/{{ company_name }}/${company_name}/g" "${file}"
|
||||
sed -i "s/{{ letsencrypt_email }}/${letsencrypt_email}/g" "${file}"
|
||||
|
||||
# Domain variables
|
||||
sed -i "s/{{ domain_html }}/${domain_html}/g" "${file}"
|
||||
sed -i "s/{{ domain_wordpress }}/${domain_wordpress}/g" "${file}"
|
||||
sed -i "s/{{ domain_squidex }}/${domain_squidex}/g" "${file}"
|
||||
sed -i "s/{{ domain_chatwoot }}/${domain_chatwoot}/g" "${file}"
|
||||
sed -i "s/{{ domain_chatwoot_helpdesk }}/${domain_chatwoot_helpdesk}/g" "${file}"
|
||||
sed -i "s/{{ domain_gitea }}/${domain_gitea}/g" "${file}"
|
||||
sed -i "s/{{ domain_gitea_drone }}/${domain_gitea_drone}/g" "${file}"
|
||||
sed -i "s/{{ domain_glitchtip }}/${domain_glitchtip}/g" "${file}"
|
||||
sed -i "s/{{ domain_listmonk }}/${domain_listmonk}/g" "${file}"
|
||||
sed -i "s/{{ domain_librechat }}/${domain_librechat}/g" "${file}"
|
||||
sed -i "s/{{ domain_n8n }}/${domain_n8n}/g" "${file}"
|
||||
sed -i "s/{{ domain_nextcloud }}/${domain_nextcloud}/g" "${file}"
|
||||
sed -i "s/{{ domain_penpot }}/${domain_penpot}/g" "${file}"
|
||||
sed -i "s/{{ domain_poste }}/${domain_poste}/g" "${file}"
|
||||
sed -i "s/{{ domain_umami }}/${domain_umami}/g" "${file}"
|
||||
sed -i "s/{{ domain_uptime_kuma }}/${domain_uptime_kuma}/g" "${file}"
|
||||
sed -i "s/{{ domain_windmill }}/${domain_windmill}/g" "${file}"
|
||||
sed -i "s/{{ domain_calcom }}/${domain_calcom}/g" "${file}"
|
||||
sed -i "s/{{ domain_odoo }}/${domain_odoo}/g" "${file}"
|
||||
sed -i "s/{{ domain_collabora }}/${domain_collabora}/g" "${file}"
|
||||
sed -i "s/{{ domain_activepieces }}/${domain_activepieces}/g" "${file}"
|
||||
sed -i "s/{{ domain_bot_viewer }}/${domain_bot_viewer}/g" "${file}"
|
||||
sed -i "s/{{ domain_botlab }}/${domain_botlab}/g" "${file}"
|
||||
sed -i "s/{{ domain_minio }}/${domain_minio}/g" "${file}"
|
||||
sed -i "s/{{ domain_s3 }}/${domain_s3}/g" "${file}"
|
||||
sed -i "s/{{ domain_nocodb }}/${domain_nocodb}/g" "${file}"
|
||||
sed -i "s/{{ domain_whiteboard }}/${domain_whiteboard}/g" "${file}"
|
||||
sed -i "s/{{ domain_signaling }}/${domain_signaling}/g" "${file}"
|
||||
sed -i "s/{{ domain_redash }}/${domain_redash}/g" "${file}"
|
||||
sed -i "s/{{ domain_documenso }}/${domain_documenso}/g" "${file}"
|
||||
sed -i "s/{{ domain_keycloak }}/${domain_keycloak}/g" "${file}"
|
||||
sed -i "s/{{ domain_pdf }}/${domain_pdf}/g" "${file}"
|
||||
sed -i "s/{{ domain_portainer }}/${domain_portainer}/g" "${file}"
|
||||
sed -i "s/{{ domain_ghost }}/${domain_ghost}/g" "${file}"
|
||||
|
||||
# Credential variables
|
||||
sed -i "s/{{ wordpresss_mariadb_root_password }}/${wordpresss_mariadb_root_password}/g" "${file}"
|
||||
sed -i "s/{{ wordpress_db_user }}/${wordpress_db_user}/g" "${file}"
|
||||
sed -i "s/{{ wordpress_db_password }}/${wordpress_db_password}/g" "${file}"
|
||||
sed -i "s/{{ squidex_adminemail }}/${squidex_adminemail}/g" "${file}"
|
||||
sed -i "s/{{ squidex_adminpassword }}/${squidex_adminpassword}/g" "${file}"
|
||||
sed -i "s/{{ listmonk_admin_username }}/${listmonk_admin_username}/g" "${file}"
|
||||
sed -i "s/{{ listmonk_admin_password }}/${listmonk_admin_password}/g" "${file}"
|
||||
sed -i "s/{{ listmonk_db_user }}/${listmonk_db_user}/g" "${file}"
|
||||
sed -i "s/{{ listmonk_db_password }}/${listmonk_db_password}/g" "${file}"
|
||||
sed -i "s/{{ gitea_postgres_user }}/${gitea_postgres_user}/g" "${file}"
|
||||
sed -i "s/{{ gitea_postgres_password }}/${gitea_postgres_password}/g" "${file}"
|
||||
sed -i "s/{{ umami_app_secret }}/${umami_app_secret}/g" "${file}"
|
||||
sed -i "s/{{ umami_postgres_user }}/${umami_postgres_user}/g" "${file}"
|
||||
sed -i "s/{{ umami_postgres_password }}/${umami_postgres_password}/g" "${file}"
|
||||
sed -i "s/{{ drone_gitea_rpc_secret }}/${drone_gitea_rpc_secret}/g" "${file}"
|
||||
sed -i "s/{{ windmill_database_password }}/${windmill_database_password}/g" "${file}"
|
||||
sed -i "s/{{ glitchtip_database_password }}/${glitchtip_database_password}/g" "${file}"
|
||||
sed -i "s/{{ glitchtip_secret_key }}/${glitchtip_secret_key}/g" "${file}"
|
||||
sed -i "s/{{ penpot_secret_key }}/${penpot_secret_key}/g" "${file}"
|
||||
sed -i "s/{{ penpot_db_user }}/${penpot_db_user}/g" "${file}"
|
||||
sed -i "s/{{ penpot_db_password }}/${penpot_db_password}/g" "${file}"
|
||||
sed -i "s/{{ nextcloud_postgres_user }}/${nextcloud_postgres_user}/g" "${file}"
|
||||
sed -i "s/{{ nextcloud_postgres_password }}/${nextcloud_postgres_password}/g" "${file}"
|
||||
sed -i "s/{{ nextcloud_admin_password }}/${nextcloud_admin_password}/g" "${file}"
|
||||
sed -i "s/{{ nextcloud_jwt_secret }}/${nextcloud_jwt_secret}/g" "${file}"
|
||||
sed -i "s/{{ collabora_password }}/${collabora_password}/g" "${file}"
|
||||
sed -i "s/{{ collabora_user }}/${collabora_user}/g" "${file}"
|
||||
sed -i "s/{{ turn_secret }}/${turn_secret}/g" "${file}"
|
||||
sed -i "s/{{ signaling_secret }}/${signaling_secret}/g" "${file}"
|
||||
sed -i "s/{{ internal_secret }}/${internal_secret}/g" "${file}"
|
||||
sed -i "s/{{ server_ip }}/${server_ip}/g" "${file}"
|
||||
sed -i "s/{{ chatwoot_secret_key_base }}/${chatwoot_secret_key_base}/g" "${file}"
|
||||
sed -i "s/{{ chatwoot_redis_password }}/${chatwoot_redis_password}/g" "${file}"
|
||||
sed -i "s/{{ chatwoot_postgres_username }}/${chatwoot_postgres_username}/g" "${file}"
|
||||
sed -i "s/{{ chatwoot_postgres_password }}/${chatwoot_postgres_password}/g" "${file}"
|
||||
sed -i "s/{{ chatwoot_rails_inbound_email_password }}/${chatwoot_rails_inbound_email_password}/g" "${file}"
|
||||
sed -i "s/{{ n8n_postgres_user }}/${n8n_postgres_user}/g" "${file}"
|
||||
sed -i "s/{{ n8n_postgres_password }}/${n8n_postgres_password}/g" "${file}"
|
||||
sed -i "s/{{ calcom_nextauth_secret }}/${calcom_nextauth_secret}/g" "${file}"
|
||||
sed -i "s/{{ calcom_postgres_user }}/${calcom_postgres_user}/g" "${file}"
|
||||
sed -i "s/{{ calcom_postgres_password }}/${calcom_postgres_password}/g" "${file}"
|
||||
sed -i "s/{{ odoo_postgres_user }}/${odoo_postgres_user}/g" "${file}"
|
||||
sed -i "s/{{ odoo_postgres_password }}/${odoo_postgres_password}/g" "${file}"
|
||||
sed -i "s/{{ activepieces_api_key }}/${activepieces_api_key}/g" "${file}"
|
||||
sed -i "s/{{ activepieces_encryption_key }}/${activepieces_encryption_key}/g" "${file}"
|
||||
sed -i "s/{{ activepieces_jwt_secret }}/${activepieces_jwt_secret}/g" "${file}"
|
||||
sed -i "s/{{ activepieces_postgres_password }}/${activepieces_postgres_password}/g" "${file}"
|
||||
sed -i "s/{{ minio_root_user }}/${minio_root_user}/g" "${file}"
|
||||
sed -i "s/{{ minio_root_password }}/${minio_root_password}/g" "${file}"
|
||||
sed -i "s/{{ typebot_encryption_secret }}/${typebot_encryption_secret}/g" "${file}"
|
||||
sed -i "s/{{ nocodb_postgres_password }}/${nocodb_postgres_password}/g" "${file}"
|
||||
sed -i "s/{{ typebot_postgres_password }}/${typebot_postgres_password}/g" "${file}"
|
||||
sed -i "s/{{ redash_secret_key }}/${redash_secret_key}/g" "${file}"
|
||||
sed -i "s/{{ redash_cookie_secret }}/${redash_cookie_secret}/g" "${file}"
|
||||
sed -i "s/{{ redash_postgres_user }}/${redash_postgres_user}/g" "${file}"
|
||||
sed -i "s/{{ redash_postgres_password }}/${redash_postgres_password}/g" "${file}"
|
||||
sed -i "s/{{ librechat_postgres_password }}/${librechat_postgres_password}/g" "${file}"
|
||||
sed -i "s/{{ librechat_postgres_user }}/${librechat_postgres_user}/g" "${file}"
|
||||
sed -i "s/{{ librechat_jwt_secret }}/${librechat_jwt_secret}/g" "${file}"
|
||||
sed -i "s/{{ librechat_jwt_refresh_secret }}/${librechat_jwt_refresh_secret}/g" "${file}"
|
||||
sed -i "s/{{ documenso_postgres_user }}/${documenso_postgres_user}/g" "${file}"
|
||||
sed -i "s/{{ documenso_postgres_password }}/${documenso_postgres_password}/g" "${file}"
|
||||
sed -i "s/{{ documenso_nextauth_secret }}/${documenso_nextauth_secret}/g" "${file}"
|
||||
sed -i "s/{{ documenso_encryption_key }}/${documenso_encryption_key}/g" "${file}"
|
||||
sed -i "s/{{ documenso_encryption_secondary_key }}/${documenso_encryption_secondary_key}/g" "${file}"
|
||||
sed -i "s/{{ ghost_mysql_password }}/${ghost_mysql_password}/g" "${file}"
|
||||
sed -i "s/{{ ghost_s3_access_key }}/${ghost_s3_access_key}/g" "${file}"
|
||||
sed -i "s/{{ ghost_s3_secret_key }}/${ghost_s3_secret_key}/g" "${file}"
|
||||
sed -i "s/{{ keycloak_postgres_password }}/${keycloak_postgres_password}/g" "${file}"
|
||||
sed -i "s/{{ keycloak_admin_password }}/${keycloak_admin_password}/g" "${file}"
|
||||
sed -i "s/{{ keycloak_grafana_password }}/${keycloak_grafana_password}/g" "${file}"
|
||||
sed -i "s/{{ stirlingpdf_postgres_user }}/${stirlingpdf_postgres_user}/g" "${file}"
|
||||
sed -i "s/{{ stirlingpdf_postgres_password }}/${stirlingpdf_postgres_password}/g" "${file}"
|
||||
sed -i "s/{{ stirlingpdf_api_key }}/${stirlingpdf_api_key}/g" "${file}"
|
||||
sed -i "s/{{ sysadmin_agent_token }}/${sysadmin_agent_token}/g" "${file}"
|
||||
sed -i "s/{{ sysadmin_registration_token }}/${sysadmin_registration_token}/g" "${file}"
|
||||
|
||||
# Hub / Orchestrator variables
|
||||
sed -i "s/{{ license_key }}/${license_key}/g" "${file}"
|
||||
sed -i "s/{{ hub_api_key }}/${hub_api_key}/g" "${file}"
|
||||
sed -i "s|{{ hub_url }}|${hub_url}|g" "${file}"
|
||||
sed -i "s/{{ hub_telemetry_enabled }}/${hub_telemetry_enabled}/g" "${file}"
|
||||
sed -i "s/{{ instance_id }}/${instance_id}/g" "${file}"
|
||||
sed -i "s/{{ orchestrator_db_password }}/${orchestrator_db_password}/g" "${file}"
|
||||
sed -i "s/{{ admin_api_key }}/${admin_api_key}/g" "${file}"
|
||||
sed -i "s|{{ orchestrator_url }}|${orchestrator_url}|g" "${file}"
|
||||
sed -i "s/{{ local_agent_key }}/${local_agent_key}/g" "${file}"
|
||||
fi
|
||||
done
|
||||
|
||||
log_info "All placeholders replaced successfully."
|
||||
|
||||
# ============================================================================
|
||||
# GENERATE ENV FILES
|
||||
# ============================================================================
|
||||
|
||||
log_info "Generating centralized environment files..."
|
||||
|
||||
mkdir -p "${ENV_DIR}"
|
||||
|
||||
# Write master credentials file for reference
|
||||
cat > "${ENV_DIR}/credentials.env" <<EOF
|
||||
# LetsBe Cloud Credentials - Generated $(date -Iseconds)
|
||||
# Customer: ${customer}
|
||||
# Domain: ${domain}
|
||||
# Company: ${company_name}
|
||||
#
|
||||
# KEEP THIS FILE SECURE - Contains all generated passwords
|
||||
#
|
||||
|
||||
# WordPress
|
||||
WORDPRESS_DB_USER=${wordpress_db_user}
|
||||
WORDPRESS_DB_PASSWORD=${wordpress_db_password}
|
||||
WORDPRESS_MARIADB_ROOT_PASSWORD=${wordpresss_mariadb_root_password}
|
||||
|
||||
# Nextcloud
|
||||
NEXTCLOUD_ADMIN_PASSWORD=${nextcloud_admin_password}
|
||||
NEXTCLOUD_POSTGRES_USER=${nextcloud_postgres_user}
|
||||
NEXTCLOUD_POSTGRES_PASSWORD=${nextcloud_postgres_password}
|
||||
|
||||
# Nextcloud Talk HPB / TURN
|
||||
TURN_SECRET=${turn_secret}
|
||||
SIGNALING_SECRET=${signaling_secret}
|
||||
INTERNAL_SECRET=${internal_secret}
|
||||
SERVER_IP=${server_ip}
|
||||
|
||||
# Listmonk
|
||||
LISTMONK_ADMIN_USER=${listmonk_admin_username}
|
||||
LISTMONK_ADMIN_PASSWORD=${listmonk_admin_password}
|
||||
|
||||
# MinIO
|
||||
MINIO_ROOT_USER=${minio_root_user}
|
||||
MINIO_ROOT_PASSWORD=${minio_root_password}
|
||||
|
||||
# Keycloak
|
||||
KEYCLOAK_ADMIN_PASSWORD=${keycloak_admin_password}
|
||||
|
||||
# Portainer
|
||||
PORTAINER_ADMIN_USER=${portainer_admin_user}
|
||||
PORTAINER_ADMIN_PASSWORD=${portainer_admin_password}
|
||||
|
||||
# Sysadmin Agent
|
||||
# Note: Registration token is generated by local_bootstrap.sh after license validation.
|
||||
# It calls the local orchestrator to create a one-time registration token.
|
||||
# After initial registration, agent credentials are persisted to ~/.letsbe-agent/credentials.json
|
||||
# SYSADMIN_REGISTRATION_TOKEN will be written to sysadmin-credentials.env by bootstrap
|
||||
# SYSADMIN_AGENT_TOKEN=${sysadmin_agent_token} # Deprecated
|
||||
|
||||
# ============ HUB / ORCHESTRATOR ============
|
||||
# License key (validated during bootstrap)
|
||||
LICENSE_KEY=${license_key}
|
||||
HUB_API_KEY=${hub_api_key}
|
||||
HUB_URL=${hub_url}
|
||||
HUB_TELEMETRY_ENABLED=${hub_telemetry_enabled}
|
||||
INSTANCE_ID=${instance_id}
|
||||
|
||||
# Local orchestrator credentials
|
||||
ORCHESTRATOR_DB_PASSWORD=${orchestrator_db_password}
|
||||
ADMIN_API_KEY=${admin_api_key}
|
||||
|
||||
# Local agent authentication (shared between orchestrator and sysadmin agent)
|
||||
LOCAL_AGENT_KEY=${local_agent_key}
|
||||
EOF
|
||||
|
||||
# Add Docker Hub section if docker_user was provided
|
||||
if [[ -n "${docker_user}" ]]; then
|
||||
cat >> "${ENV_DIR}/credentials.env" <<EOF
|
||||
|
||||
# Docker Hub
|
||||
DOCKER_HUB_USER=${docker_user}
|
||||
# Note: Token not stored for security - regenerate from Docker Hub if needed
|
||||
EOF
|
||||
fi
|
||||
|
||||
chmod 600 "${ENV_DIR}/credentials.env"
|
||||
|
||||
# Write Portainer admin password file (for automated admin setup)
|
||||
# This file is mounted into the Portainer container and used with --admin-password-file
|
||||
# NOTE: --admin-password-file expects PLAIN TEXT password, NOT bcrypt hash
|
||||
# (--admin-password expects bcrypt hash, but --admin-password-file expects plain text)
|
||||
printf '%s' "${portainer_admin_password}" > "${ENV_DIR}/portainer_admin_password.txt"
|
||||
chmod 600 "${ENV_DIR}/portainer_admin_password.txt"
|
||||
log_info "Portainer admin password file saved to: ${ENV_DIR}/portainer_admin_password.txt"
|
||||
|
||||
log_info "Environment setup complete."
|
||||
log_info "Credentials saved to: ${ENV_DIR}/credentials.env"
|
||||
|
|
@ -0,0 +1,258 @@
|
|||
#!/bin/bash
|
||||
#
|
||||
# Local Orchestrator Bootstrap (with License Validation)
|
||||
#
|
||||
# This script runs AFTER docker-compose up to:
|
||||
# 1. VALIDATE LICENSE with LetsBe Hub (REQUIRED for official installations)
|
||||
# 2. Wait for orchestrator health check (includes migrations via container startup)
|
||||
# 3. Get local tenant ID (for verification/logging)
|
||||
# 4. Write simplified credentials file
|
||||
#
|
||||
# NOTE: Database migrations are now run by the orchestrator container on startup
|
||||
#
|
||||
# IMPORTANT: Agent registration is handled via LOCAL_AGENT_KEY
|
||||
# from docker-compose.yml environment, NOT registration tokens.
|
||||
#
|
||||
# This script is idempotent - safe to run multiple times.
|
||||
#
|
||||
# Usage:
|
||||
# HUB_URL="https://hub.letsbe.biz" \
|
||||
# LICENSE_KEY="lb_inst_..." \
|
||||
# INSTANCE_ID="acme-orchestrator" \
|
||||
# ADMIN_API_KEY="admin_key" \
|
||||
# CUSTOMER="acme" \
|
||||
# bash local_bootstrap.sh
|
||||
|
||||
# Abort on errors, unset variables, and failed pipeline stages.
set -euo pipefail

# ============ CONFIGURATION ============
# ${VAR:-default} supplies a fallback; ${VAR:?msg} aborts when a required
# variable is missing, so misconfiguration fails before any work is done.

HUB_URL="${HUB_URL:-https://hub.letsbe.biz}"
LICENSE_KEY="${LICENSE_KEY:-}"
INSTANCE_ID="${INSTANCE_ID:?INSTANCE_ID required}"
ORCHESTRATOR_URL="${ORCHESTRATOR_URL:-http://localhost:8100}"
ADMIN_API_KEY="${ADMIN_API_KEY:?ADMIN_API_KEY required}"
CUSTOMER="${CUSTOMER:?CUSTOMER required}"
CREDENTIALS_DIR="${CREDENTIALS_DIR:-/opt/letsbe/env}"

# ============ LOGGING ============
# Timestamped log helpers. Errors and warnings go to stderr; info/success
# go to stdout (callers capturing a function's stdout must redirect these).

log_info()    { echo "[BOOTSTRAP] $(date '+%Y-%m-%d %H:%M:%S') $*"; }
log_error()   { echo "[BOOTSTRAP-ERROR] $(date '+%Y-%m-%d %H:%M:%S') $*" >&2; }
log_success() { echo "[BOOTSTRAP-OK] $(date '+%Y-%m-%d %H:%M:%S') $*"; }
log_warn()    { echo "[BOOTSTRAP-WARN] $(date '+%Y-%m-%d %H:%M:%S') $*" >&2; }
|
||||
|
||||
# ============ LICENSE VALIDATION (FIRST STEP) ============
|
||||
|
||||
# Validate the license key against the LetsBe Hub activation endpoint.
#
# Reads globals:  LICENSE_KEY, HUB_URL, INSTANCE_ID, CREDENTIALS_DIR
# Side effects:   may export HUB_API_KEY and append it to
#                 ${CREDENTIALS_DIR}/hub-credentials.env (mode 600)
# Exits (status 1) when the key is missing or the Hub rejects it — this is
# the gating step for the whole bootstrap.
#
# FIX: the response was previously written to the fixed, predictable path
# /tmp/activation_response.json, which is racy and can be pre-created by any
# local user. A per-run mktemp file is used instead.
validate_license() {
    log_info "Validating license with LetsBe Hub..."

    # Check if license key is provided
    if [ -z "$LICENSE_KEY" ]; then
        log_error "LICENSE_KEY is required but not provided."
        log_error "Please obtain a license key from LetsBe Hub."
        log_error "Add 'license_key' to your config.json and re-run provisioning."
        exit 1
    fi

    # Check if Hub URL is configured
    if [ -z "$HUB_URL" ]; then
        log_error "HUB_URL is required but not provided."
        exit 1
    fi

    # Unpredictable per-run temp file for the activation response.
    local response_file
    response_file=$(mktemp)

    # Call Hub activation endpoint; capture the HTTP status separately from
    # the JSON body.
    local http_code
    http_code=$(curl -s -o "$response_file" -w "%{http_code}" \
        -X POST "${HUB_URL}/api/v1/instances/activate" \
        -H "Content-Type: application/json" \
        -d "{\"license_key\": \"${LICENSE_KEY}\", \"instance_id\": \"${INSTANCE_ID}\"}")

    if [ "$http_code" != "200" ]; then
        log_error "License validation failed (HTTP $http_code)"

        if [ -f "$response_file" ]; then
            local error_msg
            local error_code

            # The Hub returns either {error, code} or {detail: {error, code}};
            # fall back to placeholders when the body isn't parseable JSON.
            error_msg=$(jq -r '.error // .detail.error // "Unknown error"' "$response_file" 2>/dev/null || echo "Unknown error")
            error_code=$(jq -r '.code // .detail.code // "unknown"' "$response_file" 2>/dev/null || echo "unknown")

            log_error "Error: $error_msg (code: $error_code)"

            case "$error_code" in
                "invalid_license")
                    log_error "The provided license key is invalid."
                    log_error "Please verify your license_key in config.json."
                    ;;
                "expired")
                    log_error "Your license has expired."
                    log_error "Please contact LetsBe to renew your license."
                    ;;
                "suspended")
                    log_error "Your license has been suspended."
                    log_error "Please contact LetsBe support."
                    ;;
                "instance_not_found")
                    log_error "Instance ID '$INSTANCE_ID' not found in Hub."
                    log_error "Please ensure your instance was created in LetsBe Hub."
                    ;;
                *)
                    log_error "Please contact LetsBe support with error code: $error_code"
                    ;;
            esac
        fi

        rm -f "$response_file"
        exit 1
    fi

    log_success "License validated successfully!"

    # The Hub may hand back a fresh hub_api_key; the sentinel "USE_EXISTING"
    # means keep whatever key is already configured.
    local hub_api_key
    hub_api_key=$(jq -r '.hub_api_key // empty' "$response_file" 2>/dev/null || echo "")

    if [ -n "$hub_api_key" ] && [ "$hub_api_key" != "USE_EXISTING" ]; then
        log_info "Received hub_api_key from activation"
        export HUB_API_KEY="$hub_api_key"

        # Save to credentials file
        mkdir -p "${CREDENTIALS_DIR}"
        echo "HUB_API_KEY=${hub_api_key}" >> "${CREDENTIALS_DIR}/hub-credentials.env"
        chmod 600 "${CREDENTIALS_DIR}/hub-credentials.env"
        log_info "Hub API key saved to ${CREDENTIALS_DIR}/hub-credentials.env"
    fi

    rm -f "$response_file"
}
|
||||
|
||||
# ============ ORCHESTRATOR FUNCTIONS ============
|
||||
|
||||
# Poll the orchestrator's /health endpoint until it responds, trying up to
# max_attempts times with a 2-second pause between attempts.
# Returns 0 when healthy, 1 on timeout.
wait_for_orchestrator() {
    log_info "Waiting for orchestrator to be ready..."
    local max_attempts=60
    local attempt

    for (( attempt = 1; attempt <= max_attempts; attempt++ )); do
        # -f makes curl fail on HTTP errors; -s keeps the poll quiet.
        if curl -sf "${ORCHESTRATOR_URL}/health" > /dev/null 2>&1; then
            log_success "Orchestrator is ready"
            return 0
        fi
        log_info "Attempt $attempt/$max_attempts - waiting..."
        sleep 2
    done

    log_error "Orchestrator not ready after ${max_attempts} attempts"
    return 1
}
|
||||
|
||||
# Run Alembic migrations inside the orchestrator container.
# Kept for manual/recovery use — per the header comments, normal startup
# runs migrations automatically and main() does not call this.
run_migrations() {
    log_info "Running database migrations..."

    # Match either a plain "orchestrator" container or a customer-prefixed one;
    # take the first match.
    local container
    container=$(docker ps --format '{{.Names}}' | grep -E "(orchestrator|${CUSTOMER}.*orchestrator)" | head -1)

    if [ -z "$container" ]; then
        log_error "Could not find orchestrator container"
        return 1
    fi

    docker exec "$container" alembic upgrade head
    log_success "Migrations complete"
}
|
||||
|
||||
# Fetch the local tenant ID from the orchestrator's meta endpoint and print
# it — and nothing else — on stdout, so callers can capture it with $(...).
# Returns 1 (after logging to stderr) when the endpoint gives no tenant_id.
#
# FIX: log_info/log_success write to stdout, so the previous version leaked
# its log lines into the command substitution in main()
# (tenant_id=$(get_local_tenant_id) captured the logs along with the ID).
# Progress logs are now redirected to stderr inside this function.
get_local_tenant_id() {
    log_info "Getting local tenant ID..." >&2
    local response
    response=$(curl -sf "${ORCHESTRATOR_URL}/api/v1/meta/instance")
    local tenant_id
    tenant_id=$(echo "$response" | jq -r '.tenant_id')

    # jq prints the literal string "null" for a missing key.
    if [ "$tenant_id" == "null" ] || [ -z "$tenant_id" ]; then
        log_error "Failed to get tenant_id from /api/v1/meta/instance"
        log_error "Response: $response"
        return 1
    fi

    log_success "Tenant ID: $tenant_id" >&2
    echo "$tenant_id"
}
|
||||
|
||||
# Write the agent-facing credentials file (sysadmin-credentials.env).
#   $1 = tenant_id
# Deliberately excludes ADMIN_API_KEY and registration tokens: in LOCAL_MODE
# the agent authenticates with LOCAL_AGENT_KEY supplied via docker-compose.yml,
# so those secrets are kept out of this file.
write_credentials() {
    local tenant_id=$1
    local credentials_file="${CREDENTIALS_DIR}/sysadmin-credentials.env"

    log_info "Writing credentials to ${credentials_file}..."

    mkdir -p "${CREDENTIALS_DIR}"

    cat > "${credentials_file}" <<EOF
# LetsBe LOCAL_MODE Credentials
# Generated by local_bootstrap.sh at $(date -u '+%Y-%m-%dT%H:%M:%SZ')
#
# Agent Registration: Uses LOCAL_AGENT_KEY from docker-compose.yml
# NOT stored here to minimize secrets at rest
TENANT_ID=${tenant_id}
INSTANCE_ID=${INSTANCE_ID}
EOF

    chmod 600 "${credentials_file}"
    log_success "Credentials written"
}
|
||||
|
||||
# Write ADMIN_API_KEY to a separate, root-only credentials file intended for
# admin scripts — the agent never reads this file. chown failures are ignored
# so the script still works when not running as root.
write_admin_credentials() {
    local admin_creds="${CREDENTIALS_DIR}/admin-credentials.env"

    log_info "Writing admin credentials to ${admin_creds}..."

    mkdir -p "${CREDENTIALS_DIR}"
    cat > "${admin_creds}" <<EOF
# LetsBe Admin Credentials (root only)
# Generated by local_bootstrap.sh at $(date -u '+%Y-%m-%dT%H:%M:%SZ')
ADMIN_API_KEY=${ADMIN_API_KEY}
EOF

    chmod 600 "${admin_creds}"
    chown root:root "${admin_creds}" 2>/dev/null || true
    log_success "Admin credentials written (root-only access)"
}
|
||||
|
||||
# ============ MAIN ============
|
||||
|
||||
# ============ MAIN ============
# Bootstrap sequence: license gate -> orchestrator health -> tenant lookup
# -> credentials files. License validation runs first; if it fails, nothing
# else executes.
main() {
    log_info "Starting local orchestrator bootstrap for: ${CUSTOMER}"
    log_info "Instance ID: ${INSTANCE_ID}"

    # STEP 1: License validation (REQUIRED for official installations).
    validate_license

    # STEP 2: Wait for the orchestrator — migrations run on container startup.
    wait_for_orchestrator

    # STEP 3: Tenant ID lookup (verification/logging).
    local tenant_id
    tenant_id=$(get_local_tenant_id)

    # STEP 4: Credentials. No registration tokens are created here —
    # LOCAL_MODE authenticates the agent with LOCAL_AGENT_KEY.
    write_credentials "${tenant_id}"
    write_admin_credentials

    log_success "Bootstrap complete!"
    log_info "Instance '${INSTANCE_ID}' is now licensed and activated"
    log_info ""
    log_info "Agent registration: Uses LOCAL_AGENT_KEY from docker-compose.yml"
    log_info "Agent should register with orchestrator within 30 seconds"
}

main "$@"
|
||||
|
|
@ -0,0 +1,511 @@
|
|||
#!/bin/bash
|
||||
# =============================================================================
|
||||
# LetsBe Restore Script
|
||||
# =============================================================================
|
||||
#
|
||||
# Restores backups created by backups.sh.
|
||||
#
|
||||
# Usage:
|
||||
# restore.sh list List available local backups
|
||||
# restore.sh list-remote List available remote backups
|
||||
# restore.sh download <DATE> Download a remote backup set locally
|
||||
# restore.sh postgres <TOOL> <FILE> Restore a PostgreSQL database
|
||||
# restore.sh mysql <TOOL> <FILE> Restore a MySQL/MariaDB database
|
||||
# restore.sh mongo <TOOL> <FILE> Restore a MongoDB database
|
||||
# restore.sh env <FILE> Restore env files
|
||||
# restore.sh configs <FILE> Restore config files
|
||||
# restore.sh nginx <FILE> Restore nginx configs
|
||||
# restore.sh full <DATE> Full restore from a backup date
|
||||
#
|
||||
# Examples:
|
||||
# restore.sh list
|
||||
# restore.sh postgres chatwoot /tmp/letsbe-backups/pg_chatwoot_20260207_020000.sql.gz
|
||||
# restore.sh env /tmp/letsbe-backups/dir_env-files_20260207_020000.tar.gz
|
||||
# restore.sh download 20260207_020000
|
||||
# restore.sh full 20260207_020000
|
||||
#
|
||||
# IMPORTANT:
|
||||
# - Always stop the tool's application containers before restoring its database.
|
||||
# - Database containers must remain running during restore.
|
||||
# - After restore, restart the full tool stack.
|
||||
#
|
||||
# =============================================================================
|
||||
|
||||
# Abort on unset variables and propagate pipeline failures.
# NOTE(review): -e is omitted here — presumably so individual restore steps
# can fail and be reported without killing the whole run; confirm intent.
set -uo pipefail

LETSBE_BASE="/opt/letsbe"          # root of the LetsBe installation
BACKUP_DIR="/tmp/letsbe-backups"   # local staging area for backup files
RCLONE_REMOTE="backup"             # rclone remote used for offsite backups
|
||||
|
||||
# =============================================================================
|
||||
# HELPERS
|
||||
# =============================================================================
|
||||
|
||||
# Print a namespaced progress message on stdout.
log() {
    printf '[RESTORE] %s\n' "$*"
}
|
||||
|
||||
# Print an error message on stderr and abort the script with status 1.
die() {
    printf '[RESTORE ERROR] %s\n' "$*" >&2
    exit 1
}
|
||||
|
||||
# Abort (via die) unless $1 exists and is a regular file.
require_file() {
    local path=$1
    if [[ ! -f "$path" ]]; then
        die "File not found: $path"
    fi
}
|
||||
|
||||
# Find a running container by suffix pattern
|
||||
# Echo the name of the first running container whose name ends with $1
# (either exactly, or after a dash-separated prefix). Empty output if none.
find_container() {
    local suffix=$1
    docker ps --format '{{.Names}}' | grep -E "(^|-)${suffix}$" | head -1
}
|
||||
|
||||
# =============================================================================
|
||||
# COMMANDS
|
||||
# =============================================================================
|
||||
|
||||
# List local backup files in BACKUP_DIR (largest first), or explain that
# no backup directory exists yet.
cmd_list() {
    log "Available local backups in ${BACKUP_DIR}:"
    echo ""
    if [[ ! -d "$BACKUP_DIR" ]]; then
        echo " No backup directory found."
        return
    fi
    ls -lhS "$BACKUP_DIR"/ 2>/dev/null || echo " (empty)"
}
|
||||
|
||||
# List remote backup sets (daily and weekly) via rclone.
# Dies when rclone is missing or the expected remote is not configured.
cmd_list_remote() {
    command -v rclone &> /dev/null || die "rclone not installed"
    rclone listremotes 2>/dev/null | grep -q "^${RCLONE_REMOTE}:" \
        || die "rclone remote '${RCLONE_REMOTE}' not configured"

    log "Available remote backups:"
    echo ""
    echo "Daily:"
    # Weekly sets live under a "weekly" subfolder; keep them out of the
    # daily listing.
    rclone lsd "${RCLONE_REMOTE}:letsbe-backups/" 2>/dev/null | grep -v "weekly" | awk '{print " " $NF}'
    echo ""
    echo "Weekly:"
    rclone lsd "${RCLONE_REMOTE}:letsbe-backups/weekly/" 2>/dev/null | awk '{print " " $NF}'
}
|
||||
|
||||
# Download one remote backup set into BACKUP_DIR.
#   $1 = backup date string (directory name on the remote)
cmd_download() {
    local date_str=$1
    command -v rclone &> /dev/null || die "rclone not installed"

    local remote_path="${RCLONE_REMOTE}:letsbe-backups/${date_str}/"
    log "Downloading backup from ${remote_path}..."
    mkdir -p "$BACKUP_DIR"
    rclone copy "$remote_path" "$BACKUP_DIR/" --progress
    log "Download complete. Files in ${BACKUP_DIR}/"
}
|
||||
|
||||
# Restore one tool's PostgreSQL database from a plain or gzipped SQL dump.
#   $1 = tool name (a key of the case map below)
#   $2 = path to the dump file (pg_<tool>_<date>.sql[.gz])
# The database container must be running; the tool's application containers
# should be stopped first. The target database is DROPPED and recreated after
# an interactive confirmation.
#
# FIX: read now uses -r so backslashes typed at the prompt are not mangled
# (ShellCheck SC2162).
cmd_restore_postgres() {
    local tool=$1
    local file=$2
    require_file "$file"

    # Map tool name to container suffix, db name, and user
    local container db_name db_user
    case "$tool" in
        chatwoot) container="chatwoot-postgres"; db_name="chatwoot_production"; db_user="chatwoot" ;;
        nextcloud) container="nextcloud-postgres"; db_name="nextcloud"; db_user="nextcloud" ;;
        keycloak) container="keycloak-db"; db_name="keycloak"; db_user="keycloak" ;;
        n8n) container="n8n-postgres"; db_name="n8n"; db_user="postgres" ;;
        calcom) container="calcom-postgres"; db_name="calcom"; db_user="postgres" ;;
        umami) container="umami-db"; db_name="umami"; db_user="postgres" ;;
        nocodb) container="nocodb-postgres"; db_name="nocodb"; db_user="postgres" ;;
        typebot) container="typebot-db"; db_name="typebot"; db_user="postgres" ;;
        windmill) container="windmill-db"; db_name="windmill"; db_user="postgres" ;;
        glitchtip) container="glitchtip-postgres"; db_name="postgres"; db_user="postgres" ;;
        penpot) container="penpot-postgres"; db_name="penpot"; db_user="postgres" ;;
        gitea) container="gitea-db"; db_name="gitea"; db_user="postgres" ;;
        odoo) container="odoo-postgres"; db_name="postgres"; db_user="postgres" ;;
        listmonk) container="listmonk-db"; db_name="listmonk"; db_user="postgres" ;;
        documenso) container="documenso-db"; db_name="documenso_db"; db_user="postgres" ;;
        redash) container="redash-postgres"; db_name="postgres"; db_user="postgres" ;;
        activepieces) container="activepieces-postgres"; db_name="activepieces"; db_user="postgres" ;;
        orchestrator) container="orchestrator-db"; db_name="orchestrator"; db_user="orchestrator" ;;
        *) die "Unknown PostgreSQL tool: $tool. Use one of: chatwoot, nextcloud, keycloak, n8n, calcom, umami, nocodb, typebot, windmill, glitchtip, penpot, gitea, odoo, listmonk, documenso, redash, activepieces, orchestrator" ;;
    esac

    local actual_container
    actual_container=$(find_container "$container")
    [[ -z "$actual_container" ]] && die "Container matching '$container' not found. Is it running?"

    log "Restoring PostgreSQL: $tool"
    log " Container: $actual_container"
    log " Database: $db_name"
    log " File: $file"
    echo ""
    read -r -p "WARNING: This will DROP and recreate database '$db_name'. Continue? (yes/no): " confirm
    [[ "$confirm" == "yes" ]] || die "Restore cancelled."

    log "Dropping and recreating database..."
    # Kill any open sessions first, otherwise DROP DATABASE would fail;
    # the trailing "postgres" connects to the maintenance DB instead of
    # the one being dropped.
    docker exec "$actual_container" psql -U "$db_user" -c "SELECT pg_terminate_backend(pid) FROM pg_stat_activity WHERE datname = '$db_name' AND pid <> pg_backend_pid();" postgres 2>/dev/null || true
    docker exec "$actual_container" psql -U "$db_user" -c "DROP DATABASE IF EXISTS \"$db_name\";" postgres
    docker exec "$actual_container" psql -U "$db_user" -c "CREATE DATABASE \"$db_name\";" postgres

    log "Restoring from backup..."
    if [[ "$file" == *.gz ]]; then
        gunzip -c "$file" | docker exec -i "$actual_container" psql -U "$db_user" "$db_name"
    else
        docker exec -i "$actual_container" psql -U "$db_user" "$db_name" < "$file"
    fi

    log "PostgreSQL restore complete for $tool."
    log "Restart the $tool application containers to reconnect."
}
|
||||
|
||||
# Restore one tool's MySQL/MariaDB database from a plain or gzipped SQL dump.
#   $1 = tool name (wordpress | ghost)
#   $2 = path to the dump file
# Reads the root password from ${LETSBE_BASE}/env/credentials.env.
#
# FIX: read now uses -r so backslashes typed at the prompt are not mangled
# (ShellCheck SC2162).
cmd_restore_mysql() {
    local tool=$1
    local file=$2
    require_file "$file"

    local container db_name db_user
    case "$tool" in
        wordpress) container="wordpress-mysql"; db_name="wordpress"; db_user="root" ;;
        ghost) container="ghost-db"; db_name="ghost"; db_user="root" ;;
        *) die "Unknown MySQL tool: $tool. Use one of: wordpress, ghost" ;;
    esac

    local actual_container
    actual_container=$(find_container "$container")
    [[ -z "$actual_container" ]] && die "Container matching '$container' not found. Is it running?"

    log "Restoring MySQL: $tool"
    log " Container: $actual_container"
    log " Database: $db_name"
    log " File: $file"
    echo ""
    read -r -p "WARNING: This will overwrite database '$db_name'. Continue? (yes/no): " confirm
    [[ "$confirm" == "yes" ]] || die "Restore cancelled."

    log "Restoring from backup..."
    # Read root password from credentials
    local creds_file="${LETSBE_BASE}/env/credentials.env"
    local db_pass=""
    if [[ "$tool" == "wordpress" ]]; then
        db_pass=$(grep "^WORDPRESS_MARIADB_ROOT_PASSWORD=" "$creds_file" 2>/dev/null | cut -d'=' -f2-)
    elif [[ "$tool" == "ghost" ]]; then
        db_pass=$(grep "^GHOST_MYSQL_PASSWORD=" "$creds_file" 2>/dev/null | cut -d'=' -f2-)
    fi
    [[ -z "$db_pass" ]] && die "Could not read database password from $creds_file"

    # NOTE(review): -p"$db_pass" exposes the password in the container's
    # process list; consider `docker exec -e MYSQL_PWD=... mysql ...` instead.
    if [[ "$file" == *.gz ]]; then
        gunzip -c "$file" | docker exec -i "$actual_container" mysql -u"$db_user" -p"$db_pass" "$db_name"
    else
        docker exec -i "$actual_container" mysql -u"$db_user" -p"$db_pass" "$db_name" < "$file"
    fi

    log "MySQL restore complete for $tool."
    log "Restart the $tool application containers to reconnect."
}
|
||||
|
||||
# Restore one tool's MongoDB database from a mongodump archive (optionally
# gzipped). Currently only librechat is supported.
#   $1 = tool name (librechat)
#   $2 = path to the archive file
#
# FIX: read now uses -r so backslashes typed at the prompt are not mangled
# (ShellCheck SC2162).
cmd_restore_mongo() {
    local tool=$1
    local file=$2
    require_file "$file"

    local container db_name
    case "$tool" in
        librechat) container="librechat-mongodb"; db_name="LibreChat" ;;
        *) die "Unknown MongoDB tool: $tool. Use: librechat" ;;
    esac

    local actual_container
    actual_container=$(find_container "$container")
    [[ -z "$actual_container" ]] && die "Container matching '$container' not found. Is it running?"

    log "Restoring MongoDB: $tool"
    log " Container: $actual_container"
    log " Database: $db_name"
    log " File: $file"
    echo ""
    read -r -p "WARNING: This will drop and restore database '$db_name'. Continue? (yes/no): " confirm
    [[ "$confirm" == "yes" ]] || die "Restore cancelled."

    log "Restoring from backup..."
    # --drop removes existing collections before restoring; --archive reads
    # the dump from stdin.
    if [[ "$file" == *.gz ]]; then
        gunzip -c "$file" | docker exec -i "$actual_container" mongorestore --db "$db_name" --drop --archive
    else
        docker exec -i "$actual_container" mongorestore --db "$db_name" --drop --archive < "$file"
    fi

    log "MongoDB restore complete for $tool."
    log "Restart the $tool application containers to reconnect."
}
|
||||
|
||||
# Restore env files from a dir_env-files_*.tar.gz archive into
# ${LETSBE_BASE}/env/, keeping a timestamped copy of the current directory.
#   $1 = path to the archive
#
# FIX: read now uses -r so backslashes typed at the prompt are not mangled
# (ShellCheck SC2162).
cmd_restore_env() {
    local file=$1
    require_file "$file"

    log "Restoring env files from: $file"
    echo ""
    read -r -p "WARNING: This will overwrite files in ${LETSBE_BASE}/env/. Continue? (yes/no): " confirm
    [[ "$confirm" == "yes" ]] || die "Restore cancelled."

    # Backup current env files first
    local timestamp
    timestamp=$(date +%Y%m%d_%H%M%S)
    if [[ -d "${LETSBE_BASE}/env" ]]; then
        log "Backing up current env files to ${LETSBE_BASE}/env.pre-restore.${timestamp}..."
        cp -a "${LETSBE_BASE}/env" "${LETSBE_BASE}/env.pre-restore.${timestamp}"
    fi

    log "Extracting..."
    # Archive paths are relative to LETSBE_BASE (they contain env/...).
    tar xzf "$file" -C "${LETSBE_BASE}/"
    # Re-tighten permissions in case the archive carried looser modes.
    chmod 600 "${LETSBE_BASE}/env/"*.env 2>/dev/null || true
    log "Env files restored."
}
|
||||
|
||||
# Restore config files from a dir_letsbe-config_*.tar.gz archive into
# ${LETSBE_BASE}/config/, keeping a timestamped copy of the current directory.
#   $1 = path to the archive
#
# FIX: read now uses -r so backslashes typed at the prompt are not mangled
# (ShellCheck SC2162).
cmd_restore_configs() {
    local file=$1
    require_file "$file"

    log "Restoring config files from: $file"
    echo ""
    read -r -p "WARNING: This will overwrite files in ${LETSBE_BASE}/config/. Continue? (yes/no): " confirm
    [[ "$confirm" == "yes" ]] || die "Restore cancelled."

    # Keep a pre-restore snapshot of the current configs.
    local timestamp
    timestamp=$(date +%Y%m%d_%H%M%S)
    if [[ -d "${LETSBE_BASE}/config" ]]; then
        cp -a "${LETSBE_BASE}/config" "${LETSBE_BASE}/config.pre-restore.${timestamp}"
    fi

    tar xzf "$file" -C "${LETSBE_BASE}/"
    log "Config files restored."
}
|
||||
|
||||
# Restore nginx configs from a dir_nginx-configs_*.tar.gz archive into
# ${LETSBE_BASE}/nginx/, keeping a timestamped copy of the current directory.
# nginx must be restarted afterwards to pick up the restored configs.
#   $1 = path to the archive
#
# FIX: read now uses -r so backslashes typed at the prompt are not mangled
# (ShellCheck SC2162).
cmd_restore_nginx() {
    local file=$1
    require_file "$file"

    log "Restoring nginx configs from: $file"
    echo ""
    read -r -p "WARNING: This will overwrite files in ${LETSBE_BASE}/nginx/. Continue? (yes/no): " confirm
    [[ "$confirm" == "yes" ]] || die "Restore cancelled."

    # Keep a pre-restore snapshot of the current nginx configs.
    local timestamp
    timestamp=$(date +%Y%m%d_%H%M%S)
    if [[ -d "${LETSBE_BASE}/nginx" ]]; then
        cp -a "${LETSBE_BASE}/nginx" "${LETSBE_BASE}/nginx.pre-restore.${timestamp}"
    fi

    tar xzf "$file" -C "${LETSBE_BASE}/"
    log "Nginx configs restored."
    log "Run: systemctl restart nginx"
}
|
||||
|
||||
cmd_full_restore() {
|
||||
local date_str=$1
|
||||
local backup_path="$BACKUP_DIR"
|
||||
|
||||
log "=== Full System Restore for date: $date_str ==="
|
||||
echo ""
|
||||
echo "This will restore ALL databases and configuration files from the backup."
|
||||
echo "Make sure all tool containers are stopped (except database containers)."
|
||||
echo ""
|
||||
read -p "Continue with full restore? (yes/no): " confirm
|
||||
[[ "$confirm" == "yes" ]] || die "Full restore cancelled."
|
||||
|
||||
# Check if files exist locally, download if not
|
||||
local pg_count
|
||||
pg_count=$(ls "${backup_path}"/pg_*"${date_str}"* 2>/dev/null | wc -l)
|
||||
if [[ "$pg_count" -eq 0 ]]; then
|
||||
log "Backup files not found locally. Attempting remote download..."
|
||||
cmd_download "$date_str"
|
||||
fi
|
||||
|
||||
# Restore env files
|
||||
local env_file="${backup_path}/dir_env-files_${date_str}.tar.gz"
|
||||
if [[ -f "$env_file" ]]; then
|
||||
log "Restoring env files..."
|
||||
# Non-interactive for full restore (already confirmed above)
|
||||
local timestamp
|
||||
timestamp=$(date +%Y%m%d_%H%M%S)
|
||||
[[ -d "${LETSBE_BASE}/env" ]] && cp -a "${LETSBE_BASE}/env" "${LETSBE_BASE}/env.pre-restore.${timestamp}"
|
||||
tar xzf "$env_file" -C "${LETSBE_BASE}/"
|
||||
chmod 600 "${LETSBE_BASE}/env/"*.env 2>/dev/null || true
|
||||
log " Env files restored."
|
||||
fi
|
||||
|
||||
# Restore configs
|
||||
local cfg_file="${backup_path}/dir_letsbe-config_${date_str}.tar.gz"
|
||||
if [[ -f "$cfg_file" ]]; then
|
||||
log "Restoring config files..."
|
||||
tar xzf "$cfg_file" -C "${LETSBE_BASE}/"
|
||||
log " Config files restored."
|
||||
fi
|
||||
|
||||
# Restore nginx configs
|
||||
local nginx_file="${backup_path}/dir_nginx-configs_${date_str}.tar.gz"
|
||||
if [[ -f "$nginx_file" ]]; then
|
||||
log "Restoring nginx configs..."
|
||||
tar xzf "$nginx_file" -C "${LETSBE_BASE}/"
|
||||
log " Nginx configs restored."
|
||||
fi
|
||||
|
||||
# Restore all PostgreSQL databases found for this date
|
||||
log "Restoring PostgreSQL databases..."
|
||||
for pg_file in "${backup_path}"/pg_*"${date_str}"*.sql.gz; do
|
||||
[[ -f "$pg_file" ]] || continue
|
||||
# Extract tool name from filename: pg_<tool>_<date>.sql.gz
|
||||
local tool_name
|
||||
tool_name=$(basename "$pg_file" | sed "s/^pg_//;s/_${date_str}.*//")
|
||||
log " Restoring PostgreSQL: $tool_name"
|
||||
|
||||
# Find container and restore without interactive prompt
|
||||
local container db_name db_user
|
||||
case "$tool_name" in
|
||||
chatwoot) container="chatwoot-postgres"; db_name="chatwoot_production"; db_user="chatwoot" ;;
|
||||
nextcloud) container="nextcloud-postgres"; db_name="nextcloud"; db_user="nextcloud" ;;
|
||||
keycloak) container="keycloak-db"; db_name="keycloak"; db_user="keycloak" ;;
|
||||
n8n) container="n8n-postgres"; db_name="n8n"; db_user="postgres" ;;
|
||||
calcom) container="calcom-postgres"; db_name="calcom"; db_user="postgres" ;;
|
||||
umami) container="umami-db"; db_name="umami"; db_user="postgres" ;;
|
||||
nocodb) container="nocodb-postgres"; db_name="nocodb"; db_user="postgres" ;;
|
||||
typebot) container="typebot-db"; db_name="typebot"; db_user="postgres" ;;
|
||||
windmill) container="windmill-db"; db_name="windmill"; db_user="postgres" ;;
|
||||
glitchtip) container="glitchtip-postgres"; db_name="postgres"; db_user="postgres" ;;
|
||||
penpot) container="penpot-postgres"; db_name="penpot"; db_user="postgres" ;;
|
||||
gitea) container="gitea-db"; db_name="gitea"; db_user="postgres" ;;
|
||||
odoo) container="odoo-postgres"; db_name="postgres"; db_user="postgres" ;;
|
||||
listmonk) container="listmonk-db"; db_name="listmonk"; db_user="postgres" ;;
|
||||
documenso) container="documenso-db"; db_name="documenso_db"; db_user="postgres" ;;
|
||||
redash) container="redash-postgres"; db_name="postgres"; db_user="postgres" ;;
|
||||
activepieces) container="activepieces-postgres"; db_name="activepieces"; db_user="postgres" ;;
|
||||
orchestrator) container="orchestrator-db"; db_name="orchestrator"; db_user="orchestrator" ;;
|
||||
*) log " Skipping unknown tool: $tool_name"; continue ;;
|
||||
esac
|
||||
|
||||
local actual_container
|
||||
actual_container=$(find_container "$container")
|
||||
if [[ -z "$actual_container" ]]; then
|
||||
log " WARNING: Container '$container' not running, skipping $tool_name"
|
||||
continue
|
||||
fi
|
||||
|
||||
docker exec "$actual_container" psql -U "$db_user" -c "SELECT pg_terminate_backend(pid) FROM pg_stat_activity WHERE datname = '$db_name' AND pid <> pg_backend_pid();" postgres 2>/dev/null || true
|
||||
docker exec "$actual_container" psql -U "$db_user" -c "DROP DATABASE IF EXISTS \"$db_name\";" postgres 2>/dev/null || true
|
||||
docker exec "$actual_container" psql -U "$db_user" -c "CREATE DATABASE \"$db_name\";" postgres 2>/dev/null || true
|
||||
gunzip -c "$pg_file" | docker exec -i "$actual_container" psql -U "$db_user" "$db_name" > /dev/null 2>&1
|
||||
log " OK: $tool_name"
|
||||
done
|
||||
|
||||
# Restore MySQL databases
|
||||
log "Restoring MySQL databases..."
|
||||
for mysql_file in "${backup_path}"/mysql_*"${date_str}"*.sql.gz; do
|
||||
[[ -f "$mysql_file" ]] || continue
|
||||
local tool_name
|
||||
tool_name=$(basename "$mysql_file" | sed "s/^mysql_//;s/_${date_str}.*//")
|
||||
log " Restoring MySQL: $tool_name"
|
||||
|
||||
local container db_name db_pass
|
||||
case "$tool_name" in
|
||||
wordpress)
|
||||
container="wordpress-mysql"; db_name="wordpress"
|
||||
db_pass=$(grep "^WORDPRESS_MARIADB_ROOT_PASSWORD=" "${LETSBE_BASE}/env/credentials.env" 2>/dev/null | cut -d'=' -f2-)
|
||||
;;
|
||||
ghost)
|
||||
container="ghost-db"; db_name="ghost"
|
||||
db_pass=$(grep "^GHOST_MYSQL_PASSWORD=" "${LETSBE_BASE}/env/credentials.env" 2>/dev/null | cut -d'=' -f2-)
|
||||
;;
|
||||
*) log " Skipping unknown MySQL tool: $tool_name"; continue ;;
|
||||
esac
|
||||
|
||||
local actual_container
|
||||
actual_container=$(find_container "$container")
|
||||
if [[ -z "$actual_container" ]]; then
|
||||
log " WARNING: Container '$container' not running, skipping $tool_name"
|
||||
continue
|
||||
fi
|
||||
|
||||
if [[ -n "$db_pass" ]]; then
|
||||
gunzip -c "$mysql_file" | docker exec -i "$actual_container" mysql -uroot -p"$db_pass" "$db_name" 2>/dev/null
|
||||
log " OK: $tool_name"
|
||||
else
|
||||
log " SKIP: No password found for $tool_name"
|
||||
fi
|
||||
done
|
||||
|
||||
# Restore MongoDB databases
|
||||
log "Restoring MongoDB databases..."
|
||||
for mongo_file in "${backup_path}"/mongo_*"${date_str}"*.archive.gz; do
|
||||
[[ -f "$mongo_file" ]] || continue
|
||||
local tool_name
|
||||
tool_name=$(basename "$mongo_file" | sed "s/^mongo_//;s/_${date_str}.*//")
|
||||
log " Restoring MongoDB: $tool_name"
|
||||
|
||||
case "$tool_name" in
|
||||
librechat)
|
||||
local actual_container
|
||||
actual_container=$(find_container "librechat-mongodb")
|
||||
if [[ -n "$actual_container" ]]; then
|
||||
gunzip -c "$mongo_file" | docker exec -i "$actual_container" mongorestore --db LibreChat --drop --archive 2>/dev/null
|
||||
log " OK: $tool_name"
|
||||
else
|
||||
log " WARNING: Container not running, skipping"
|
||||
fi
|
||||
;;
|
||||
*) log " Skipping unknown MongoDB tool: $tool_name" ;;
|
||||
esac
|
||||
done
|
||||
|
||||
log ""
|
||||
log "=== Full Restore Complete ==="
|
||||
log "Now restart all tool stacks:"
|
||||
log " for stack in ${LETSBE_BASE}/stacks/*/docker-compose.yml; do"
|
||||
log " docker-compose -f \"\$stack\" restart"
|
||||
log " done"
|
||||
log " systemctl restart nginx"
|
||||
}
|
||||
|
||||
# =============================================================================
|
||||
# MAIN
|
||||
# =============================================================================
|
||||
|
||||
# No sub-command supplied: print the full command reference and exit non-zero
# so callers and automation can detect the misuse.
if [[ $# -lt 1 ]]; then
    cat <<EOF
LetsBe Restore Tool

Usage:
  $0 list                      List local backups
  $0 list-remote               List remote backups
  $0 download <DATE>           Download remote backup
  $0 postgres <TOOL> <FILE>    Restore PostgreSQL database
  $0 mysql <TOOL> <FILE>       Restore MySQL database
  $0 mongo <TOOL> <FILE>       Restore MongoDB database
  $0 env <FILE>                Restore env files
  $0 configs <FILE>            Restore config files
  $0 nginx <FILE>              Restore nginx configs
  $0 full <DATE>               Full system restore

PostgreSQL tools: chatwoot, nextcloud, keycloak, n8n, calcom, umami,
  nocodb, typebot, windmill, glitchtip, penpot, gitea, odoo, listmonk,
  documenso, redash, activepieces, orchestrator

MySQL tools: wordpress, ghost

MongoDB tools: librechat
EOF
    exit 1
fi
|
||||
|
||||
# Dispatch the sub-command given as $1.
# Each branch first validates its own argument count (via `die`, which prints
# the per-command usage and exits) and then delegates to the matching cmd_*
# handler defined earlier in this script.
case "$1" in
    list) cmd_list ;;
    list-remote) cmd_list_remote ;;
    download) [[ $# -ge 2 ]] || die "Usage: $0 download <DATE>"; cmd_download "$2" ;;
    postgres) [[ $# -ge 3 ]] || die "Usage: $0 postgres <TOOL> <FILE>"; cmd_restore_postgres "$2" "$3" ;;
    mysql) [[ $# -ge 3 ]] || die "Usage: $0 mysql <TOOL> <FILE>"; cmd_restore_mysql "$2" "$3" ;;
    mongo) [[ $# -ge 3 ]] || die "Usage: $0 mongo <TOOL> <FILE>"; cmd_restore_mongo "$2" "$3" ;;
    env) [[ $# -ge 2 ]] || die "Usage: $0 env <FILE>"; cmd_restore_env "$2" ;;
    configs) [[ $# -ge 2 ]] || die "Usage: $0 configs <FILE>"; cmd_restore_configs "$2" ;;
    nginx) [[ $# -ge 2 ]] || die "Usage: $0 nginx <FILE>"; cmd_restore_nginx "$2" ;;
    full) [[ $# -ge 2 ]] || die "Usage: $0 full <DATE>"; cmd_full_restore "$2" ;;
    # Unknown command: fail loudly instead of silently doing nothing.
    *) die "Unknown command: $1. Run '$0' for usage." ;;
esac
|
||||
|
|
@ -0,0 +1,831 @@
|
|||
#!/bin/bash
|
||||
#
|
||||
# LetsBe Server Setup Script
|
||||
# This script sets up the server and deploys selected tools.
|
||||
#
|
||||
# Usage:
|
||||
# ./setup.sh --tools "all" --domain "example.com"
|
||||
# ./setup.sh --tools "portainer,n8n,baserow" --domain "example.com"
|
||||
# ./setup.sh --tools "1,2,3"
|
||||
# ./setup.sh # Foundation only, no tools deployed
|
||||
#
|
||||
# Arguments:
|
||||
# --tools Comma-separated list of tools to deploy, "all", or tool numbers
|
||||
# --domain Domain name for SSL email (administrator@domain)
|
||||
# --skip-ssl Skip SSL certificate setup (useful for testing)
|
||||
# --admin-user Admin username to create with SSH key access
|
||||
# --admin-ssh-key Public SSH key for the admin user
|
||||
#
|
||||
|
||||
# Abort on any command failure, on use of unset variables, and on failures
# anywhere in a pipeline (not just its last stage).
set -euo pipefail

# Prevent interactive prompts during apt install
export DEBIAN_FRONTEND=noninteractive

# =============================================================================
# ARGUMENT PARSING
# =============================================================================

# Defaults for all CLI flags; the while/case loop below overrides them.
TOOLS_TO_DEPLOY=""   # comma-separated tool names, 1-based indexes, or "all"
SKIP_SSL=false       # --skip-ssl: skip certbot entirely (useful for testing)
ROOT_SSL=false       # --root-ssl: also include the bare root domain in the cert
DOMAIN=""            # --domain: used for the SSL e-mail (administrator@domain)

# Docker registry authentication (optional)
DOCKER_USER=""
DOCKER_TOKEN=""
DOCKER_REGISTRY=""   # empty means Docker Hub

# Gitea registry authentication (for private images from code.letsbe.solutions)
GITEA_REGISTRY=""
GITEA_USER=""
GITEA_TOKEN=""

# Admin user setup (optional - replaces hardcoded user)
ADMIN_USER=""
ADMIN_SSH_KEY=""
|
||||
|
||||
# Consume the command line: value-taking flags shift twice, booleans once.
# Unknown options abort with a hint; --help prints usage and exits cleanly.
while [[ $# -gt 0 ]]; do
    case $1 in
        --tools)           TOOLS_TO_DEPLOY="$2"; shift 2 ;;
        --domain)          DOMAIN="$2";          shift 2 ;;
        --skip-ssl)        SKIP_SSL=true;        shift ;;
        --root-ssl)        ROOT_SSL=true;        shift ;;
        --docker-user)     DOCKER_USER="$2";     shift 2 ;;
        --docker-token)    DOCKER_TOKEN="$2";    shift 2 ;;
        --docker-registry) DOCKER_REGISTRY="$2"; shift 2 ;;
        --gitea-registry)  GITEA_REGISTRY="$2";  shift 2 ;;
        --gitea-user)      GITEA_USER="$2";      shift 2 ;;
        --gitea-token)     GITEA_TOKEN="$2";     shift 2 ;;
        --admin-user)      ADMIN_USER="$2";      shift 2 ;;
        --admin-ssh-key)   ADMIN_SSH_KEY="$2";   shift 2 ;;
        --help|-h)
            echo "Usage: $0 [--tools \"tool1,tool2,...\"|\"all\"] [--domain DOMAIN] [--skip-ssl] [--root-ssl]"
            echo ""
            echo "Options:"
            echo "  --tools            Comma-separated list of tools, 'all', or tool numbers"
            echo "  --domain           Domain name for SSL email (administrator@domain)"
            echo "  --skip-ssl         Skip SSL certificate setup"
            echo "  --root-ssl         Include root domain in SSL certificate"
            echo "  --docker-user      Docker registry username (optional)"
            echo "  --docker-token     Docker registry password/token (optional)"
            echo "  --docker-registry  Docker registry URL (optional, defaults to Docker Hub)"
            echo "  --admin-user       Admin username to create with SSH key access"
            echo "  --admin-ssh-key    Public SSH key for the admin user"
            echo ""
            echo "Examples:"
            echo "  $0 --tools \"all\" --domain \"example.com\""
            echo "  $0 --tools \"portainer,n8n,baserow\""
            echo "  $0 --tools \"1,5,10\""
            echo "  $0   # Foundation only"
            exit 0
            ;;
        *)
            echo "Unknown option: $1"
            echo "Use --help for usage information"
            exit 1
            ;;
    esac
done
|
||||
|
||||
echo "=== LetsBe Server Setup ==="
echo ""

# =============================================================================
# PACKAGE INSTALLATION
# =============================================================================

echo "[1/10] Installing system packages..."
# Use apt-get rather than apt: plain `apt` warns that its CLI is unstable
# when used in scripts. Behavior is otherwise identical here.
sudo apt-get update && sudo apt-get upgrade -y
# BUGFIX: `htop` was listed twice in the original package list; duplicate removed.
sudo apt-get install -y build-essential net-tools tree wget jq nano curl htop ufw fail2ban unattended-upgrades apt-listchanges apticron git gnupg ca-certificates apache2-utils acl certbot python3-certbot-nginx rsync rclone s3cmd zip sudo iptables dstat openssl
|
||||
|
||||
# =============================================================================
|
||||
# DOCKER INSTALLATION
|
||||
# =============================================================================
|
||||
|
||||
echo "[2/10] Installing Docker..."
sudo install -m 0755 -d /etc/apt/keyrings
# Use --batch and --yes for non-interactive gpg (required for nohup/background execution)
sudo curl -fsSL https://download.docker.com/linux/debian/gpg -o /tmp/docker.gpg
sudo gpg --batch --yes --dearmor -o /etc/apt/keyrings/docker.gpg /tmp/docker.gpg
rm -f /tmp/docker.gpg
sudo chmod a+r /etc/apt/keyrings/docker.gpg
# BUGFIX: dropped the useless `sudo` on echo — the privileged part of this
# pipeline is the file write done by `sudo tee`, not producing the string.
echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/debian $(. /etc/os-release && echo $VERSION_CODENAME) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
sudo apt update
sudo apt install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
sudo systemctl enable docker

# Standalone v1-style `docker-compose` binary in addition to the compose
# plugin, because the deployment sections below call `docker-compose` directly.
sudo curl -L "https://github.com/docker/compose/releases/latest/download/docker-compose-linux-$(uname -m)" -o /usr/local/bin/docker-compose
sudo chmod 755 /usr/local/bin/docker-compose

# Docker registry login (optional - for private registries or to bypass rate limits)
if [[ -n "$DOCKER_USER" && -n "$DOCKER_TOKEN" ]]; then
    if [[ -n "$DOCKER_REGISTRY" ]]; then
        echo "Logging into Docker registry: $DOCKER_REGISTRY..."
        echo "$DOCKER_TOKEN" | docker login -u "$DOCKER_USER" --password-stdin "$DOCKER_REGISTRY"
    else
        echo "Logging into Docker Hub..."
        echo "$DOCKER_TOKEN" | docker login -u "$DOCKER_USER" --password-stdin
    fi
fi

# Gitea registry login (for private images from code.letsbe.solutions)
if [[ -n "$GITEA_REGISTRY" && -n "$GITEA_USER" && -n "$GITEA_TOKEN" ]]; then
    echo "Logging into Gitea registry: $GITEA_REGISTRY..."
    echo "$GITEA_TOKEN" | docker login -u "$GITEA_USER" --password-stdin "$GITEA_REGISTRY"
fi
|
||||
|
||||
# =============================================================================
|
||||
# DISABLE CONFLICTING SERVICES
|
||||
# =============================================================================
|
||||
|
||||
echo "[3/10] Disabling conflicting services..."
# exim4 conflicts with the Poste mail stack; apache2 conflicts with nginx.
# `|| true` keeps `set -e` from aborting when a service is not installed.
for svc in exim4 apache2; do
    sudo systemctl stop "$svc" 2>/dev/null || true
    sudo systemctl disable "$svc" 2>/dev/null || true
done
# apache2 is removed entirely so it can never reclaim ports 80/443.
sudo apt remove -y apache2 2>/dev/null || true
|
||||
|
||||
# =============================================================================
|
||||
# NGINX INSTALLATION & CONFIGURATION
|
||||
# =============================================================================
|
||||
|
||||
echo "[4/10] Installing and configuring nginx..."
sudo apt install -y nginx
sudo systemctl enable nginx

# Drop the distro default vhost so only the fallback and per-tool configs serve.
sudo rm -f /etc/nginx/sites-enabled/default

# Self-signed placeholder certificate so the HTTPS catch-all server below can
# start before certbot has issued any real certificates.
# NOTE(review): this openssl call and the config write below run without sudo,
# unlike the surrounding commands — presumably the script runs as root; confirm.
openssl req -new -newkey rsa:2048 -days 365 -nodes -x509 -subj "/C=US/ST=Denial/L=Springfield/O=Dis/CN=www.example.com" -keyout /etc/nginx/placeholder.key -out /etc/nginx/placeholder.crt

# Catch-all vhost: close the connection (nginx's non-standard code 444) for
# any Host header not matched by a tool-specific server block.
cat <<EOF > /etc/nginx/conf.d/fallback.conf
server {
    listen 80 default_server;
    listen [::]:80 default_server;
    server_name _;
    return 444;
}

server {
    listen 443 ssl default_server;
    server_name _;
    return 444;

    ssl_certificate /etc/nginx/placeholder.crt;
    ssl_certificate_key /etc/nginx/placeholder.key;
}
EOF

sudo systemctl restart nginx
|
||||
|
||||
# =============================================================================
|
||||
# FIREWALL CONFIGURATION
|
||||
# =============================================================================
|
||||
|
||||
echo "[5/10] Configuring UFW firewall..."
# 22 stays open alongside the hardened port 22022 so the current session
# survives until sshd is restarted at the end of the script.
for port in 22 22022 80 443; do
    ufw allow "$port"
done

# Open mail ports only if Poste mail server is being deployed
if [[ "$TOOLS_TO_DEPLOY" == *"poste"* || "$TOOLS_TO_DEPLOY" == "all" ]]; then
    echo "Opening mail ports for Poste..."
    # SMTP, submission, IMAP, POP3, sieve, and the implicit-TLS variants.
    for port in 25 587 143 110 4190 465 993 995; do
        ufw allow "$port"
    done
fi

ufw --force enable
|
||||
|
||||
# =============================================================================
|
||||
# ADMIN USER SETUP
|
||||
# =============================================================================
|
||||
|
||||
# Optionally create an admin login with key-only SSH access and docker rights.
# Requires both --admin-user and --admin-ssh-key; otherwise the step is skipped.
if [[ -n "$ADMIN_USER" && -n "$ADMIN_SSH_KEY" ]]; then
    echo "[6/10] Configuring admin user '$ADMIN_USER'..."

    if ! id -u "$ADMIN_USER" > /dev/null 2>&1; then
        echo "User $ADMIN_USER does not exist, will be created."
        useradd -m -s /bin/bash "$ADMIN_USER"
    fi

    # BUGFIX: quote every $ADMIN_USER expansion (a name with unusual characters
    # previously underwent word splitting) and only append the key when it is
    # not already present, so re-running setup stays idempotent.
    admin_home="/home/${ADMIN_USER}"
    mkdir -p "${admin_home}/.ssh"
    chmod 700 "${admin_home}/.ssh"

    if ! grep -qxF "$ADMIN_SSH_KEY" "${admin_home}/.ssh/authorized_keys" 2>/dev/null; then
        echo "$ADMIN_SSH_KEY" >> "${admin_home}/.ssh/authorized_keys"
    fi
    chmod 600 "${admin_home}/.ssh/authorized_keys"
    chown -R "${ADMIN_USER}:${ADMIN_USER}" "${admin_home}/.ssh"

    # Allow the admin to manage containers without sudo.
    usermod -aG docker "$ADMIN_USER"

    echo "Public key was added for user $ADMIN_USER."
else
    echo "[6/10] Skipping admin user setup (no --admin-user and --admin-ssh-key provided)"
fi
|
||||
|
||||
# =============================================================================
|
||||
# SSH SECURITY HARDENING
|
||||
# =============================================================================
|
||||
|
||||
# Overwrite /etc/ssh/sshd_config wholesale: custom port 22022, key-only
# authentication (PasswordAuthentication no + AuthenticationMethods publickey),
# and VERBOSE logging (useful for fail2ban).
# NOTE(review): `UsePrivilegeSeparation` was deprecated in OpenSSH 7.5 and is
# ignored with a warning by modern sshd — confirm `sshd -t` accepts this config
# on the target distro before relying on it.
echo "[7/10] Hardening SSH configuration..."
cat <<EOF > /etc/ssh/sshd_config
Include /etc/ssh/sshd_config.d/*.conf

Port 22022
#AddressFamily any
#ListenAddress 0.0.0.0
#ListenAddress ::

#HostKey /etc/ssh/ssh_host_rsa_key
#HostKey /etc/ssh/ssh_host_ecdsa_key
#HostKey /etc/ssh/ssh_host_ed25519_key

SyslogFacility AUTH
LogLevel VERBOSE

LoginGraceTime 2m
PermitRootLogin prohibit-password
#StrictModes yes
MaxAuthTries 6
MaxSessions 10

PasswordAuthentication no
PermitEmptyPasswords no

ChallengeResponseAuthentication no

UsePAM yes

X11Forwarding yes
PrintMotd no
PrintLastLog yes

AcceptEnv LANG LC_*

Subsystem sftp /usr/lib/openssh/sftp-server

UsePrivilegeSeparation sandbox
AuthenticationMethods publickey
EOF

# NOTE: SSH restart moved to end of script to keep connection alive
|
||||
|
||||
# =============================================================================
|
||||
# AUTOMATIC SECURITY UPDATES
|
||||
# =============================================================================
|
||||
|
||||
# Write both the APT::Periodic schedule and the Unattended-Upgrade policy into
# one file. Conventionally the Unattended-Upgrade::* options live in
# 50unattended-upgrades, but apt parses every file under apt.conf.d, so
# keeping them together here works.
# NOTE(review): the report address is hardcoded to administrator@letsbe.biz
# even though the script accepts --domain — confirm this is intentional.
echo "[8/10] Configuring automatic security updates..."
cat <<EOF > /etc/apt/apt.conf.d/20auto-upgrades
// Enable the update/upgrade script (0=disable)
APT::Periodic::Enable "1";

// Do "apt-get update" automatically every n-days (0=disable)
APT::Periodic::Update-Package-Lists "1";

// Do "apt-get upgrade --download-only" every n-days (0=disable)
APT::Periodic::Download-Upgradeable-Packages "1";

// Do "apt-get autoclean" every n-days (0=disable)
APT::Periodic::AutocleanInterval "7";

// Send report mail to root
// 0: no report (or null string)
// 1: progress report (actually any string)
// 2: + command outputs (remove -qq, remove 2>/dev/null, add -d)
APT::Periodic::Unattended-Upgrade "1";

// Automatically upgrade packages from these
Unattended-Upgrade::Origins-Pattern {
// "o=Debian,a=stable";
// "o=Debian,a=stable-updates";
"origin=Debian,codename=\${distro_codename},label=Debian-Security";
};

// You can specify your own packages to NOT automatically upgrade here
Unattended-Upgrade::Package-Blacklist {
};

// Run dpkg --force-confold --configure -a if a unclean dpkg state is detected to true to ensure that updates get installed even when the system got interrupted during a previous run
Unattended-Upgrade::AutoFixInterruptedDpkg "true";

// Perform the upgrade when the machine is running because we wont be shutting our server down often
Unattended-Upgrade::InstallOnShutdown "false";

// Send an email to this address with information about the packages upgraded.
Unattended-Upgrade::Mail "administrator@letsbe.biz";

// Always send an e-mail
Unattended-Upgrade::MailOnlyOnError "true";

// Remove all unused dependencies after the upgrade has finished
Unattended-Upgrade::Remove-Unused-Dependencies "true";

// Remove any new unused dependencies after the upgrade has finished
Unattended-Upgrade::Remove-New-Unused-Dependencies "true";

// Automatically reboot WITHOUT CONFIRMATION if the file /var/run/reboot-required is found after the upgrade.
Unattended-Upgrade::Automatic-Reboot "false";

// Automatically reboot even if users are logged in.
Unattended-Upgrade::Automatic-Reboot-WithUsers "false";
EOF
|
||||
|
||||
# =============================================================================
|
||||
# BACKUP SCRIPT & CRON
|
||||
# =============================================================================
|
||||
|
||||
echo "Setting up backup script and cron..."
chmod 750 /opt/letsbe/scripts/backups.sh 2>/dev/null || true
chmod 750 /opt/letsbe/scripts/restore.sh 2>/dev/null || true
mkdir -p /root/.config/rclone
mkdir -p /opt/letsbe/logs

# Install backup cron non-interactively (daily at 2am)
BACKUP_CRON="0 2 * * * /bin/bash /opt/letsbe/scripts/backups.sh >> /opt/letsbe/logs/backup.log 2>&1"
# BUGFIX: the subshell inherits `set -euo pipefail`; on a host with no existing
# crontab (or one with no other entries) `crontab -l | grep -v` exits non-zero,
# which previously killed the subshell before the new entry was echoed and
# installed an EMPTY crontab. `|| true` keeps the subshell alive in that case.
( crontab -l 2>/dev/null | grep -v "backups.sh" || true; echo "$BACKUP_CRON" ) | crontab -
echo "Backup cron installed (daily at 2:00 AM)"
|
||||
|
||||
# =============================================================================
|
||||
# TOOL DEPLOYMENT
|
||||
# =============================================================================
|
||||
|
||||
echo "[9/10] Deploying tools..."

# Get list of available tools: every stack directory under /opt/letsbe/stacks
# that contains a docker-compose.yml. `ls` order (alphabetical) also defines
# the 1-based numeric indexes accepted by --tools.
mapfile -t available_tools < <(ls /opt/letsbe/stacks/*/docker-compose.yml 2>/dev/null | xargs -I {} dirname {} | xargs -I {} basename {})

if [[ -z "$TOOLS_TO_DEPLOY" ]]; then
    echo "No tools specified. Skipping tool deployment."
    echo "Available tools: ${available_tools[*]}"
    echo "Use --tools to deploy tools later."
else
    # Determine which tools to deploy
    declare -a tools_list=()

    if [[ "$TOOLS_TO_DEPLOY" == "all" || "$TOOLS_TO_DEPLOY" == "a" ]]; then
        tools_list=("${available_tools[@]}")
    else
        # Parse comma-separated list; entries may be names or 1-based indexes.
        IFS=',' read -ra requested_tools <<< "$TOOLS_TO_DEPLOY"
        for tool in "${requested_tools[@]}"; do
            tool=$(echo "$tool" | xargs) # Trim whitespace

            # Check if it's a number (index)
            if [[ "$tool" =~ ^[0-9]+$ ]]; then
                # Convert 1-based user index to 0-based array index.
                idx=$((tool - 1))
                if [[ $idx -ge 0 && $idx -lt ${#available_tools[@]} ]]; then
                    tools_list+=("${available_tools[$idx]}")
                else
                    echo "Warning: Tool index $tool out of range, skipping."
                fi
            else
                # It's a tool name; the padded-space match requires an exact
                # whole-word hit in the available list.
                if [[ " ${available_tools[*]} " =~ " ${tool} " ]]; then
                    tools_list+=("$tool")
                else
                    echo "Warning: Tool '$tool' not found, skipping."
                fi
            fi
        done
    fi

    # Ensure orchestrator is FIRST (creates network that sysadmin needs)
    if [[ -f "/opt/letsbe/stacks/orchestrator/docker-compose.yml" ]]; then
        # Remove orchestrator from current position if present
        declare -a new_list=()
        for tool in "${tools_list[@]}"; do
            if [[ "$tool" != "orchestrator" ]]; then
                new_list+=("$tool")
            fi
        done
        # Prepend orchestrator to front. NOTE(review): with `set -u`,
        # expanding an empty new_list requires bash >= 4.4 — confirm target.
        tools_list=("orchestrator" "${new_list[@]}")
        echo "Orchestrator moved to front (creates network for sysadmin)"
    fi

    echo "Deploying tools: ${tools_list[*]}"

    # Track deployed tools for SSL setup (only tools that got an nginx vhost).
    DEPLOYED_TOOLS=()

    for tool_name in "${tools_list[@]}"; do
        compose_file="/opt/letsbe/stacks/${tool_name}/docker-compose.yml"

        if [[ -f "$compose_file" ]]; then
            # Copy .env file to centralized env directory if it exists
            stack_env="/opt/letsbe/stacks/${tool_name}/.env"
            central_env="/opt/letsbe/env/${tool_name}.env"
            if [[ -f "$stack_env" ]]; then
                cp "$stack_env" "$central_env"
                # Credentials live in these files; restrict to owner.
                chmod 600 "$central_env"
                echo "Copied env file for $tool_name"
            fi

            # Tool-specific pre-deployment setup
            if [[ "$tool_name" == "nextcloud" ]]; then
                echo "Creating Nextcloud bind mount directories..."
                mkdir -p /opt/letsbe/config/nextcloud
                mkdir -p /opt/letsbe/data/nextcloud
                # Set appropriate ownership for www-data (uid 33 in Nextcloud container)
                chown -R 33:33 /opt/letsbe/config/nextcloud
                chown -R 33:33 /opt/letsbe/data/nextcloud
            fi

            if [[ "$tool_name" == "sysadmin" ]]; then
                echo " Pulling latest sysadmin agent image..."
                docker-compose -f "$compose_file" pull
            fi

            echo "Starting $tool_name..."
            docker-compose -f "$compose_file" up -d

            # Tool-specific post-deployment initialization
            if [[ "$tool_name" == "portainer" ]]; then
                echo "Configuring Portainer local Docker endpoint..."

                # Get Portainer container name.
                # NOTE(review): under `set -euo pipefail`, this assignment
                # aborts the script when grep finds no match (the pipeline
                # fails) — confirm the no-container path is actually reachable.
                PORTAINER_CONTAINER=$(docker ps --format '{{.Names}}' | grep portainer | head -1)

                if [[ -n "$PORTAINER_CONTAINER" ]]; then
                    # Wait (up to ~60s) for the Portainer API to answer.
                    echo " Waiting for Portainer to be ready..."
                    for i in {1..30}; do
                        if curl -ks https://localhost:9443/api/system/status >/dev/null 2>&1; then
                            echo " Portainer is ready."
                            break
                        fi
                        sleep 2
                    done

                    # Read admin password from file.
                    # NOTE(review): if the file is missing, this command
                    # substitution fails under `set -e` — confirm intended.
                    PORTAINER_PASSWORD=$(cat /opt/letsbe/env/portainer_admin_password.txt 2>/dev/null)

                    if [[ -n "$PORTAINER_PASSWORD" ]]; then
                        # Authenticate and get JWT token
                        echo " Authenticating with Portainer..."
                        JWT_RESPONSE=$(curl -ks -X POST https://localhost:9443/api/auth \
                            -H "Content-Type: application/json" \
                            -d "{\"username\":\"admin\",\"password\":\"${PORTAINER_PASSWORD}\"}" 2>/dev/null)

                        # Crude JSON extraction; avoids a jq dependency here.
                        JWT=$(echo "$JWT_RESPONSE" | grep -o '"jwt":"[^"]*"' | cut -d'"' -f4)

                        if [[ -n "$JWT" ]]; then
                            echo " Creating local Docker endpoint..."
                            # Create local Docker socket endpoint (type 1 = local).
                            ENDPOINT_RESPONSE=$(curl -ks -X POST https://localhost:9443/api/endpoints \
                                -H "Authorization: Bearer $JWT" \
                                -H "Content-Type: multipart/form-data" \
                                -F "Name=local" \
                                -F "EndpointCreationType=1" 2>/dev/null)

                            if echo "$ENDPOINT_RESPONSE" | grep -q '"Id"'; then
                                echo " Local Docker endpoint created successfully."
                            else
                                echo " Warning: Endpoint creation response: $ENDPOINT_RESPONSE"
                            fi
                        else
                            echo " Warning: Could not authenticate with Portainer"
                        fi
                    else
                        echo " Warning: Could not read Portainer password file"
                    fi
                else
                    echo " Warning: Could not find Portainer container"
                fi
            fi

            if [[ "$tool_name" == "chatwoot" ]]; then
                echo "Initializing Chatwoot database (pgvector + migrations)..."

                # Get the customer prefix from the container name
                CHATWOOT_POSTGRES=$(docker ps --format '{{.Names}}' | grep chatwoot-postgres | head -1)
                CHATWOOT_RAILS=$(docker ps --format '{{.Names}}' | grep chatwoot-rails | head -1)

                if [[ -n "$CHATWOOT_POSTGRES" && -n "$CHATWOOT_RAILS" ]]; then
                    # Wait (up to ~60s) for Postgres to accept connections.
                    echo " Waiting for Postgres to be ready..."
                    for i in {1..30}; do
                        if docker exec "$CHATWOOT_POSTGRES" pg_isready -U chatwoot -d chatwoot_production >/dev/null 2>&1; then
                            echo " Postgres is ready."
                            break
                        fi
                        sleep 2
                    done

                    # Create pgvector extension (idempotent; `|| true` tolerates re-runs).
                    echo " Creating pgvector extension..."
                    docker exec "$CHATWOOT_POSTGRES" psql -U chatwoot -d chatwoot_production -c "CREATE EXTENSION IF NOT EXISTS vector;" 2>/dev/null || true

                    # Wait for Rails container to be fully up
                    echo " Waiting for Rails container..."
                    sleep 10

                    # Run database migrations; tolerated failure because the
                    # task is a no-op when the schema is already prepared.
                    echo " Running Chatwoot database prepare..."
                    docker exec "$CHATWOOT_RAILS" bundle exec rails db:chatwoot_prepare 2>&1 || echo " Note: db:chatwoot_prepare may have already run"

                    echo " Chatwoot initialization complete."
                else
                    echo " Warning: Could not find Chatwoot containers for initialization"
                fi
            fi

            # Link nginx config if exists; only tools with a vhost are recorded
            # in DEPLOYED_TOOLS (used later to collect SSL domains).
            nginx_conf="/opt/letsbe/nginx/${tool_name}.conf"
            if [[ -f "$nginx_conf" ]]; then
                cp "$nginx_conf" /etc/nginx/sites-available/
                ln -sf /etc/nginx/sites-available/${tool_name}.conf /etc/nginx/sites-enabled/
                echo "Nginx config linked for $tool_name"
                DEPLOYED_TOOLS+=("$tool_name")
            else
                echo "No nginx config for $tool_name (may not need one)"
            fi
        else
            echo "Warning: docker-compose.yml not found for $tool_name"
        fi
    done

    # Restart nginx to apply new configs
    systemctl restart nginx
fi
|
||||
|
||||
# =============================================================================
|
||||
# SYSADMIN AGENT (Always deployed)
|
||||
# =============================================================================
|
||||
|
||||
echo "[9.5/10] Deploying sysadmin agent..."

# The sysadmin agent is deployed unconditionally whenever its stack exists,
# regardless of the --tools selection.
SYSADMIN_COMPOSE="/opt/letsbe/stacks/sysadmin/docker-compose.yml"
if [[ ! -f "$SYSADMIN_COMPOSE" ]]; then
    echo "Warning: Sysadmin docker-compose.yml not found at $SYSADMIN_COMPOSE"
else
    # Purely informational: `up -d` below handles both fresh and running cases.
    if docker ps --format '{{.Names}}' | grep -q "agent$"; then
        echo " Sysadmin agent already running, updating..."
    fi

    echo " Pulling latest sysadmin agent image..."
    docker-compose -f "$SYSADMIN_COMPOSE" pull

    echo " Starting sysadmin agent..."
    docker-compose -f "$SYSADMIN_COMPOSE" up -d

    echo " Sysadmin agent deployed successfully."
fi
|
||||
|
||||
# =============================================================================
|
||||
# LOCAL ORCHESTRATOR BOOTSTRAP (License Validation + Agent Registration)
|
||||
# =============================================================================
|
||||
|
||||
echo "[9.6/10] Running local orchestrator bootstrap..."

BOOTSTRAP_SCRIPT="/opt/letsbe/scripts/local_bootstrap.sh"
CREDENTIALS_FILE="/opt/letsbe/env/credentials.env"

if [[ -f "$BOOTSTRAP_SCRIPT" && -f "$CREDENTIALS_FILE" ]]; then
    # Source credentials to get required variables (HUB_URL, LICENSE_KEY,
    # INSTANCE_ID, ADMIN_API_KEY).
    source "$CREDENTIALS_FILE"

    echo " Validating license and setting up local orchestrator..."
    echo " Instance ID: ${INSTANCE_ID:-unknown}"
    echo " Hub URL: ${HUB_URL:-unknown}"

    # Run bootstrap script with required environment variables.
    # BUGFIX: with `set -e` active, a non-zero exit from the bootstrap script
    # used to abort the whole setup BEFORE `BOOTSTRAP_EXIT=$?` ran, making the
    # failure branch below unreachable. Capturing the status with
    # `|| BOOTSTRAP_EXIT=$?` keeps the script alive and records the real code.
    BOOTSTRAP_EXIT=0
    HUB_URL="${HUB_URL}" \
    LICENSE_KEY="${LICENSE_KEY}" \
    INSTANCE_ID="${INSTANCE_ID}" \
    ORCHESTRATOR_URL="http://localhost:8100" \
    ADMIN_API_KEY="${ADMIN_API_KEY}" \
    CUSTOMER="$(echo "${INSTANCE_ID}" | sed 's/-orchestrator$//')" \
    CREDENTIALS_DIR="/opt/letsbe/env" \
    bash "$BOOTSTRAP_SCRIPT" || BOOTSTRAP_EXIT=$?

    if [[ $BOOTSTRAP_EXIT -ne 0 ]]; then
        echo ""
        echo "=============================================="
        echo " BOOTSTRAP FAILED"
        echo "=============================================="
        echo ""
        echo "License validation or agent registration failed."
        echo "Check the error messages above for details."
        echo ""
        echo "Common issues:"
        echo " - Invalid license_key in config.json"
        echo " - Network connectivity to Hub (${HUB_URL})"
        echo " - Instance not registered in LetsBe Hub"
        echo ""
        echo "The stack has been deployed but is NOT properly configured."
        echo "Please fix the issue and re-run: bash /opt/letsbe/scripts/local_bootstrap.sh"
        echo "=============================================="
        # Don't exit - let the rest of setup complete, but warn
    else
        echo " Bootstrap completed successfully!"
        echo " Agent should register with local orchestrator within 30 seconds."
    fi
else
    if [[ ! -f "$BOOTSTRAP_SCRIPT" ]]; then
        echo "Warning: Bootstrap script not found at $BOOTSTRAP_SCRIPT"
    fi
    if [[ ! -f "$CREDENTIALS_FILE" ]]; then
        echo "Warning: Credentials file not found at $CREDENTIALS_FILE"
        echo "Run env_setup.sh first to generate credentials."
    fi
fi
|
||||
|
||||
# Collect domains from deployed tools' nginx configs (for SSL)
SSL_DOMAINS=()
if [[ ${#DEPLOYED_TOOLS[@]} -gt 0 ]]; then
    for tool_name in "${DEPLOYED_TOOLS[@]}"; do
        tool_conf="/etc/nginx/sites-enabled/${tool_name}.conf"
        if [[ -f "$tool_conf" ]]; then
            # Extract server_name values (excluding placeholders and _)
            while IFS= read -r domain; do
                if [[ -n "$domain" && "$domain" != "_" && ! "$domain" =~ \{\{ ]]; then
                    SSL_DOMAINS+=("$domain")
                fi
            done < <(grep -h "server_name" "$tool_conf" 2>/dev/null | awk '{print $2}' | tr -d ';' | sort -u)
        fi
    done
fi

# =============================================================================
# SSL CERTIFICATE SETUP
# =============================================================================

echo "[10/10] Setting up SSL certificates..."

# NOTE: Certbot cron disabled - crontab hangs in non-interactive mode
# Certbot installs its own systemd timer, so manual cron not needed
echo "Certbot renewal handled by systemd timer (certbot.timer)"

if [[ "$SKIP_SSL" == "true" ]]; then
    echo "Skipping SSL setup (--skip-ssl flag set)"
elif [[ ${#SSL_DOMAINS[@]} -eq 0 ]]; then
    echo "No deployed tools with valid domains found."
    echo "Skipping SSL setup. Either:"
    echo " - No tools were deployed, or"
    echo " - Templates not replaced (run env_setup.sh first with --domain parameter)"
    echo "To manually setup SSL later: certbot --nginx -d yourdomain.com"
else
    # Remove duplicates from SSL_DOMAINS. mapfile avoids the word-splitting
    # and glob expansion of the previous `SSL_DOMAINS=($(printf ...))` form.
    mapfile -t SSL_DOMAINS < <(printf '%s\n' "${SSL_DOMAINS[@]}" | sort -u)

    # Add root domain if --root-ssl flag is set
    if [[ "$ROOT_SSL" == "true" && -n "$DOMAIN" ]]; then
        # Check if root domain is not already in the list
        if [[ ! " ${SSL_DOMAINS[*]} " =~ " ${DOMAIN} " ]]; then
            SSL_DOMAINS+=("$DOMAIN")
            echo "Including root domain: $DOMAIN"
        fi
    fi

    echo "----"
    echo "Setting up SSL certificates for deployed tools:"
    for domain in "${SSL_DOMAINS[@]}"; do
        echo " - $domain"
    done
    echo ""
    echo "Make sure DNS entries point to this server IP before proceeding."

    # Derive email from domain parameter or use default
    if [[ -n "$DOMAIN" ]]; then
        SSL_EMAIL="administrator@${DOMAIN}"
    else
        # Try to extract base domain from first SSL domain
        FIRST_DOMAIN="${SSL_DOMAINS[0]}"
        # Extract base domain (remove subdomain)
        BASE_DOMAIN=$(echo "$FIRST_DOMAIN" | awk -F. '{if(NF>2) print $(NF-1)"."$NF; else print $0}')
        SSL_EMAIL="administrator@${BASE_DOMAIN}"
    fi

    echo "Using email: $SSL_EMAIL"

    # Build -d arguments for certbot as an array so each domain stays one
    # safely-quoted word (the old unquoted $DOMAIN_ARGS string relied on
    # shell word splitting).
    DOMAIN_ARGS=()
    for domain in "${SSL_DOMAINS[@]}"; do
        DOMAIN_ARGS+=(-d "$domain")
    done

    # Run certbot non-interactively with specific domains
    sudo certbot --nginx \
        --non-interactive \
        --agree-tos \
        --email "$SSL_EMAIL" \
        --redirect \
        "${DOMAIN_ARGS[@]}" \
        || echo "Certbot completed (some domains may have failed - check DNS)"
fi
||||
# =============================================================================
# COMPLETION SUMMARY
# =============================================================================

echo ""
echo "----"
echo "Configured domains:"
for conf_file in /etc/nginx/sites-enabled/*.conf; do
    if [[ -f "$conf_file" ]]; then
        server_names=$(grep -E "^\s*server_name\s+" "$conf_file" 2>/dev/null | awk '{print $2}' | tr -d ';' | sort | uniq)
        for server_name in $server_names; do
            if [[ "$server_name" != "_" ]]; then
                echo " - $server_name ($conf_file)"
            fi
        done
    fi
done

# Fail soft with a bounded timeout: a hung or unreachable IP-lookup service
# must not stall or abort the whole setup run.
SERVER_IP=$(curl -4 -fsS --max-time 10 ifconfig.co || echo "unknown")

echo ""
echo "=============================================="
echo " LetsBe Server Setup Complete"
echo "=============================================="
echo ""
echo "Server IP: $SERVER_IP"
echo "SSH Port: 22022"
if [[ -n "$ADMIN_USER" ]]; then
    echo "SSH User: $ADMIN_USER (key-based auth only)"
else
    echo "SSH User: root (key-based auth only, no admin user configured)"
fi
echo ""
echo "Portainer (if deployed): https://$SERVER_IP:9443"
echo ""
echo "Important:"
echo " - Configure rclone for backups: rclone config"
echo " - SSH port changed to 22022"
if [[ -n "$ADMIN_USER" ]]; then
    echo " - User '$ADMIN_USER' has Docker access (key in /home/$ADMIN_USER/.ssh/)"
fi
echo ""
echo "=============================================="

# =============================================================================
# MARK SETUP AS COMPLETE (before SSH restart)
# =============================================================================

touch /opt/letsbe/.setup_installed
echo "Setup marked as complete."

# =============================================================================
# RESTART SSH (MUST BE LAST - This will disconnect the session!)
# =============================================================================

echo ""
echo "Restarting SSH on port 22022... (connection will drop)"
if [[ -n "$ADMIN_USER" ]]; then
    echo "Reconnect with: ssh -i id_ed25519 -p 22022 $ADMIN_USER@$SERVER_IP"
else
    echo "Reconnect with: ssh -p 22022 root@$SERVER_IP"
fi
echo ""

# Small delay to ensure output is sent before disconnect
sleep 2

systemctl restart sshd
|
@ -0,0 +1,31 @@
|
|||
## It's advisable to consult the documentation and use the tools/deploy.sh to generate the passwords, keys, instead of manually filling them.

# Path of the Activepieces engine entrypoint inside the container.
AP_ENGINE_EXECUTABLE_PATH=dist/packages/engine/main.js

## Random Long Password (Optional for community edition)
AP_API_KEY={{ activepieces_api_key }}

## 256 bit encryption key, 32 hex character
AP_ENCRYPTION_KEY={{ activepieces_encryption_key }}

## JWT Secret
AP_JWT_SECRET={{ activepieces_jwt_secret }}

AP_ENVIRONMENT=prod
AP_FRONTEND_URL=https://{{ domain_activepieces }}
AP_WEBHOOK_TIMEOUT_SECONDS=30
AP_TRIGGER_DEFAULT_POLL_INTERVAL=5
# Postgres/Redis hosts are the service names from the activepieces
# docker-compose stack ("postgres", "redis").
AP_POSTGRES_DATABASE=activepieces
AP_POSTGRES_HOST=postgres
AP_POSTGRES_PORT=5432
AP_POSTGRES_USERNAME=activepieces-postgres
AP_POSTGRES_PASSWORD={{ activepieces_postgres_password }}
# NOTE(review): UNSANDBOXED executes flow code without isolation — acceptable
# only when every flow author is trusted (single-tenant install). Confirm.
AP_EXECUTION_MODE=UNSANDBOXED
AP_REDIS_HOST=redis
AP_REDIS_PORT=6379
AP_FLOW_TIMEOUT_SECONDS=600
AP_TELEMETRY_ENABLED=true
AP_TEMPLATES_SOURCE_URL="https://cloud.activepieces.com/api/v1/flow-templates"
AP_PROJECT_RATE_LIMITER_ENABLED=false
AP_PIECES_SOURCE=DB
AP_FILE_STORAGE_LOCATION=DB
|
||||
|
|
@ -0,0 +1,52 @@
|
|||
version: '3.0'

services:
  activepieces:
    image: ghcr.io/activepieces/activepieces:0.39.2
    container_name: {{ customer }}-activepieces
    restart: unless-stopped
    labels:
      - "diun.enable=true"
    ## Enable the following line if you already use AP_EXECUTION_MODE with SANDBOXED or old activepieces, checking the breaking change documentation for more details.
    ports:
      - '3056:80'
    depends_on:
      - postgres
      - redis
    env_file: /opt/letsbe/env/activepieces.env
    networks:
      {{ customer }}-activepieces:
        ipv4_address: 172.20.27.2

  postgres:
    image: 'postgres:14.4'
    # Prefixed with {{ customer }} so container names cannot collide when
    # several customer stacks run on one host — matches the naming convention
    # already used by the app container, network and volume names.
    container_name: {{ customer }}-activepieces-postgres
    restart: unless-stopped
    environment:
      # NOTE(review): ${AP_...} here is interpolated by docker compose from the
      # deploy shell / project .env, not from the env_file above — confirm the
      # deploy tooling exports these before `docker compose up`.
      - 'POSTGRES_DB=${AP_POSTGRES_DATABASE}'
      - 'POSTGRES_PASSWORD=${AP_POSTGRES_PASSWORD}'
      - 'POSTGRES_USER=${AP_POSTGRES_USERNAME}'
    volumes:
      - activepieces_postgres_data:/var/lib/postgresql/data
    networks:
      {{ customer }}-activepieces:
        ipv4_address: 172.20.27.3

  redis:
    image: 'redis:7.0.7'
    # Same customer prefix as above, for the same collision reason.
    container_name: {{ customer }}-activepieces-redis
    restart: unless-stopped
    volumes:
      - 'activepieces_redis_data:/data'
    networks:
      {{ customer }}-activepieces:
        ipv4_address: 172.20.27.4

volumes:
  activepieces_postgres_data:
  activepieces_redis_data:

networks:
  {{ customer }}-activepieces:
    ipam:
      config:
        - subnet: 172.20.27.0/28
          gateway: 172.20.27.1
|
||||
|
|
@ -0,0 +1,61 @@
|
|||
# Set this value to 'agree' to accept our license:
# LICENSE: https://github.com/calendso/calendso/blob/main/LICENSE
#
# Summary of terms:
# - The codebase has to stay open source, whether it was modified or not
# - You can not repackage or sell the codebase
# - Acquire a commercial license to remove these terms by emailing: license@cal.com
NEXT_PUBLIC_LICENSE_CONSENT=
LICENSE=

# BASE_URL and NEXT_PUBLIC_APP_URL are both deprecated. Both are replaced with one variable, NEXT_PUBLIC_WEBAPP_URL
# BASE_URL=http://localhost:3000
# NEXT_PUBLIC_APP_URL=http://localhost:3000

NEXT_PUBLIC_WEBAPP_URL=https://{{ domain_calcom }}

# Configure NEXTAUTH_URL manually if needed, otherwise it will resolve to {NEXT_PUBLIC_WEBAPP_URL}/api/auth
# NEXTAUTH_URL=http://localhost:3000/api/auth

# It is highly recommended that the NEXTAUTH_SECRET must be overridden and very unique
# Use `openssl rand -base64 32` to generate a key
NEXTAUTH_SECRET={{ calcom_nextauth_secret }}

# Encryption key that will be used to encrypt CalDAV credentials, choose a random string, for example with `dd if=/dev/urandom bs=1K count=1 | md5sum`
# NOTE(review): the value below is literally the string "md5sum" — a static,
# publicly known "key", not a generated secret. It should almost certainly be
# a template variable produced by tools/deploy.sh (like the other secrets in
# this file). TODO: confirm the generator and replace.
CALENDSO_ENCRYPTION_KEY=md5sum

# Deprecation note: JWT_SECRET is no longer used
# JWT_SECRET=secret

POSTGRES_USER={{ calcom_postgres_user }}
POSTGRES_PASSWORD={{ calcom_postgres_password }}
POSTGRES_DB=calcom
DATABASE_HOST={{ customer }}-calcom-postgres:5432
# NOTE(review): docker `env_file:` passes values literally and does NOT expand
# ${VAR} references — these URLs only resolve if this file is also used for
# compose-time interpolation (e.g. via --env-file) or the app expands them.
# TODO confirm against the deploy tooling.
DATABASE_URL=postgresql://${POSTGRES_USER}:${POSTGRES_PASSWORD}@${DATABASE_HOST}/${POSTGRES_DB}
GOOGLE_API_CREDENTIALS={}
#Fix calcom db migration issues
DATABASE_DIRECT_URL=postgresql://${POSTGRES_USER}:${POSTGRES_PASSWORD}@${DATABASE_HOST}/${POSTGRES_DB}

# Set this to '1' if you don't want Cal to collect anonymous usage
CALCOM_TELEMETRY_DISABLED=1

# Used for the Office 365 / Outlook.com Calendar integration
MS_GRAPH_CLIENT_ID=
MS_GRAPH_CLIENT_SECRET=

# Used for the Zoom integration
ZOOM_CLIENT_ID=
ZOOM_CLIENT_SECRET=

# E-mail settings
# Configures the global From: header whilst sending emails.
EMAIL_FROM=system@{{ domain }}
SUPPORT_MAIL_ADDRESS=support@{{ domain }}
# Configure SMTP settings (@see https://nodemailer.com/smtp/).
EMAIL_SERVER_HOST=mail.{{ domain }}
EMAIL_SERVER_PORT=587
EMAIL_SERVER_USER=system@{{ domain }}
EMAIL_SERVER_PASSWORD=
#EMAIL_SERVER_SECURE=false

NODE_ENV=production
|
||||
|
|
@ -0,0 +1,41 @@
|
|||
services:
  calcom-postgres:
    container_name: {{ customer }}-calcom-postgres
    image: postgres:16 #original postgres
    restart: always
    volumes:
      - {{ customer }}-calcom-postgres:/var/lib/postgresql/data/
      - {{ customer }}-calcom-backups:/tmp/backups
    env_file: /opt/letsbe/env/calcom.env
    networks:
      {{ customer }}-calcom:
        ipv4_address: 172.20.18.2

  calcom:
    container_name: {{ customer }}-calcom
    image: calcom/cal.com:v5.9.8
    restart: always
    labels:
      - "diun.enable=true"
    ports:
      # Loopback-only; nginx on the host proxies the public domain to :3018.
      - '127.0.0.1:3018:3000'
    env_file: /opt/letsbe/env/calcom.env
    environment:
      # NOTE(review): ${POSTGRES_USER} etc. here are interpolated by docker
      # compose from the deploy shell / project .env at `up` time, NOT from the
      # env_file above — confirm the deploy tooling provides these values,
      # otherwise DATABASE_URL is built from empty strings.
      - DATABASE_URL=postgresql://${POSTGRES_USER}:${POSTGRES_PASSWORD}@${DATABASE_HOST}/${POSTGRES_DB}
    depends_on:
      - calcom-postgres
    networks:
      {{ customer }}-calcom:
        ipv4_address: 172.20.18.3

networks:
  {{ customer }}-calcom:
    ipam:
      driver: default
      config:
        - subnet: 172.20.18.0/28
          gateway: 172.20.18.1

volumes:
  {{ customer }}-calcom-postgres:
  {{ customer }}-calcom-backups:
|
||||
|
|
@ -0,0 +1,239 @@
|
|||
SECRET_KEY_BASE={{ chatwoot_secret_key_base }}
|
||||
|
||||
# Replace with the URL you are planning to use for your app
|
||||
FRONTEND_URL=https://{{ domain_chatwoot }}
|
||||
# To use a dedicated URL for help center pages
|
||||
HELPCENTER_URL=https://{{ domain_chatwoot_helpdesk }}
|
||||
|
||||
# If the variable is set, all non-authenticated pages would fallback to the default locale.
|
||||
# Whenever a new account is created, the default language will be DEFAULT_LOCALE instead of en
|
||||
# DEFAULT_LOCALE=en
|
||||
|
||||
# If you plan to use CDN for your assets, set Asset CDN Host
|
||||
ASSET_CDN_HOST=
|
||||
|
||||
# Force all access to the app over SSL, default is set to false
|
||||
FORCE_SSL=false
|
||||
|
||||
# This lets you control new sign ups on your chatwoot installation
|
||||
# true : default option, allows sign ups
|
||||
# false : disables all the end points related to sign ups
|
||||
# api_only: disables the UI for signup, but you can create sign ups via the account apis
|
||||
ENABLE_ACCOUNT_SIGNUP=false
|
||||
|
||||
# Redis config
|
||||
REDIS_URL=redis://:{{ chatwoot_redis_password }}@redis:6379
|
||||
# If you are using docker-compose, set this variable's value to be any string,
|
||||
# which will be the password for the redis service running inside the docker-compose
|
||||
# to make it secure
|
||||
REDIS_PASSWORD={{ chatwoot_redis_password }}
|
||||
# Redis Sentinel can be used by passing list of sentinel host and ports e,g. sentinel_host1:port1,sentinel_host2:port2
|
||||
REDIS_SENTINELS=
|
||||
# Redis sentinel master name is required when using sentinel, default value is "mymaster".
|
||||
# You can find list of master using "SENTINEL masters" command
|
||||
REDIS_SENTINEL_MASTER_NAME=
|
||||
|
||||
# By default Chatwoot will pass REDIS_PASSWORD as the password value for sentinels
|
||||
# Use the following environment variable to customize passwords for sentinels.
|
||||
# Use empty string if sentinels are configured with out passwords
|
||||
# REDIS_SENTINEL_PASSWORD=
|
||||
|
||||
# Redis premium breakage in heroku fix
|
||||
# enable the following configuration
|
||||
# ref: https://github.com/chatwoot/chatwoot/issues/2420
|
||||
# REDIS_OPENSSL_VERIFY_MODE=none
|
||||
|
||||
# Postgres Database config variables
|
||||
# You can leave POSTGRES_DATABASE blank. The default name of
|
||||
# the database in the production environment is chatwoot_production
|
||||
POSTGRES_DATABASE=chatwoot_production
|
||||
POSTGRES_HOST=postgres
|
||||
POSTGRES_USERNAME={{ chatwoot_postgres_username }}
|
||||
POSTGRES_PASSWORD={{ chatwoot_postgres_password }}
|
||||
RAILS_ENV=production
|
||||
# Changes the Postgres query timeout limit. The default is 14 seconds. Modify only when required.
|
||||
# POSTGRES_STATEMENT_TIMEOUT=14s
|
||||
RAILS_MAX_THREADS=5
|
||||
|
||||
# The email from which all outgoing emails are sent
|
||||
# could user either `email@yourdomain.com` or `BrandName <email@yourdomain.com>`
|
||||
MAILER_SENDER_EMAIL={{ company_name }} <support@{{ domain }}>
|
||||
|
||||
#SMTP domain key is set up for HELO checking
SMTP_DOMAIN=mail.{{ domain }}
# Set the value to "mailhog" if using docker-compose for development environments,
# Set the value as "localhost" or your SMTP address in other environments
# If SMTP_ADDRESS is empty, Chatwoot would try to use sendmail(postfix)
SMTP_ADDRESS=support@{{ domain }}
SMTP_PORT=587
# Optional, only if SMTP server requires authentication.
# These comments must live on their own lines: docker env_file parsing does
# NOT strip inline "# ..." text, so a trailing comment after "=" would become
# part of the credential value and break SMTP auth.
SMTP_USERNAME=support@{{ domain }}
SMTP_PASSWORD=
# plain,login,cram_md5
SMTP_AUTHENTICATION=login
SMTP_ENABLE_STARTTLS_AUTO=true
# Can be: 'none', 'peer', 'client_once', 'fail_if_no_peer_cert', see http://api.rubyonrails.org/classes/ActionMailer/Base.html
SMTP_OPENSSL_VERIFY_MODE=peer
# Comment out the following environment variables if required by your SMTP server
SMTP_TLS=true
SMTP_SSL=
|
||||
|
||||
# Mail Incoming
# This is the domain set for the reply emails when conversation continuity is enabled
MAILER_INBOUND_EMAIL_DOMAIN={{ domain }}
# Set this to appropriate ingress channel with regards to incoming emails
# Possible values are :
# relay for Exim, Postfix, Qmail
# mailgun for Mailgun
# mandrill for Mandrill
# postmark for Postmark
# sendgrid for Sendgrid
RAILS_INBOUND_EMAIL_SERVICE=relay
# Use one of the following based on the email ingress service
# Ref: https://edgeguides.rubyonrails.org/action_mailbox_basics.html
# No space after "=": env-file values are taken verbatim, so a leading space
# would be included in the password and break relay authentication.
RAILS_INBOUND_EMAIL_PASSWORD={{ chatwoot_rails_inbound_email_password }}
MAILGUN_INGRESS_SIGNING_KEY=
MANDRILL_INGRESS_API_KEY=
|
||||
|
||||
# Storage
|
||||
ACTIVE_STORAGE_SERVICE=local
|
||||
|
||||
# Amazon S3
|
||||
# documentation: https://www.chatwoot.com/docs/configuring-s3-bucket-as-cloud-storage
|
||||
S3_BUCKET_NAME=
|
||||
AWS_ACCESS_KEY_ID=
|
||||
AWS_SECRET_ACCESS_KEY=
|
||||
AWS_REGION=
|
||||
|
||||
# Log settings
|
||||
# Disable if you want to write logs to a file
|
||||
RAILS_LOG_TO_STDOUT=true
|
||||
LOG_LEVEL=info
|
||||
LOG_SIZE=500
|
||||
# Configure this environment variable if you want to use lograge instead of rails logger
|
||||
#LOGRAGE_ENABLED=true
|
||||
|
||||
### This environment variables are only required if you are setting up social media channels
|
||||
|
||||
# Facebook
|
||||
# documentation: https://www.chatwoot.com/docs/facebook-setup
|
||||
FB_VERIFY_TOKEN=
|
||||
FB_APP_SECRET=
|
||||
FB_APP_ID=
|
||||
|
||||
# https://developers.facebook.com/docs/messenger-platform/instagram/get-started#app-dashboard
|
||||
IG_VERIFY_TOKEN=
|
||||
|
||||
# Twitter
|
||||
# documentation: https://www.chatwoot.com/docs/twitter-app-setup
|
||||
TWITTER_APP_ID=
|
||||
TWITTER_CONSUMER_KEY=
|
||||
TWITTER_CONSUMER_SECRET=
|
||||
TWITTER_ENVIRONMENT=
|
||||
|
||||
#slack integration
|
||||
SLACK_CLIENT_ID=
|
||||
SLACK_CLIENT_SECRET=
|
||||
|
||||
# Google OAuth
|
||||
GOOGLE_OAUTH_CLIENT_ID=
|
||||
GOOGLE_OAUTH_CLIENT_SECRET=
|
||||
GOOGLE_OAUTH_CALLBACK_URL=
|
||||
|
||||
### Change this env variable only if you are using a custom build mobile app
|
||||
## Mobile app env variables
|
||||
IOS_APP_ID=L7YLMN4634.com.chatwoot.app
|
||||
ANDROID_BUNDLE_ID=com.chatwoot.app
|
||||
|
||||
# https://developers.google.com/android/guides/client-auth (use keytool to print the fingerprint in the first section)
|
||||
ANDROID_SHA256_CERT_FINGERPRINT=AC:73:8E:DE:EB:56:EA:CC:10:87:02:A7:65:37:7B:38:D4:5D:D4:53:F8:3B:FB:D3:C6:28:64:1D:AA:08:1E:D8
|
||||
|
||||
### Smart App Banner
|
||||
# https://developer.apple.com/library/archive/documentation/AppleApplications/Reference/SafariWebContent/PromotingAppswithAppBanners/PromotingAppswithAppBanners.html
|
||||
# You can find your app-id in https://itunesconnect.apple.com
|
||||
#IOS_APP_IDENTIFIER=1495796682
|
||||
|
||||
## Push Notification
|
||||
## generate a new key value here : https://d3v.one/vapid-key-generator/
|
||||
# VAPID_PUBLIC_KEY=
|
||||
# VAPID_PRIVATE_KEY=
|
||||
#
|
||||
# for mobile apps
|
||||
# FCM_SERVER_KEY=
|
||||
|
||||
### APM and Error Monitoring configurations
|
||||
## Elastic APM
|
||||
## https://www.elastic.co/guide/en/apm/agent/ruby/current/getting-started-rails.html
|
||||
# ELASTIC_APM_SERVER_URL=
|
||||
# ELASTIC_APM_SECRET_TOKEN=
|
||||
|
||||
## Sentry
|
||||
# SENTRY_DSN=
|
||||
|
||||
## LogRocket
|
||||
# LOG_ROCKET_PROJECT_ID=xxxxx/some-project
|
||||
|
||||
# MICROSOFT CLARITY
|
||||
# MS_CLARITY_TOKEN=xxxxxxxxx
|
||||
|
||||
## Scout
|
||||
## https://scoutapm.com/docs/ruby/configuration
|
||||
# SCOUT_KEY=YOURKEY
|
||||
# SCOUT_NAME=YOURAPPNAME (Production)
|
||||
# SCOUT_MONITOR=true
|
||||
|
||||
## NewRelic
|
||||
# https://docs.newrelic.com/docs/agents/ruby-agent/configuration/ruby-agent-configuration/
|
||||
# NEW_RELIC_LICENSE_KEY=
|
||||
# Set this to true to allow newrelic apm to send logs.
|
||||
# This is turned off by default.
|
||||
# NEW_RELIC_APPLICATION_LOGGING_ENABLED=
|
||||
|
||||
## Datadog
|
||||
## https://github.com/DataDog/dd-trace-rb/blob/master/docs/GettingStarted.md#environment-variables
|
||||
# DD_TRACE_AGENT_URL=
|
||||
|
||||
# MaxMindDB API key to download GeoLite2 City database
|
||||
# IP_LOOKUP_API_KEY=
|
||||
|
||||
## Rack Attack configuration
|
||||
## To prevent and throttle abusive requests
|
||||
# ENABLE_RACK_ATTACK=true
|
||||
|
||||
## Running chatwoot as an API only server
|
||||
## setting this value to true will disable the frontend dashboard endpoints
|
||||
# CW_API_ONLY_SERVER=false
|
||||
|
||||
## Development Only Config
|
||||
# if you want to use letter_opener for local emails
|
||||
# LETTER_OPENER=true
|
||||
# meant to be used in github codespaces
|
||||
# WEBPACKER_DEV_SERVER_PUBLIC=
|
||||
|
||||
# If you want to use official mobile app,
|
||||
# the notifications would be relayed via a Chatwoot server
|
||||
ENABLE_PUSH_RELAY_SERVER=true
|
||||
|
||||
# Stripe API key
|
||||
STRIPE_SECRET_KEY=
|
||||
STRIPE_WEBHOOK_SECRET=
|
||||
|
||||
# Set to true if you want to upload files to cloud storage using the signed url
|
||||
# Make sure to follow https://edgeguides.rubyonrails.org/active_storage_overview.html#cross-origin-resource-sharing-cors-configuration on the cloud storage after setting this to true.
|
||||
DIRECT_UPLOADS_ENABLED=
|
||||
|
||||
#MS OAUTH creds
|
||||
AZURE_APP_ID=
|
||||
AZURE_APP_SECRET=
|
||||
|
||||
## Advanced configurations
|
||||
## Change these values to fine tune performance
|
||||
# control the concurrency setting of sidekiq
|
||||
# SIDEKIQ_CONCURRENCY=10
|
||||
|
||||
|
||||
# AI powered features
|
||||
## OpenAI key
|
||||
OPENAI_API_KEY=
|
||||
|
||||
# Sentiment analysis model file path
|
||||
SENTIMENT_FILE_PATH=
|
||||
|
|
@ -0,0 +1,121 @@
|
|||
version: '3'

services:
  rails:
    # NOTE(review): "latest" is unpinned — diun will announce updates, but a
    # pull can silently change the running version. Consider pinning a tag.
    image: chatwoot/chatwoot:latest
    container_name: {{ customer }}-chatwoot-rails
    env_file: /opt/letsbe/env/chatwoot.env
    restart: always
    labels:
      - "diun.enable=true"
    depends_on:
      - postgres
      - redis
    ports:
      # Loopback-only; host nginx proxies the public domain here.
      - '127.0.0.1:3011:3000'
    environment:
      - NODE_ENV=production
      - RAILS_ENV=production
      - INSTALLATION_ENV=docker
    entrypoint: docker/entrypoints/rails.sh
    command: ['bundle', 'exec', 'rails', 's', '-p', '3000', '-b', '0.0.0.0']
    volumes:
      - {{ customer }}-chatwoot-storage:/app/storage
      - {{ customer }}-chatwoot-backups:/tmp/backups
    networks:
      {{ customer }}-chatwoot:
        ipv4_address: 172.20.1.2

  sidekiq:
    container_name: {{ customer }}-chatwoot-sidekiq
    image: chatwoot/chatwoot:latest
    restart: always
    env_file: /opt/letsbe/env/chatwoot.env
    depends_on:
      - postgres
      - redis
    environment:
      - NODE_ENV=production
      - RAILS_ENV=production
      - INSTALLATION_ENV=docker
    command: ['bundle', 'exec', 'sidekiq', '-C', 'config/sidekiq.yml']
    volumes:
      # Shares the same storage volume as the rails service (attachments).
      - {{ customer }}-chatwoot-storage:/app/storage
    networks:
      {{ customer }}-chatwoot:
        ipv4_address: 172.20.1.3

  postgres:
    container_name: {{ customer }}-chatwoot-postgres
    image: pgvector/pgvector:pg16
    restart: always
    ports:
      - '127.0.0.1:3049:5432'
    volumes:
      - {{ customer }}-chatwoot-postgres:/var/lib/postgresql/data
      - {{ customer }}-chatwoot-backups:/tmp/backups
    environment:
      - POSTGRES_DB=chatwoot_production
      - POSTGRES_USER={{ chatwoot_postgres_username }}
      # Please provide your own password.
      - POSTGRES_PASSWORD={{ chatwoot_postgres_password }}
    networks:
      {{ customer }}-chatwoot:
        ipv4_address: 172.20.1.4

  redis:
    image: redis:alpine
    container_name: {{ customer }}-chatwoot-redis
    restart: always
    # "$$" defers expansion to the shell INSIDE the container, where env_file
    # supplies REDIS_PASSWORD. A single "$" is interpolated by docker compose
    # from the deploy shell / project .env at parse time — usually unset there,
    # which would start redis with an empty requirepass.
    command: ["sh", "-c", "redis-server --requirepass \"$$REDIS_PASSWORD\""]
    env_file: /opt/letsbe/env/chatwoot.env
    volumes:
      - {{ customer }}-chatwoot-redis:/data
    ports:
      - '127.0.0.1:3050:6379'
    networks:
      {{ customer }}-chatwoot:
        ipv4_address: 172.20.1.5
|
||||
  # Sidecar that polls POP/IMAP mailboxes via getmail6 and relays fetched mail
  # to Chatwoot's Action Mailbox relay endpoint (conversation continuity).
  getmail:
    image: python:3.12-alpine
    container_name: {{ customer }}-chatwoot-getmail
    restart: always
    depends_on:
      - rails
    environment:
      # NOTE(review): ${RAILS_INBOUND_EMAIL_PASSWORD} is interpolated by docker
      # compose from the deploy shell / project .env, not from chatwoot.env —
      # confirm the deploy tooling exports it here.
      INGRESS_PASSWORD: ${RAILS_INBOUND_EMAIL_PASSWORD}
      CHATWOOT_RELAY_URL: http://rails:3000/rails/action_mailbox/relay/inbound_emails
    volumes:
      - type: bind
        source: /opt/letsbe/stacks/chatwoot/getmail
        target: /opt/getmail
    # Installs getmail6 at start, then loops over every getmailrc file each
    # minute.
    # NOTE(review): the shell below looks garbled — "--quiet > done" redirects
    # getmail's stdout into a file literally named "done", and the inner `for`
    # loop then has no closing `done` before `sleep 60`. The intent was likely
    # "--quiet; done; sleep 60". Verify against the original template before
    # relying on this service.
    entrypoint: >
      sh -c "
      apk add --no-cache curl ca-certificates &&
      pip install --no-cache-dir getmail6 &&
      chmod +x /opt/getmail/import_mail_to_chatwoot || true &&
      while true; do
      for f in /opt/getmail/getmailrc /opt/getmail/getmailrc-*; do
      [ -f \"$f\" ] || continue
      getmail --getmaildir /opt/getmail --rcfile \"$(basename \"$f\")\" --quiet > done
      sleep 60
      done
      "
    networks:
      {{ customer }}-chatwoot:
        ipv4_address: 172.20.1.6
|
||||
|
||||
networks:
|
||||
{{ customer }}-chatwoot:
|
||||
ipam:
|
||||
driver: default
|
||||
config:
|
||||
- subnet: 172.20.1.0/28
|
||||
gateway: 172.20.1.1
|
||||
|
||||
volumes:
|
||||
{{ customer }}-chatwoot-storage:
|
||||
{{ customer }}-chatwoot-postgres:
|
||||
{{ customer }}-chatwoot-redis:
|
||||
{{ customer }}-chatwoot-backups:
|
||||
|
|
@ -0,0 +1,21 @@
|
|||
# Diun (Docker image update notifier) configuration.
watch:
  workers: 20
  # Check every 6 hours, with up to 30s of jitter to spread registry load.
  schedule: "0 */6 * * *"
  jitter: 30s
  firstCheckNotif: true

providers:
  docker:
    # Only containers labelled diun.enable=true are watched (watchByDefault: false).
    watchStopped: true
    watchByDefault: false

notif:
  mail:
    host: mail.{{ domain }} # your mail server (Poste in your case)
    port: 465 # SSL port
    ssl: true
    insecureSkipVerify: false
    username: updates@{{ domain }} # change to your sender address
    # NOTE(review): in YAML, " #" after the value position starts a comment —
    # "##EmailPassword" below parses as an EMPTY password plus a comment,
    # unless the deploy tooling text-substitutes the ##EmailPassword
    # placeholder before this file is parsed. TODO: confirm the substitution
    # step exists.
    password: ##EmailPassword # use a strong app password
    from: updates@{{ domain }}
    to: matt@letsbe.solutions
|
||||
|
|
@ -0,0 +1,19 @@
|
|||
version: "3.7"

services:
  # Diun watches labelled containers and mails when newer images are published.
  diun:
    container_name: {{ customer }}-diun
    # NOTE(review): ":latest" is unpinned — the notifier itself updates
    # silently on pull. Consider pinning a version tag.
    image: crazymax/diun:latest
    command: serve
    labels:
      - "diun.enable=true"
    volumes:
      - ./data:/data
      - ./diun.yml:/diun.yml:ro
      # Read access to the Docker socket is root-equivalent on the host;
      # required for the docker provider, but treat this container as trusted.
      - /var/run/docker.sock:/var/run/docker.sock
    environment:
      - TZ=Europe/Paris
      - DIUN_CONFIG=/diun.yml
      - LOG_LEVEL=info
      - LOG_JSON=false
    restart: always
|
||||
|
|
@ -0,0 +1,47 @@
|
|||
# Database Settings
|
||||
POSTGRES_USER={{ documenso_postgres_user }}
|
||||
POSTGRES_PASSWORD={{ documenso_postgres_password }}
|
||||
POSTGRES_DB=documenso_db
|
||||
POSTGRES_PORT=5432
|
||||
|
||||
# Documenso App Settings
|
||||
DOCUMENSO_PORT=3020
|
||||
NEXTAUTH_URL=https://{{ domain_documenso }}
|
||||
NEXTAUTH_SECRET={{ documenso_nextauth_secret }}
|
||||
NEXT_PRIVATE_ENCRYPTION_KEY={{ documenso_encryption_key }}
|
||||
NEXT_PRIVATE_ENCRYPTION_SECONDARY_KEY={{ documenso_encryption_secondary_key }}
|
||||
NEXT_PRIVATE_GOOGLE_CLIENT_ID=
|
||||
NEXT_PRIVATE_GOOGLE_CLIENT_SECRET=
|
||||
NEXT_PUBLIC_WEBAPP_URL=https://{{ domain_documenso }}
|
||||
NEXT_PUBLIC_MARKETING_URL=https://{{ domain }}
|
||||
NEXT_PRIVATE_DATABASE_URL=postgres://{{ documenso_postgres_user }}:{{ documenso_postgres_password }}@{{ customer }}-documenso-db:5432/documenso_db
|
||||
NEXT_PRIVATE_DIRECT_DATABASE_URL=postgres://{{ documenso_postgres_user }}:{{ documenso_postgres_password }}@{{ customer }}-documenso-db:5432/documenso_db
|
||||
NEXT_PUBLIC_UPLOAD_TRANSPORT=db
|
||||
NEXT_PRIVATE_UPLOAD_ENDPOINT=https://{{ domain_s3 }}
|
||||
NEXT_PRIVATE_UPLOAD_FORCE_PATH_STYLE=true
|
||||
NEXT_PRIVATE_UPLOAD_REGION=eu-central-1
|
||||
NEXT_PRIVATE_UPLOAD_BUCKET=signatures
|
||||
NEXT_PRIVATE_UPLOAD_ACCESS_KEY_ID={{ minio_root_user }}
|
||||
NEXT_PRIVATE_UPLOAD_SECRET_ACCESS_KEY={{ minio_root_password }}
|
||||
NEXT_PRIVATE_SMTP_TRANSPORT=smtp-auth
|
||||
NEXT_PRIVATE_SMTP_HOST={{ domain_poste }}
|
||||
NEXT_PRIVATE_SMTP_PORT=465
|
||||
NEXT_PRIVATE_SMTP_USERNAME=noreply@{{ domain }}
|
||||
NEXT_PRIVATE_SMTP_PASSWORD=
|
||||
NEXT_PRIVATE_SMTP_APIKEY_USER=
|
||||
NEXT_PRIVATE_SMTP_APIKEY=
|
||||
NEXT_PRIVATE_SMTP_SECURE=true
|
||||
NEXT_PRIVATE_SMTP_FROM_NAME="{{ company_name }} Signatures"
|
||||
NEXT_PRIVATE_SMTP_FROM_ADDRESS=noreply@{{ domain }}
|
||||
NEXT_PRIVATE_SMTP_SERVICE=
|
||||
NEXT_PRIVATE_RESEND_API_KEY=
|
||||
NEXT_PRIVATE_MAILCHANNELS_API_KEY=
|
||||
NEXT_PRIVATE_MAILCHANNELS_ENDPOINT=
|
||||
NEXT_PRIVATE_MAILCHANNELS_DKIM_DOMAIN=
|
||||
NEXT_PRIVATE_MAILCHANNELS_DKIM_SELECTOR=
|
||||
NEXT_PRIVATE_MAILCHANNELS_DKIM_PRIVATE_KEY=
|
||||
NEXT_PUBLIC_DOCUMENT_SIZE_UPLOAD_LIMIT=50MB
|
||||
NEXT_PUBLIC_POSTHOG_KEY=
|
||||
NEXT_PUBLIC_DISABLE_SIGNUP=true
|
||||
NEXT_PRIVATE_SIGNING_LOCAL_FILE_PATH=/opt/documenso/certificate.p12
|
||||
NEXT_PRIVATE_SIGNING_PASSPHRASE=
|
||||
|
|
@ -0,0 +1,60 @@
|
|||
version: "3.8"

services:
  database:
    container_name: {{ customer }}-documenso-db
    image: postgres:15
    restart: always
    env_file:
      - /opt/letsbe/env/documenso.env
    environment:
      - POSTGRES_USER={{ documenso_postgres_user }}
      - POSTGRES_PASSWORD={{ documenso_postgres_password }}
      - POSTGRES_DB=documenso_db
    healthcheck:
      test: ['CMD-SHELL', 'pg_isready -U {{ documenso_postgres_user }} -d documenso_db']
      interval: 10s
      timeout: 5s
      retries: 5
    volumes:
      - {{ customer }}-documenso-database:/var/lib/postgresql/data
      - {{ customer }}-documenso-backups:/tmp/backups
    networks:
      {{ customer }}-documenso:
        ipv4_address: 172.20.29.2
    ports:
      # NOTE(review): this publishes the default Postgres port 5432 on the host
      # loopback — unlike the other stacks (which use per-stack ports like
      # 3049), so it will collide with any host Postgres or another stack
      # publishing 5432. Confirm this is intentional.
      - "127.0.0.1:5432:5432"

  documenso:
    container_name: {{ customer }}-documenso-app
    # NOTE(review): ":latest" is unpinned — a pull can silently change the
    # deployed Documenso version. Consider pinning a release tag.
    image: documenso/documenso:latest
    restart: always
    depends_on:
      database:
        condition: service_healthy
    env_file:
      - /opt/letsbe/env/documenso.env
    environment:
      - PORT=3020
      - NEXT_PRIVATE_INTERNAL_WEBAPP_URL=https://{{ domain_documenso }}
    ports:
      - "127.0.0.1:3020:3020"
    volumes:
      # Document-signing certificate must exist on the host at this path.
      - /opt/documenso/certificate.p12:/opt/documenso/certificate.p12
      - /etc/localtime:/etc/localtime:ro
    networks:
      {{ customer }}-documenso:
        ipv4_address: 172.20.29.3
    labels:
      - "diun.enable=true"

networks:
  {{ customer }}-documenso:
    driver: bridge
    ipam:
      config:
        - subnet: 172.20.29.0/28

volumes:
  {{ customer }}-documenso-database:
  {{ customer }}-documenso-backups:
|
||||
|
|
@ -0,0 +1,53 @@
|
|||
version: '3.8'
|
||||
|
||||
services:
|
||||
ghost:
|
||||
container_name: {{ customer }}-ghost
|
||||
image: ghost:alpine
|
||||
restart: always
|
||||
ports:
|
||||
- "127.0.0.1:2368:2368"
|
||||
environment:
|
||||
# see https://ghost.org/docs/config/#configuration-options
|
||||
database__client: mysql
|
||||
database__connection__host: {{ customer }}-ghost-db
|
||||
database__connection__user: root
|
||||
database__connection__password: {{ ghost_mysql_password }}
|
||||
database__connection__database: ghost
|
||||
url: https://{{ domain_ghost }}
|
||||
volumes:
|
||||
- {{ customer }}-ghost-data:/var/lib/ghost/content
|
||||
- ./config.production.json:/var/lib/ghost/config.production.json
|
||||
networks:
|
||||
{{ customer }}-ghost:
|
||||
ipv4_address: 172.20.30.2
|
||||
depends_on:
|
||||
- ghost-db
|
||||
labels:
|
||||
- "diun.enable=true"
|
||||
|
||||
ghost-db:
|
||||
container_name: {{ customer }}-ghost-db
|
||||
image: mysql:8.0
|
||||
restart: always
|
||||
environment:
|
||||
MYSQL_ROOT_PASSWORD: {{ ghost_mysql_password }}
|
||||
MYSQL_DATABASE: ghost
|
||||
volumes:
|
||||
- {{ customer }}-ghost-db:/var/lib/mysql
|
||||
- {{ customer }}-ghost-backups:/tmp/backups
|
||||
networks:
|
||||
{{ customer }}-ghost:
|
||||
ipv4_address: 172.20.30.3
|
||||
|
||||
networks:
|
||||
{{ customer }}-ghost:
|
||||
driver: bridge
|
||||
ipam:
|
||||
config:
|
||||
- subnet: 172.20.30.0/28
|
||||
|
||||
volumes:
|
||||
{{ customer }}-ghost-data:
|
||||
{{ customer }}-ghost-db:
|
||||
{{ customer }}-ghost-backups:
|
||||
|
|
@ -0,0 +1,37 @@
|
|||
version: '3.9'
|
||||
|
||||
services:
|
||||
drone_gitea:
|
||||
container_name: {{ customer }}-drone_gitea
|
||||
image: drone/drone:2
|
||||
restart: always
|
||||
labels:
|
||||
- "diun.enable=true"
|
||||
volumes:
|
||||
- {{ customer }}-drone-gitea-data:/data
|
||||
- {{ customer }}-drone-gitea-backups:/tmp/backups
|
||||
ports:
|
||||
- "127.0.0.1:3009:80"
|
||||
# - "127.0.0.1:3010:443"
|
||||
environment:
|
||||
DRONE_GITEA_SERVER: 'https://{{ domain_gitea }}'
|
||||
DRONE_GITEA_CLIENT_ID: ''
|
||||
DRONE_GITEA_CLIENT_SECRET: ''
|
||||
DRONE_RPC_SECRET: '{{ drone_gitea_rpc_secret }}'
|
||||
DRONE_SERVER_HOST: '{{ domain_gitea_drone }}'
|
||||
DRONE_SERVER_PROTO: https
|
||||
networks:
|
||||
{{ customer }}-drone-gitea:
|
||||
ipv4_address: 172.20.2.2
|
||||
|
||||
networks:
|
||||
{{ customer }}-drone-gitea:
|
||||
ipam:
|
||||
driver: default
|
||||
config:
|
||||
- subnet: 172.20.2.0/28
|
||||
gateway: 172.20.2.1
|
||||
|
||||
volumes:
|
||||
{{ customer }}-drone-gitea-data:
|
||||
{{ customer }}-drone-gitea-backups:
|
||||
|
|
@ -0,0 +1,76 @@
|
|||
### - POSTGRES - ###
|
||||
version: '3.9'
|
||||
|
||||
services:
|
||||
gitea:
|
||||
container_name: {{ customer }}-gitea
|
||||
image: gitea/gitea:latest
|
||||
restart: always
|
||||
labels:
|
||||
- "diun.enable=true"
|
||||
volumes:
|
||||
- {{ customer }}-gitea-data:/data
|
||||
- {{ customer }}-gitea-backups:/tmp/backups
|
||||
- /etc/timezone:/etc/timezone:ro
|
||||
- /etc/localtime:/etc/localtime:ro
|
||||
ports:
|
||||
- "127.0.0.1:3007:3000"
|
||||
- "3036:22"
|
||||
environment:
|
||||
USER_UID: 1000
|
||||
USER_GID: 1000
|
||||
GITEA__database__DB_TYPE: postgres
|
||||
GITEA__database__HOST: {{ customer }}-gitea-db:5432
|
||||
GITEA__database__NAME: 'gitea'
|
||||
GITEA__database__USER: '{{ gitea_postgres_user }}'
|
||||
GITEA__database__PASSWD: '{{ gitea_postgres_password }}'
|
||||
networks:
|
||||
{{ customer }}-gitea:
|
||||
ipv4_address: 172.20.3.2
|
||||
depends_on:
|
||||
- gitea-db
|
||||
|
||||
gitea-db:
|
||||
container_name: {{ customer }}-gitea-db
|
||||
image: postgres:14
|
||||
restart: always
|
||||
environment:
|
||||
POSTGRES_USER: '{{ gitea_postgres_user }}'
|
||||
POSTGRES_PASSWORD: '{{ gitea_postgres_password }}'
|
||||
POSTGRES_DB: 'gitea'
|
||||
volumes:
|
||||
- {{ customer }}-gitea-postgres:/var/lib/postgresql/data
|
||||
- {{ customer }}-gitea-backups:/tmp/backups
|
||||
networks:
|
||||
{{ customer }}-gitea:
|
||||
ipv4_address: 172.20.3.3
|
||||
|
||||
# runner:
|
||||
# image: gitea/act_runner:latest-dind-rootless
|
||||
# restart: always
|
||||
# privileged: true
|
||||
# volumes:
|
||||
# - {{ customer }}-gitea-runner:/data
|
||||
# environment:
|
||||
# - GITEA_INSTANCE_URL=https://{{ domain_gitea }}
|
||||
# - DOCKER_HOST=unix:///var/run/user/1000/docker.sock
|
||||
# - GITEA_RUNNER_REGISTRATION_TOKEN=<registration token>
|
||||
# networks:
|
||||
# {{ customer }}-gitea:
|
||||
# ipv4_address: 172.20.3.5
|
||||
# depends_on:
|
||||
# - gitea
|
||||
|
||||
networks:
|
||||
{{ customer }}-gitea:
|
||||
ipam:
|
||||
driver: default
|
||||
config:
|
||||
- subnet: 172.20.3.0/28
|
||||
gateway: 172.20.3.1
|
||||
|
||||
volumes:
|
||||
{{ customer }}-gitea-data:
|
||||
{{ customer }}-gitea-postgres:
|
||||
{{ customer }}-gitea-runner:
|
||||
{{ customer }}-gitea-backups:
|
||||
|
|
@ -0,0 +1,109 @@
|
|||
version: "3.8"
|
||||
|
||||
services:
|
||||
postgres:
|
||||
container_name: {{ customer }}-glitchtip-postgres
|
||||
image: postgres:15
|
||||
ports:
|
||||
- "127.0.0.1:3046:5432"
|
||||
environment:
|
||||
#POSTGRES_HOST_AUTH_METHOD: "trust" # Consider removing this and setting a password
|
||||
POSTGRES_PASSWORD: '{{ glitchtip_database_password }}'
|
||||
restart: always
|
||||
volumes:
|
||||
- {{ customer }}-glitchtip-postgres:/var/lib/postgresql/data
|
||||
- {{ customer }}-glitchtip-backups:/tmp/backups
|
||||
networks:
|
||||
{{ customer }}-glitchtip:
|
||||
ipv4_address: 172.20.4.2
|
||||
|
||||
redis:
|
||||
image: redis
|
||||
container_name: {{ customer }}-glitchtip_redis
|
||||
restart: always
|
||||
networks:
|
||||
{{ customer }}-glitchtip:
|
||||
ipv4_address: 172.20.4.3
|
||||
|
||||
web:
|
||||
container_name: {{ customer }}-glitchtip-web
|
||||
image: glitchtip/glitchtip
|
||||
labels:
|
||||
- "diun.enable=true"
|
||||
depends_on:
|
||||
- postgres
|
||||
- redis
|
||||
ports:
|
||||
- "127.0.0.1:3017:8000"
|
||||
environment:
|
||||
DATABASE_URL: 'postgres://postgres:{{ glitchtip_database_password }}@{{ customer }}-glitchtip-postgres:5432/postgres'
|
||||
SECRET_KEY: '{{ glitchtip_secret_key }}' # best to run openssl rand -hex 32
|
||||
PORT: 8000
|
||||
#EMAIL_URL: 'consolemail://email:password@smtp-url:port' # Example smtp://email:password@smtp_url:port https://glitchtip.com/documentation/install#configuration
|
||||
GLITCHTIP_DOMAIN: 'https://{{ domain_glitchtip }}' # Change this to your domain
|
||||
DEFAULT_FROM_EMAIL: 'no-reply@{{ domain }}' # Change this to your email
|
||||
#CELERY_WORKER_AUTOSCALE: "1,2" # Scale between 1 and 3 to prevent excessive memory usage. Change it or remove to set it to the number of cpu cores.
|
||||
#CELERY_WORKER_MAX_TASKS_PER_CHILD: "10000"
|
||||
restart: always
|
||||
volumes:
|
||||
- {{ customer }}-glitchtip-uploads:/code/uploads
|
||||
- {{ customer }}-glitchtip-backups:/tmp/backups
|
||||
networks:
|
||||
{{ customer }}-glitchtip:
|
||||
ipv4_address: 172.20.4.4
|
||||
|
||||
worker:
|
||||
container_name: {{ customer }}-glitchtip-worker
|
||||
image: glitchtip/glitchtip
|
||||
command: ./bin/run-celery-with-beat.sh
|
||||
depends_on:
|
||||
- postgres
|
||||
- redis
|
||||
environment:
|
||||
DATABASE_URL: 'postgres://postgres:{{ glitchtip_database_password }}@{{ customer }}-glitchtip-postgres:5432/postgres'
|
||||
SECRET_KEY: '{{ glitchtip_secret_key }}' # best to run openssl rand -hex 32
|
||||
PORT: 8000
|
||||
#EMAIL_URL: 'consolemail://email:password@smtp-url:port' # Example smtp://email:password@smtp_url:port https://glitchtip.com/documentation/install#configuration
|
||||
GLITCHTIP_DOMAIN: 'https://{{ domain_glitchtip }}' # Change this to your domain
|
||||
DEFAULT_FROM_EMAIL: 'no-reply@{{ domain }}' # Change this to your email
|
||||
#CELERY_WORKER_AUTOSCALE: "1,2" # Scale between 1 and 3 to prevent excessive memory usage. Change it or remove to set it to the number of cpu cores.
|
||||
#CELERY_WORKER_MAX_TASKS_PER_CHILD: "10000"
|
||||
restart: always
|
||||
volumes:
|
||||
- {{ customer }}-glitchtip-uploads:/code/uploads
|
||||
networks:
|
||||
{{ customer }}-glitchtip:
|
||||
ipv4_address: 172.20.4.5
|
||||
|
||||
migrate:
|
||||
container_name: {{ customer }}-glitchtip-migrate
|
||||
image: glitchtip/glitchtip
|
||||
depends_on:
|
||||
- postgres
|
||||
- redis
|
||||
command: "./manage.py migrate"
|
||||
environment:
|
||||
DATABASE_URL: 'postgres://postgres:{{ glitchtip_database_password }}@{{ customer }}-glitchtip-postgres:5432/postgres'
|
||||
SECRET_KEY: '{{ glitchtip_secret_key }}' # best to run openssl rand -hex 32
|
||||
PORT: 8000
|
||||
#EMAIL_URL: 'consolemail://email:password@smtp-url:port' # Example smtp://email:password@smtp_url:port https://glitchtip.com/documentation/install#configuration
|
||||
GLITCHTIP_DOMAIN: 'https://{{ domain_glitchtip }}' # Change this to your domain
|
||||
DEFAULT_FROM_EMAIL: 'no-reply@{{ domain }}' # Change this to your email
|
||||
#CELERY_WORKER_AUTOSCALE: "1,2" # Scale between 1 and 3 to prevent excessive memory usage. Change it or remove to set it to the number of cpu cores.
|
||||
#CELERY_WORKER_MAX_TASKS_PER_CHILD: "10000"
|
||||
networks:
|
||||
{{ customer }}-glitchtip:
|
||||
ipv4_address: 172.20.4.6
|
||||
|
||||
networks:
|
||||
{{ customer }}-glitchtip:
|
||||
ipam:
|
||||
driver: default
|
||||
config:
|
||||
- subnet: 172.20.4.0/28
|
||||
gateway: 172.20.4.1
|
||||
|
||||
volumes:
|
||||
{{ customer }}-glitchtip-postgres:
|
||||
{{ customer }}-glitchtip-uploads:
|
||||
{{ customer }}-glitchtip-backups:
|
||||
|
|
@ -0,0 +1,29 @@
|
|||
version: '3.9'
|
||||
|
||||
services:
|
||||
html:
|
||||
container_name: {{ customer }}-html-website
|
||||
image: nginx
|
||||
restart: always
|
||||
labels:
|
||||
- "diun.enable=true"
|
||||
volumes:
|
||||
- {{ customer }}-html:/usr/share/nginx/html:ro
|
||||
- {{ customer }}-html-backups:/tmp/backups
|
||||
ports:
|
||||
- "127.0.0.1:3000:80"
|
||||
networks:
|
||||
{{ customer }}-html:
|
||||
ipv4_address: 172.20.5.2
|
||||
|
||||
networks:
|
||||
{{ customer }}-html:
|
||||
ipam:
|
||||
driver: default
|
||||
config:
|
||||
- subnet: 172.20.5.0/28
|
||||
gateway: 172.20.5.1
|
||||
|
||||
volumes:
|
||||
{{ customer }}-html:
|
||||
{{ customer }}-html-backups:
|
||||
|
|
@ -0,0 +1,10 @@
|
|||
KEYCLOAK_ADMIN=admin
|
||||
KEYCLOAK_ADMIN_PASSWORD={{ keycloak_admin_password }}
|
||||
KC_DB=postgres
|
||||
KC_DB_URL=jdbc:postgresql://{{ customer }}-keycloak-db:5432/keycloak
|
||||
KC_DB_USERNAME=keycloak
|
||||
KC_DB_PASSWORD={{ keycloak_postgres_password }}
|
||||
KC_HOSTNAME_STRICT=false
|
||||
KC_PROXY=edge
|
||||
KC_HTTP_RELATIVE_PATH=/
|
||||
KC_HEALTH_ENABLED=true
|
||||
|
|
@ -0,0 +1,54 @@
|
|||
version: '3.8'
|
||||
|
||||
services:
|
||||
postgres:
|
||||
container_name: {{ customer }}-keycloak-db
|
||||
image: postgres:14
|
||||
restart: always
|
||||
volumes:
|
||||
- {{ customer }}-keycloak-postgres:/var/lib/postgresql/data
|
||||
- {{ customer }}-keycloak-backups:/tmp/backups
|
||||
environment:
|
||||
POSTGRES_DB: keycloak
|
||||
POSTGRES_USER: keycloak
|
||||
POSTGRES_PASSWORD: {{ keycloak_postgres_password }}
|
||||
networks:
|
||||
{{ customer }}-keycloak:
|
||||
ipv4_address: 172.20.31.2
|
||||
|
||||
keycloak:
|
||||
container_name: {{ customer }}-keycloak
|
||||
image: quay.io/keycloak/keycloak:latest
|
||||
restart: always
|
||||
command: start
|
||||
environment:
|
||||
KC_DB: postgres
|
||||
KC_DB_URL: jdbc:postgresql://{{ customer }}-keycloak-db:5432/keycloak
|
||||
KC_DB_USERNAME: keycloak
|
||||
KC_DB_PASSWORD: {{ keycloak_postgres_password }}
|
||||
KEYCLOAK_ADMIN: admin
|
||||
KEYCLOAK_ADMIN_PASSWORD: {{ keycloak_admin_password }}
|
||||
KC_HOSTNAME_STRICT: false
|
||||
KC_PROXY: edge
|
||||
KC_HTTP_RELATIVE_PATH: /
|
||||
KC_HEALTH_ENABLED: true
|
||||
depends_on:
|
||||
- postgres
|
||||
ports:
|
||||
- "127.0.0.1:8080:8080"
|
||||
networks:
|
||||
{{ customer }}-keycloak:
|
||||
ipv4_address: 172.20.31.3
|
||||
labels:
|
||||
- "diun.enable=true"
|
||||
|
||||
networks:
|
||||
{{ customer }}-keycloak:
|
||||
driver: bridge
|
||||
ipam:
|
||||
config:
|
||||
- subnet: 172.20.31.0/28
|
||||
|
||||
volumes:
|
||||
{{ customer }}-keycloak-postgres:
|
||||
{{ customer }}-keycloak-backups:
|
||||
|
|
@ -0,0 +1,574 @@
|
|||
#=====================================================================#
|
||||
# LibreChat Configuration #
|
||||
#=====================================================================#
|
||||
# Please refer to the reference documentation for assistance #
|
||||
# with configuring your LibreChat environment. #
|
||||
# #
|
||||
# https://www.librechat.ai/docs/configuration/dotenv #
|
||||
#=====================================================================#
|
||||
|
||||
#==================================================#
|
||||
# Server Configuration #
|
||||
#==================================================#
|
||||
|
||||
HOST=0.0.0.0
|
||||
PORT=3080
|
||||
|
||||
MONGO_URI=mongodb://127.0.0.1:27017/LibreChat
|
||||
|
||||
DOMAIN_CLIENT=https://{{ domain_librechat }}
|
||||
DOMAIN_SERVER=https://{{ domain_librechat }}
|
||||
|
||||
NO_INDEX=true
|
||||
# Use the address that is at most n number of hops away from the Express application.
|
||||
# req.socket.remoteAddress is the first hop, and the rest are looked for in the X-Forwarded-For header from right to left.
|
||||
# A value of 0 means that the first untrusted address would be req.socket.remoteAddress, i.e. there is no reverse proxy.
|
||||
# Defaulted to 1.
|
||||
TRUST_PROXY=1
|
||||
|
||||
#===============#
|
||||
# JSON Logging #
|
||||
#===============#
|
||||
|
||||
# Use when process console logs in cloud deployment like GCP/AWS
|
||||
CONSOLE_JSON=false
|
||||
|
||||
#===============#
|
||||
# Debug Logging #
|
||||
#===============#
|
||||
|
||||
DEBUG_LOGGING=true
|
||||
DEBUG_CONSOLE=false
|
||||
|
||||
#=============#
|
||||
# Permissions #
|
||||
#=============#
|
||||
|
||||
# UID=1000
|
||||
# GID=1000
|
||||
|
||||
#===============#
|
||||
# Configuration #
|
||||
#===============#
|
||||
# Use an absolute path, a relative path, or a URL
|
||||
|
||||
# CONFIG_PATH="/alternative/path/to/librechat.yaml"
|
||||
|
||||
#===================================================#
|
||||
# Endpoints #
|
||||
#===================================================#
|
||||
|
||||
# ENDPOINTS=openAI,assistants,azureOpenAI,google,gptPlugins,anthropic
|
||||
|
||||
PROXY=
|
||||
|
||||
#===================================#
|
||||
# Known Endpoints - librechat.yaml #
|
||||
#===================================#
|
||||
# https://www.librechat.ai/docs/configuration/librechat_yaml/ai_endpoints
|
||||
|
||||
# ANYSCALE_API_KEY=
|
||||
# APIPIE_API_KEY=
|
||||
# COHERE_API_KEY=
|
||||
# DEEPSEEK_API_KEY=
|
||||
# DATABRICKS_API_KEY=
|
||||
# FIREWORKS_API_KEY=
|
||||
# GROQ_API_KEY=
|
||||
# HUGGINGFACE_TOKEN=
|
||||
# MISTRAL_API_KEY=
|
||||
# OPENROUTER_KEY=
|
||||
# PERPLEXITY_API_KEY=
|
||||
# SHUTTLEAI_API_KEY=
|
||||
# TOGETHERAI_API_KEY=
|
||||
# UNIFY_API_KEY=
|
||||
# XAI_API_KEY=
|
||||
|
||||
#============#
|
||||
# Anthropic #
|
||||
#============#
|
||||
|
||||
ANTHROPIC_API_KEY=user_provided
|
||||
# ANTHROPIC_MODELS=claude-3-7-sonnet-latest,claude-3-7-sonnet-20250219,claude-3-5-haiku-20241022,claude-3-5-sonnet-20241022,claude-3-5-sonnet-latest,claude-3-5-sonnet-20240620,claude-3-opus-20240229,claude-3-sonnet-20240229,claude-3-haiku-20240307,claude-2.1,claude-2,claude-1.2,claude-1,claude-1-100k,claude-instant-1,claude-instant-1-100k
|
||||
# ANTHROPIC_REVERSE_PROXY=
|
||||
|
||||
#============#
|
||||
# Azure #
|
||||
#============#
|
||||
|
||||
# Note: these variables are DEPRECATED
|
||||
# Use the `librechat.yaml` configuration for `azureOpenAI` instead
|
||||
# You may also continue to use them if you opt out of using the `librechat.yaml` configuration
|
||||
|
||||
# AZURE_OPENAI_DEFAULT_MODEL=gpt-3.5-turbo # Deprecated
|
||||
# AZURE_OPENAI_MODELS=gpt-3.5-turbo,gpt-4 # Deprecated
|
||||
# AZURE_USE_MODEL_AS_DEPLOYMENT_NAME=TRUE # Deprecated
|
||||
# AZURE_API_KEY= # Deprecated
|
||||
# AZURE_OPENAI_API_INSTANCE_NAME= # Deprecated
|
||||
# AZURE_OPENAI_API_DEPLOYMENT_NAME= # Deprecated
|
||||
# AZURE_OPENAI_API_VERSION= # Deprecated
|
||||
# AZURE_OPENAI_API_COMPLETIONS_DEPLOYMENT_NAME= # Deprecated
|
||||
# AZURE_OPENAI_API_EMBEDDINGS_DEPLOYMENT_NAME= # Deprecated
|
||||
# PLUGINS_USE_AZURE="true" # Deprecated
|
||||
|
||||
#=================#
|
||||
# AWS Bedrock #
|
||||
#=================#
|
||||
|
||||
# BEDROCK_AWS_DEFAULT_REGION=us-east-1 # A default region must be provided
|
||||
# BEDROCK_AWS_ACCESS_KEY_ID=someAccessKey
|
||||
# BEDROCK_AWS_SECRET_ACCESS_KEY=someSecretAccessKey
|
||||
# BEDROCK_AWS_SESSION_TOKEN=someSessionToken
|
||||
|
||||
# Note: This example list is not meant to be exhaustive. If omitted, all known, supported model IDs will be included for you.
|
||||
# BEDROCK_AWS_MODELS=anthropic.claude-3-5-sonnet-20240620-v1:0,meta.llama3-1-8b-instruct-v1:0
|
||||
|
||||
# See all Bedrock model IDs here: https://docs.aws.amazon.com/bedrock/latest/userguide/model-ids.html#model-ids-arns
|
||||
|
||||
# Notes on specific models:
|
||||
# The following models are not support due to not supporting streaming:
|
||||
# ai21.j2-mid-v1
|
||||
|
||||
# The following models are not support due to not supporting conversation history:
|
||||
# ai21.j2-ultra-v1, cohere.command-text-v14, cohere.command-light-text-v14
|
||||
|
||||
#============#
|
||||
# Google #
|
||||
#============#
|
||||
|
||||
GOOGLE_KEY=
|
||||
|
||||
# GOOGLE_REVERSE_PROXY=
|
||||
# Some reverse proxies do not support the X-goog-api-key header, uncomment to pass the API key in Authorization header instead.
|
||||
# GOOGLE_AUTH_HEADER=true
|
||||
|
||||
# Gemini API (AI Studio)
|
||||
# GOOGLE_MODELS=gemini-2.5-pro-exp-03-25,gemini-2.0-flash-exp,gemini-2.0-flash-thinking-exp-1219,gemini-exp-1121,gemini-exp-1114,gemini-1.5-flash-latest,gemini-1.0-pro,gemini-1.0-pro-001,gemini-1.0-pro-latest,gemini-1.0-pro-vision-latest,gemini-1.5-pro-latest,gemini-pro,gemini-pro-vision
|
||||
|
||||
# Vertex AI
|
||||
# GOOGLE_MODELS=gemini-1.5-flash-preview-0514,gemini-1.5-pro-preview-0514,gemini-1.0-pro-vision-001,gemini-1.0-pro-002,gemini-1.0-pro-001,gemini-pro-vision,gemini-1.0-pro
|
||||
|
||||
# GOOGLE_TITLE_MODEL=gemini-pro
|
||||
|
||||
# GOOGLE_LOC=us-central1
|
||||
|
||||
# Google Safety Settings
|
||||
# NOTE: These settings apply to both Vertex AI and Gemini API (AI Studio)
|
||||
#
|
||||
# For Vertex AI:
|
||||
# To use the BLOCK_NONE setting, you need either:
|
||||
# (a) Access through an allowlist via your Google account team, or
|
||||
# (b) Switch to monthly invoiced billing: https://cloud.google.com/billing/docs/how-to/invoiced-billing
|
||||
#
|
||||
# For Gemini API (AI Studio):
|
||||
# BLOCK_NONE is available by default, no special account requirements.
|
||||
#
|
||||
# Available options: BLOCK_NONE, BLOCK_ONLY_HIGH, BLOCK_MEDIUM_AND_ABOVE, BLOCK_LOW_AND_ABOVE
|
||||
#
|
||||
# GOOGLE_SAFETY_SEXUALLY_EXPLICIT=BLOCK_ONLY_HIGH
|
||||
# GOOGLE_SAFETY_HATE_SPEECH=BLOCK_ONLY_HIGH
|
||||
# GOOGLE_SAFETY_HARASSMENT=BLOCK_ONLY_HIGH
|
||||
# GOOGLE_SAFETY_DANGEROUS_CONTENT=BLOCK_ONLY_HIGH
|
||||
# GOOGLE_SAFETY_CIVIC_INTEGRITY=BLOCK_ONLY_HIGH
|
||||
|
||||
#============#
|
||||
# OpenAI #
|
||||
#============#
|
||||
|
||||
OPENAI_API_KEY=
|
||||
# OPENAI_MODELS=o1,o1-mini,o1-preview,gpt-4o,gpt-4.5-preview,chatgpt-4o-latest,gpt-4o-mini,gpt-3.5-turbo-0125,gpt-3.5-turbo-0301,gpt-3.5-turbo,gpt-4,gpt-4-0613,gpt-4-vision-preview,gpt-3.5-turbo-0613,gpt-3.5-turbo-16k-0613,gpt-4-0125-preview,gpt-4-turbo-preview,gpt-4-1106-preview,gpt-3.5-turbo-1106,gpt-3.5-turbo-instruct,gpt-3.5-turbo-instruct-0914,gpt-3.5-turbo-16k
|
||||
|
||||
DEBUG_OPENAI=false
|
||||
|
||||
# TITLE_CONVO=false
|
||||
# OPENAI_TITLE_MODEL=gpt-4o-mini
|
||||
|
||||
# OPENAI_SUMMARIZE=true
|
||||
# OPENAI_SUMMARY_MODEL=gpt-4o-mini
|
||||
|
||||
# OPENAI_FORCE_PROMPT=true
|
||||
|
||||
# OPENAI_REVERSE_PROXY=
|
||||
|
||||
# OPENAI_ORGANIZATION=
|
||||
|
||||
#====================#
|
||||
# Assistants API #
|
||||
#====================#
|
||||
|
||||
ASSISTANTS_API_KEY=
|
||||
# ASSISTANTS_BASE_URL=
|
||||
# ASSISTANTS_MODELS=gpt-4o,gpt-4o-mini,gpt-3.5-turbo-0125,gpt-3.5-turbo-16k-0613,gpt-3.5-turbo-16k,gpt-3.5-turbo,gpt-4,gpt-4-0314,gpt-4-32k-0314,gpt-4-0613,gpt-3.5-turbo-0613,gpt-3.5-turbo-1106,gpt-4-0125-preview,gpt-4-turbo-preview,gpt-4-1106-preview
|
||||
|
||||
#==========================#
|
||||
# Azure Assistants API #
|
||||
#==========================#
|
||||
|
||||
# Note: You should map your credentials with custom variables according to your Azure OpenAI Configuration
|
||||
# The models for Azure Assistants are also determined by your Azure OpenAI configuration.
|
||||
|
||||
# More info, including how to enable use of Assistants with Azure here:
|
||||
# https://www.librechat.ai/docs/configuration/librechat_yaml/ai_endpoints/azure#using-assistants-with-azure
|
||||
|
||||
#============#
|
||||
# Plugins #
|
||||
#============#
|
||||
|
||||
# PLUGIN_MODELS=gpt-4o,gpt-4o-mini,gpt-4,gpt-4-turbo-preview,gpt-4-0125-preview,gpt-4-1106-preview,gpt-4-0613,gpt-3.5-turbo,gpt-3.5-turbo-0125,gpt-3.5-turbo-1106,gpt-3.5-turbo-0613
|
||||
|
||||
DEBUG_PLUGINS=true
|
||||
|
||||
CREDS_KEY=f34be427ebb29de8d88c107a71546019685ed8b241d8f2ed00c3df97ad2566f0
|
||||
CREDS_IV=e2341419ec3dd3d19b13a1a87fafcbfb
|
||||
|
||||
# Azure AI Search
|
||||
#-----------------
|
||||
AZURE_AI_SEARCH_SERVICE_ENDPOINT=
|
||||
AZURE_AI_SEARCH_INDEX_NAME=
|
||||
AZURE_AI_SEARCH_API_KEY=
|
||||
|
||||
AZURE_AI_SEARCH_API_VERSION=
|
||||
AZURE_AI_SEARCH_SEARCH_OPTION_QUERY_TYPE=
|
||||
AZURE_AI_SEARCH_SEARCH_OPTION_TOP=
|
||||
AZURE_AI_SEARCH_SEARCH_OPTION_SELECT=
|
||||
|
||||
# DALL·E
|
||||
#----------------
|
||||
DALLE_API_KEY=
|
||||
DALLE3_API_KEY=
|
||||
DALLE2_API_KEY=
|
||||
# DALLE3_SYSTEM_PROMPT=
|
||||
# DALLE2_SYSTEM_PROMPT=
|
||||
# DALLE_REVERSE_PROXY=
|
||||
# DALLE3_BASEURL=
|
||||
# DALLE2_BASEURL=
|
||||
|
||||
# DALL·E (via Azure OpenAI)
|
||||
# Note: requires some of the variables above to be set
|
||||
#----------------
|
||||
# DALLE3_AZURE_API_VERSION=
|
||||
# DALLE2_AZURE_API_VERSION=
|
||||
|
||||
# Flux
|
||||
#-----------------
|
||||
FLUX_API_BASE_URL=https://api.us1.bfl.ai
|
||||
# FLUX_API_BASE_URL = 'https://api.bfl.ml';
|
||||
|
||||
# Get your API key at https://api.us1.bfl.ai/auth/profile
|
||||
# FLUX_API_KEY=
|
||||
|
||||
# Google
|
||||
#-----------------
|
||||
GOOGLE_SEARCH_API_KEY=
|
||||
GOOGLE_CSE_ID=
|
||||
|
||||
# YOUTUBE
|
||||
#-----------------
|
||||
YOUTUBE_API_KEY=
|
||||
|
||||
# SerpAPI
|
||||
#-----------------
|
||||
SERPAPI_API_KEY=
|
||||
|
||||
# Stable Diffusion
|
||||
#-----------------
|
||||
SD_WEBUI_URL=http://host.docker.internal:7860
|
||||
|
||||
# Tavily
|
||||
#-----------------
|
||||
TAVILY_API_KEY=
|
||||
|
||||
# Traversaal
|
||||
#-----------------
|
||||
TRAVERSAAL_API_KEY=
|
||||
|
||||
# WolframAlpha
|
||||
#-----------------
|
||||
WOLFRAM_APP_ID=
|
||||
|
||||
# Zapier
|
||||
#-----------------
|
||||
ZAPIER_NLA_API_KEY=
|
||||
|
||||
#==================================================#
|
||||
# Search #
|
||||
#==================================================#
|
||||
|
||||
SEARCH=true
|
||||
MEILI_NO_ANALYTICS=true
|
||||
MEILI_HOST=http://0.0.0.0:7700
|
||||
MEILI_MASTER_KEY=6211530205576eaa3d97d215d5c12813
|
||||
|
||||
# Optional: Disable indexing, useful in a multi-node setup
|
||||
# where only one instance should perform an index sync.
|
||||
# MEILI_NO_SYNC=true
|
||||
|
||||
#==================================================#
|
||||
# Speech to Text & Text to Speech #
|
||||
#==================================================#
|
||||
|
||||
STT_API_KEY=
|
||||
TTS_API_KEY=
|
||||
|
||||
#==================================================#
|
||||
# RAG #
|
||||
#==================================================#
|
||||
# More info: https://www.librechat.ai/docs/configuration/rag_api
|
||||
|
||||
#RAG_OPENAI_BASEURL=http://localhost:8000
|
||||
RAG_OPENAI_API_KEY=
|
||||
RAG_USE_FULL_CONTEXT=true
|
||||
EMBEDDINGS_PROVIDER=openai
|
||||
EMBEDDINGS_MODEL=text-embedding-3-small
|
||||
POSTGRES_DB=librechat
|
||||
POSTGRES_USER={{ librechat_postgres_user}}
|
||||
POSTGRES_PASSWORD={{ librechat_postgres_password }}
|
||||
DEBUG_RAG_API=true
|
||||
|
||||
#===================================================#
|
||||
# User System #
|
||||
#===================================================#
|
||||
|
||||
#========================#
|
||||
# Moderation #
|
||||
#========================#
|
||||
|
||||
OPENAI_MODERATION=false
|
||||
OPENAI_MODERATION_API_KEY=
|
||||
# OPENAI_MODERATION_REVERSE_PROXY=
|
||||
|
||||
BAN_VIOLATIONS=true
|
||||
BAN_DURATION=1000 * 60 * 60 * 2
|
||||
BAN_INTERVAL=20
|
||||
|
||||
LOGIN_VIOLATION_SCORE=1
|
||||
REGISTRATION_VIOLATION_SCORE=1
|
||||
CONCURRENT_VIOLATION_SCORE=1
|
||||
MESSAGE_VIOLATION_SCORE=1
|
||||
NON_BROWSER_VIOLATION_SCORE=20
|
||||
|
||||
LOGIN_MAX=7
|
||||
LOGIN_WINDOW=5
|
||||
REGISTER_MAX=5
|
||||
REGISTER_WINDOW=60
|
||||
|
||||
LIMIT_CONCURRENT_MESSAGES=true
|
||||
CONCURRENT_MESSAGE_MAX=2
|
||||
|
||||
LIMIT_MESSAGE_IP=true
|
||||
MESSAGE_IP_MAX=40
|
||||
MESSAGE_IP_WINDOW=1
|
||||
|
||||
LIMIT_MESSAGE_USER=false
|
||||
MESSAGE_USER_MAX=40
|
||||
MESSAGE_USER_WINDOW=1
|
||||
|
||||
ILLEGAL_MODEL_REQ_SCORE=5
|
||||
|
||||
#========================#
|
||||
# Balance #
|
||||
#========================#
|
||||
|
||||
# CHECK_BALANCE=false
|
||||
# START_BALANCE=20000 # note: the number of tokens that will be credited after registration.
|
||||
|
||||
|
||||
#========================#
|
||||
# Registration and Login #
|
||||
#========================#
|
||||
|
||||
ALLOW_EMAIL_LOGIN=true
|
||||
ALLOW_REGISTRATION=true
|
||||
ALLOW_SOCIAL_LOGIN=true
|
||||
ALLOW_SOCIAL_REGISTRATION=true
|
||||
ALLOW_PASSWORD_RESET=false
|
||||
# ALLOW_ACCOUNT_DELETION=true # note: enabled by default if omitted/commented out
|
||||
ALLOW_UNVERIFIED_EMAIL_LOGIN=true
|
||||
|
||||
SESSION_EXPIRY=1000 * 60 * 15
|
||||
REFRESH_TOKEN_EXPIRY=(1000 * 60 * 60 * 24) * 7
|
||||
|
||||
JWT_SECRET={{ librechat_jwt_secret }}
|
||||
JWT_REFRESH_SECRET={{ librechat_jwt_refresh_secret }}
|
||||
|
||||
# Discord
|
||||
DISCORD_CLIENT_ID=
|
||||
DISCORD_CLIENT_SECRET=
|
||||
DISCORD_CALLBACK_URL=/oauth/discord/callback
|
||||
|
||||
# Facebook
|
||||
FACEBOOK_CLIENT_ID=
|
||||
FACEBOOK_CLIENT_SECRET=
|
||||
FACEBOOK_CALLBACK_URL=/oauth/facebook/callback
|
||||
|
||||
# GitHub
|
||||
GITHUB_CLIENT_ID=
|
||||
GITHUB_CLIENT_SECRET=
|
||||
GITHUB_CALLBACK_URL=/oauth/github/callback
|
||||
# GitHub Enterprise
|
||||
# GITHUB_ENTERPRISE_BASE_URL=
|
||||
# GITHUB_ENTERPRISE_USER_AGENT=
|
||||
|
||||
# Google
|
||||
GOOGLE_CLIENT_ID=
|
||||
GOOGLE_CLIENT_SECRET=
|
||||
GOOGLE_CALLBACK_URL=/oauth/google/callback
|
||||
|
||||
# Apple
|
||||
APPLE_CLIENT_ID=
|
||||
APPLE_TEAM_ID=
|
||||
APPLE_KEY_ID=
|
||||
APPLE_PRIVATE_KEY_PATH=
|
||||
APPLE_CALLBACK_URL=/oauth/apple/callback
|
||||
|
||||
# OpenID
|
||||
OPENID_CLIENT_ID=
|
||||
OPENID_CLIENT_SECRET=
|
||||
OPENID_ISSUER=
|
||||
OPENID_SESSION_SECRET=
|
||||
OPENID_SCOPE="openid profile email"
|
||||
OPENID_CALLBACK_URL=/oauth/openid/callback
|
||||
OPENID_REQUIRED_ROLE=
|
||||
OPENID_REQUIRED_ROLE_TOKEN_KIND=
|
||||
OPENID_REQUIRED_ROLE_PARAMETER_PATH=
|
||||
# Set to determine which user info property returned from OpenID Provider to store as the User's username
|
||||
OPENID_USERNAME_CLAIM=
|
||||
# Set to determine which user info property returned from OpenID Provider to store as the User's name
|
||||
OPENID_NAME_CLAIM=
|
||||
|
||||
OPENID_BUTTON_LABEL=
|
||||
OPENID_IMAGE_URL=
|
||||
# Set to true to automatically redirect to the OpenID provider when a user visits the login page
|
||||
# This will bypass the login form completely for users, only use this if OpenID is your only authentication method
|
||||
OPENID_AUTO_REDIRECT=false
|
||||
|
||||
# LDAP
|
||||
LDAP_URL=
|
||||
LDAP_BIND_DN=
|
||||
LDAP_BIND_CREDENTIALS=
|
||||
LDAP_USER_SEARCH_BASE=
|
||||
#LDAP_SEARCH_FILTER="mail="
|
||||
LDAP_CA_CERT_PATH=
|
||||
# LDAP_TLS_REJECT_UNAUTHORIZED=
|
||||
# LDAP_STARTTLS=
|
||||
# LDAP_LOGIN_USES_USERNAME=true
|
||||
# LDAP_ID=
|
||||
# LDAP_USERNAME=
|
||||
# LDAP_EMAIL=
|
||||
# LDAP_FULL_NAME=
|
||||
|
||||
#========================#
|
||||
# Email Password Reset #
|
||||
#========================#
|
||||
|
||||
EMAIL_SERVICE=
|
||||
EMAIL_HOST=
|
||||
EMAIL_PORT=25
|
||||
EMAIL_ENCRYPTION=
|
||||
EMAIL_ENCRYPTION_HOSTNAME=
|
||||
EMAIL_ALLOW_SELFSIGNED=
|
||||
EMAIL_USERNAME=
|
||||
EMAIL_PASSWORD=
|
||||
EMAIL_FROM_NAME=
|
||||
EMAIL_FROM=noreply@librechat.ai
|
||||
|
||||
#========================#
|
||||
# Firebase CDN #
|
||||
#========================#
|
||||
|
||||
FIREBASE_API_KEY=
|
||||
FIREBASE_AUTH_DOMAIN=
|
||||
FIREBASE_PROJECT_ID=
|
||||
FIREBASE_STORAGE_BUCKET=
|
||||
FIREBASE_MESSAGING_SENDER_ID=
|
||||
FIREBASE_APP_ID=
|
||||
|
||||
#========================#
|
||||
# S3 AWS Bucket #
|
||||
#========================#
|
||||
|
||||
AWS_ENDPOINT_URL=
|
||||
AWS_ACCESS_KEY_ID=
|
||||
AWS_SECRET_ACCESS_KEY=
|
||||
AWS_REGION=
|
||||
AWS_BUCKET_NAME=
|
||||
|
||||
#========================#
|
||||
# Azure Blob Storage #
|
||||
#========================#
|
||||
|
||||
AZURE_STORAGE_CONNECTION_STRING=
|
||||
AZURE_STORAGE_PUBLIC_ACCESS=false
|
||||
AZURE_CONTAINER_NAME=files
|
||||
|
||||
#========================#
|
||||
# Shared Links #
|
||||
#========================#
|
||||
|
||||
ALLOW_SHARED_LINKS=true
|
||||
ALLOW_SHARED_LINKS_PUBLIC=true
|
||||
|
||||
#==============================#
|
||||
# Static File Cache Control #
|
||||
#==============================#
|
||||
|
||||
# Leave commented out to use defaults: 1 day (86400 seconds) for s-maxage and 2 days (172800 seconds) for max-age
|
||||
# NODE_ENV must be set to production for these to take effect
|
||||
# STATIC_CACHE_MAX_AGE=172800
|
||||
# STATIC_CACHE_S_MAX_AGE=86400
|
||||
|
||||
# If you have another service in front of your LibreChat doing compression, disable express based compression here
|
||||
# DISABLE_COMPRESSION=true
|
||||
|
||||
#===================================================#
|
||||
# UI #
|
||||
#===================================================#
|
||||
|
||||
APP_TITLE={{ customer }} AI
|
||||
CUSTOM_FOOTER=
|
||||
HELP_AND_FAQ_URL=https://{{ domain }}
|
||||
|
||||
# SHOW_BIRTHDAY_ICON=true
|
||||
|
||||
# Google tag manager id
|
||||
#ANALYTICS_GTM_ID=user provided google tag manager id
|
||||
|
||||
#===============#
|
||||
# REDIS Options #
|
||||
#===============#
|
||||
|
||||
# REDIS_URI=10.10.10.10:6379
|
||||
# USE_REDIS=true
|
||||
|
||||
# USE_REDIS_CLUSTER=true
|
||||
# REDIS_CA=/path/to/ca.crt
|
||||
|
||||
#==================================================#
|
||||
# Others #
|
||||
#==================================================#
|
||||
# You should leave the following commented out #
|
||||
|
||||
# NODE_ENV=
|
||||
|
||||
# E2E_USER_EMAIL=
|
||||
# E2E_USER_PASSWORD=
|
||||
|
||||
#=====================================================#
|
||||
# Cache Headers #
|
||||
#=====================================================#
|
||||
# Headers that control caching of the index.html #
|
||||
# Default configuration prevents caching to ensure #
|
||||
# users always get the latest version. Customize #
|
||||
# only if you understand caching implications. #
|
||||
|
||||
# INDEX_HTML_CACHE_CONTROL=no-cache, no-store, must-revalidate
|
||||
# INDEX_HTML_PRAGMA=no-cache
|
||||
# INDEX_HTML_EXPIRES=0
|
||||
|
||||
# no-cache: Forces validation with server before using cached version
|
||||
# no-store: Prevents storing the response entirely
|
||||
# must-revalidate: Prevents using stale content when offline
|
||||
|
||||
#=====================================================#
|
||||
# OpenWeather #
|
||||
#=====================================================#
|
||||
OPENWEATHER_API_KEY=
|
||||
|
|
@ -0,0 +1,96 @@
|
|||
services:
|
||||
api:
|
||||
# build:
|
||||
# context: .
|
||||
# dockerfile: Dockerfile.multi
|
||||
# target: api-build
|
||||
image: ghcr.io/danny-avila/librechat:v0.7.8
|
||||
container_name: {{ customer }}-librechat
|
||||
labels:
|
||||
- "diun.enable=true"
|
||||
ports:
|
||||
- 3080:3080
|
||||
depends_on:
|
||||
- mongodb
|
||||
- rag_api
|
||||
restart: always
|
||||
user: "${UID}:${GID}"
|
||||
extra_hosts:
|
||||
- "host.docker.internal:host-gateway"
|
||||
env_file:
|
||||
- /opt/letsbe/env/librechat.env
|
||||
environment:
|
||||
- HOST=0.0.0.0
|
||||
- NODE_ENV=production
|
||||
- MONGO_URI=mongodb://mongodb:27017/LibreChat
|
||||
- MEILI_HOST=http://meilisearch:7700
|
||||
- RAG_PORT=${RAG_PORT:-8000}
|
||||
- RAG_API_URL=http://rag_api:${RAG_PORT:-8000}
|
||||
- EMAIL_HOST=mail.{{ domain }}
|
||||
- EMAIL_PORT=587
|
||||
- EMAIL_ENCRYPTION=starttls
|
||||
- EMAIL_USERNAME=noreply@{{ domain }}
|
||||
- EMAIL_PASSWORD={{ librechat_email_password }}
|
||||
- EMAIL_FROM_NAME={{ customer }} AI
|
||||
- EMAIL_FROM=noreply@{{ domain }}
|
||||
volumes:
|
||||
- type: bind
|
||||
source: ./librechat.yaml
|
||||
target: /app/librechat.yaml
|
||||
- ./images:/app/client/public/images
|
||||
- ./uploads:/app/uploads
|
||||
- ./logs:/app/api/logs
|
||||
|
||||
mongodb:
|
||||
container_name: librechat-mongodb
|
||||
# ports: # Uncomment this to access mongodb from outside docker, not safe in deployment
|
||||
# - 27018:27017
|
||||
image: mongo
|
||||
restart: always
|
||||
user: "${UID}:${GID}"
|
||||
volumes:
|
||||
- ./data-node:/data/db
|
||||
command: mongod --noauth
|
||||
meilisearch:
|
||||
container_name: librechat-meilisearch
|
||||
image: getmeili/meilisearch:v1.12.3
|
||||
restart: always
|
||||
user: "${UID}:${GID}"
|
||||
# ports: # Uncomment this to access meilisearch from outside docker
|
||||
# - 7700:7700 # if exposing these ports, make sure your master key is not the default value
|
||||
env_file:
|
||||
- /opt/letsbe/env/librechat.env
|
||||
environment:
|
||||
- MEILI_HOST=http://meilisearch:7700
|
||||
- MEILI_NO_ANALYTICS=true
|
||||
volumes:
|
||||
- ./meili_data_v1.12:/meili_data
|
||||
vectordb:
|
||||
image: ankane/pgvector:latest
|
||||
environment:
|
||||
POSTGRES_DB: librechat
|
||||
POSTGRES_USER: {{ librechat_postgres_user }}
|
||||
POSTGRES_PASSWORD: {{ librechat_postgres_password }}
|
||||
restart: always
|
||||
volumes:
|
||||
- pgdata2:/var/lib/postgresql/data
|
||||
rag_api:
|
||||
image: ghcr.io/danny-avila/librechat-rag-api-dev-lite:latest
|
||||
environment:
|
||||
- DB_HOST=vectordb
|
||||
- RAG_PORT=8000
|
||||
restart: always
|
||||
depends_on:
|
||||
- vectordb
|
||||
env_file:
|
||||
- /opt/letsbe/env/librechat.env
|
||||
ports:
|
||||
- "8000:8000"
|
||||
|
||||
networks:
|
||||
# Declare the same network name as in the ActivePieces file, but mark as external
|
||||
{{ customer }}-activepieces:
|
||||
external: true
|
||||
|
||||
volumes:
|
||||
pgdata2:
|
||||
|
|
@ -0,0 +1,318 @@
|
|||
# For more information, see the Configuration Guide:
|
||||
# https://www.librechat.ai/docs/configuration/librechat_yaml
|
||||
|
||||
# Configuration version (required)
|
||||
version: 1.2.1
|
||||
|
||||
# Cache settings: Set to true to enable caching
|
||||
cache: true
|
||||
|
||||
# File strategy s3/firebase
|
||||
# fileStrategy: "s3"
|
||||
|
||||
# Custom interface configuration
|
||||
interface:
|
||||
customWelcome: "Welcome to {{ customer }} AI! Enjoy your experience."
|
||||
# Privacy policy settings
|
||||
privacyPolicy:
|
||||
externalUrl: 'https://librechat.ai/privacy-policy'
|
||||
openNewTab: true
|
||||
|
||||
# Terms of service
|
||||
termsOfService:
|
||||
externalUrl: 'https://librechat.ai/tos'
|
||||
openNewTab: true
|
||||
modalAcceptance: true
|
||||
modalTitle: "Terms of Service for LibreChat"
|
||||
modalContent: |
|
||||
# Terms and Conditions for LibreChat
|
||||
|
||||
*Effective Date: February 18, 2024*
|
||||
|
||||
Welcome to LibreChat, the informational website for the open-source AI chat platform, available at https://librechat.ai. These Terms of Service ("Terms") govern your use of our website and the services we offer. By accessing or using the Website, you agree to be bound by these Terms and our Privacy Policy, accessible at https://librechat.ai/privacy.
|
||||
|
||||
## 1. Ownership
|
||||
|
||||
Upon purchasing a package from LibreChat, you are granted the right to download and use the code for accessing an admin panel for LibreChat. While you own the downloaded code, you are expressly prohibited from reselling, redistributing, or otherwise transferring the code to third parties without explicit permission from LibreChat.
|
||||
|
||||
## 2. User Data
|
||||
|
||||
We collect personal data, such as your name, email address, and payment information, as described in our Privacy Policy. This information is collected to provide and improve our services, process transactions, and communicate with you.
|
||||
|
||||
## 3. Non-Personal Data Collection
|
||||
|
||||
The Website uses cookies to enhance user experience, analyze site usage, and facilitate certain functionalities. By using the Website, you consent to the use of cookies in accordance with our Privacy Policy.
|
||||
|
||||
## 4. Use of the Website
|
||||
|
||||
You agree to use the Website only for lawful purposes and in a manner that does not infringe the rights of, restrict, or inhibit anyone else's use and enjoyment of the Website. Prohibited behavior includes harassing or causing distress or inconvenience to any person, transmitting obscene or offensive content, or disrupting the normal flow of dialogue within the Website.
|
||||
|
||||
## 5. Governing Law
|
||||
|
||||
These Terms shall be governed by and construed in accordance with the laws of the United States, without giving effect to any principles of conflicts of law.
|
||||
|
||||
## 6. Changes to the Terms
|
||||
|
||||
We reserve the right to modify these Terms at any time. We will notify users of any changes by email. Your continued use of the Website after such changes have been notified will constitute your consent to such changes.
|
||||
|
||||
## 7. Contact Information
|
||||
|
||||
If you have any questions about these Terms, please contact us at contact@librechat.ai.
|
||||
|
||||
By using the Website, you acknowledge that you have read these Terms of Service and agree to be bound by them.
|
||||
|
||||
endpointsMenu: true
|
||||
modelSelect: true
|
||||
parameters: true
|
||||
sidePanel: true
|
||||
presets: true
|
||||
prompts: true
|
||||
bookmarks: true
|
||||
multiConvo: true
|
||||
agents: true
|
||||
|
||||
# Example Registration Object Structure (optional)
|
||||
registration:
|
||||
socialLogins: ['github', 'google', 'discord', 'openid', 'facebook', 'apple']
|
||||
# allowedDomains:
|
||||
# - "gmail.com"
|
||||
|
||||
|
||||
# Example Balance settings
|
||||
# balance:
|
||||
# enabled: false
|
||||
# startBalance: 20000
|
||||
# autoRefillEnabled: false
|
||||
# refillIntervalValue: 30
|
||||
# refillIntervalUnit: 'days'
|
||||
# refillAmount: 10000
|
||||
|
||||
speech:
|
||||
tts:
|
||||
openai:
|
||||
url: 'https://api.openai.com/v1'
|
||||
apiKey: ''
|
||||
model: 'tts-1-hd'
|
||||
voices: ['alloy']
|
||||
|
||||
#
|
||||
stt:
|
||||
openai:
|
||||
url: 'https://api.openai.com/v1'
|
||||
apiKey: ''
|
||||
model: 'whisper-1'
|
||||
|
||||
# rateLimits:
|
||||
# fileUploads:
|
||||
# ipMax: 100
|
||||
# ipWindowInMinutes: 60 # Rate limit window for file uploads per IP
|
||||
# userMax: 50
|
||||
# userWindowInMinutes: 60 # Rate limit window for file uploads per user
|
||||
# conversationsImport:
|
||||
# ipMax: 100
|
||||
# ipWindowInMinutes: 60 # Rate limit window for conversation imports per IP
|
||||
# userMax: 50
|
||||
# userWindowInMinutes: 60 # Rate limit window for conversation imports per user
|
||||
|
||||
# Example Actions Object Structure
|
||||
actions:
|
||||
allowedDomains:
|
||||
- "swapi.dev"
|
||||
- "librechat.ai"
|
||||
- "google.com"
|
||||
- "{{ domain_librechat }}"
|
||||
- "{{ domain_activepieces }}"
|
||||
# Example MCP Servers Object Structure
|
||||
# mcpServers:
|
||||
# everything:
|
||||
# # type: sse # type can optionally be omitted
|
||||
# url: http://localhost:3001/sse
|
||||
# timeout: 60000 # 1 minute timeout for this server, this is the default timeout for MCP servers.
|
||||
# puppeteer:
|
||||
# type: stdio
|
||||
# command: npx
|
||||
# args:
|
||||
# - -y
|
||||
# - "@modelcontextprotocol/server-puppeteer"
|
||||
# timeout: 300000 # 5 minutes timeout for this server
|
||||
# filesystem:
|
||||
# # type: stdio
|
||||
# command: npx
|
||||
# args:
|
||||
# - -y
|
||||
# - "@modelcontextprotocol/server-filesystem"
|
||||
# - /home/user/LibreChat/
|
||||
# iconPath: /home/user/LibreChat/client/public/assets/logo.svg
|
||||
# mcp-obsidian:
|
||||
# command: npx
|
||||
# args:
|
||||
# - -y
|
||||
# - "mcp-obsidian"
|
||||
# - /path/to/obsidian/vault
|
||||
#mcpServers:
|
||||
#PortNimaraAI:
|
||||
#type: sse
|
||||
#url: "https://automation.portnimara.com/api/v1/mcp/d6br5VnJuHUPuzpFUGJEo/sse"
|
||||
#command: npx
|
||||
#args:
|
||||
# - -y
|
||||
# - mcp-remote
|
||||
# - "https://automation.portnimara.com/api/v1/mcp/d6br5VnJuHUPuzpFUGJEo/sse"
|
||||
|
||||
# Definition of custom endpoints
|
||||
endpoints:
|
||||
# assistants:
|
||||
# disableBuilder: false # Disable Assistants Builder Interface by setting to `true`
|
||||
# pollIntervalMs: 3000 # Polling interval for checking assistant updates
|
||||
# timeoutMs: 180000 # Timeout for assistant operations
|
||||
# # Should only be one or the other, either `supportedIds` or `excludedIds`
|
||||
# supportedIds: ["asst_supportedAssistantId1", "asst_supportedAssistantId2"]
|
||||
# # excludedIds: ["asst_excludedAssistantId"]
|
||||
# # Only show assistants that the user created or that were created externally (e.g. in Assistants playground).
|
||||
# # privateAssistants: false # Does not work with `supportedIds` or `excludedIds`
|
||||
# # (optional) Models that support retrieval, will default to latest known OpenAI models that support the feature
|
||||
# retrievalModels: ["gpt-4-turbo-preview"]
|
||||
# # (optional) Assistant Capabilities available to all users. Omit the ones you wish to exclude. Defaults to list below.
|
||||
# capabilities: ["code_interpreter", "retrieval", "actions", "tools", "image_vision"]
|
||||
# agents:
|
||||
# # (optional) Default recursion depth for agents, defaults to 25
|
||||
# recursionLimit: 50
|
||||
# # (optional) Max recursion depth for agents, defaults to 25
|
||||
# maxRecursionLimit: 100
|
||||
# # (optional) Disable the builder interface for agents
|
||||
# disableBuilder: false
|
||||
# # (optional) Agent Capabilities available to all users. Omit the ones you wish to exclude. Defaults to list below.
|
||||
# capabilities: ["execute_code", "file_search", "actions", "tools"]
|
||||
custom:
|
||||
# Groq Example
|
||||
- name: 'groq'
|
||||
apiKey: '${GROQ_API_KEY}'
|
||||
baseURL: 'https://api.groq.com/openai/v1/'
|
||||
models:
|
||||
default:
|
||||
[
|
||||
'llama3-70b-8192',
|
||||
'llama3-8b-8192',
|
||||
'llama2-70b-4096',
|
||||
'mixtral-8x7b-32768',
|
||||
'gemma-7b-it',
|
||||
]
|
||||
fetch: false
|
||||
titleConvo: true
|
||||
titleModel: 'mixtral-8x7b-32768'
|
||||
modelDisplayLabel: 'groq'
|
||||
|
||||
# Mistral AI Example
|
||||
- name: 'Mistral' # Unique name for the endpoint
|
||||
# For `apiKey` and `baseURL`, you can use environment variables that you define.
|
||||
# recommended environment variables:
|
||||
apiKey: '${MISTRAL_API_KEY}'
|
||||
baseURL: 'https://api.mistral.ai/v1'
|
||||
|
||||
# Models configuration
|
||||
models:
|
||||
# List of default models to use. At least one value is required.
|
||||
default: ['mistral-tiny', 'mistral-small', 'mistral-medium']
|
||||
# Fetch option: Set to true to fetch models from API.
|
||||
fetch: true # Defaults to false.
|
||||
|
||||
# Optional configurations
|
||||
|
||||
# Title Conversation setting
|
||||
titleConvo: true # Set to true to enable title conversation
|
||||
|
||||
# Title Method: Choose between "completion" or "functions".
|
||||
# titleMethod: "completion" # Defaults to "completion" if omitted.
|
||||
|
||||
# Title Model: Specify the model to use for titles.
|
||||
titleModel: 'mistral-tiny' # Defaults to "gpt-3.5-turbo" if omitted.
|
||||
|
||||
# Summarize setting: Set to true to enable summarization.
|
||||
# summarize: false
|
||||
|
||||
# Summary Model: Specify the model to use if summarization is enabled.
|
||||
# summaryModel: "mistral-tiny" # Defaults to "gpt-3.5-turbo" if omitted.
|
||||
|
||||
# Force Prompt setting: If true, sends a `prompt` parameter instead of `messages`.
|
||||
# forcePrompt: false
|
||||
|
||||
# The label displayed for the AI model in messages.
|
||||
modelDisplayLabel: 'Mistral' # Default is "AI" when not set.
|
||||
|
||||
# Add additional parameters to the request. Default params will be overwritten.
|
||||
# addParams:
|
||||
# safe_prompt: true # This field is specific to Mistral AI: https://docs.mistral.ai/api/
|
||||
|
||||
# Drop Default params parameters from the request. See default params in guide linked below.
|
||||
# NOTE: For Mistral, it is necessary to drop the following parameters or you will encounter a 422 Error:
|
||||
dropParams: ['stop', 'user', 'frequency_penalty', 'presence_penalty']
|
||||
|
||||
# OpenRouter Example
|
||||
- name: 'OpenRouter'
|
||||
# For `apiKey` and `baseURL`, you can use environment variables that you define.
|
||||
# recommended environment variables:
|
||||
apiKey: '${OPENROUTER_KEY}'
|
||||
baseURL: 'https://openrouter.ai/api/v1'
|
||||
models:
|
||||
default: ['meta-llama/llama-3-70b-instruct']
|
||||
fetch: true
|
||||
titleConvo: true
|
||||
titleModel: 'meta-llama/llama-3-70b-instruct'
|
||||
# Recommended: Drop the stop parameter from the request as Openrouter models use a variety of stop tokens.
|
||||
dropParams: ['stop']
|
||||
modelDisplayLabel: 'OpenRouter'
|
||||
|
||||
# Portkey AI Example
|
||||
- name: "Portkey"
|
||||
apiKey: "dummy"
|
||||
baseURL: 'https://api.portkey.ai/v1'
|
||||
headers:
|
||||
x-portkey-api-key: '${PORTKEY_API_KEY}'
|
||||
x-portkey-virtual-key: '${PORTKEY_OPENAI_VIRTUAL_KEY}'
|
||||
models:
|
||||
default: ['gpt-4o-mini', 'gpt-4o', 'chatgpt-4o-latest']
|
||||
fetch: true
|
||||
titleConvo: true
|
||||
titleModel: 'current_model'
|
||||
summarize: false
|
||||
summaryModel: 'current_model'
|
||||
forcePrompt: false
|
||||
modelDisplayLabel: 'Portkey'
|
||||
iconURL: https://images.crunchbase.com/image/upload/c_pad,f_auto,q_auto:eco,dpr_1/rjqy7ghvjoiu4cd1xjbf
|
||||
|
||||
# fileConfig:
|
||||
# endpoints:
|
||||
# assistants:
|
||||
# fileLimit: 5
|
||||
# fileSizeLimit: 10 # Maximum size for an individual file in MB
|
||||
# totalSizeLimit: 50 # Maximum total size for all files in a single request in MB
|
||||
# supportedMimeTypes:
|
||||
# - "image/.*"
|
||||
# - "application/pdf"
|
||||
# openAI:
|
||||
# disabled: false # Disables file uploading to the OpenAI endpoint
|
||||
# default:
|
||||
# totalSizeLimit: 20
|
||||
# YourCustomEndpointName:
|
||||
# fileLimit: 2
|
||||
# fileSizeLimit: 5
|
||||
# serverFileSizeLimit: 100 # Global server file size limit in MB
|
||||
# avatarSizeLimit: 2 # Limit for user avatar image size in MB
|
||||
# # See the Custom Configuration Guide for more information on Assistants Config:
|
||||
# # https://www.librechat.ai/docs/configuration/librechat_yaml/object_structure/assistants_endpoint
|
||||
|
||||
|
||||
#modelSpecs:
|
||||
# (optional) force the UI to only ever pick from this list
|
||||
#enforce: false
|
||||
#prioritize: true
|
||||
|
||||
#list:
|
||||
# - name: "port-nimara-agent"
|
||||
#label: "{{ customer }} AI"
|
||||
#default: true # ← makes it the default on new chats
|
||||
#description: "Agent"
|
||||
|
||||
#preset:
|
||||
#endpoint: "agents" # ← use the Agents endpoint
|
||||
#agent_id: "" # ← your actual agent’s ID
|
||||
|
|
@ -0,0 +1,15 @@
|
|||
[app]
|
||||
address = "0.0.0.0:9000"
|
||||
admin_username = "{{ listmonk_admin_username }}"
|
||||
admin_password = "{{ listmonk_admin_password }}"
|
||||
|
||||
[db]
|
||||
host = "{{ customer }}-listmonk-db"
|
||||
port = 5432
|
||||
user = "{{ listmonk_db_user }}"
|
||||
password = "{{ listmonk_db_password }}"
|
||||
database = "listmonk"
|
||||
ssl_mode = "disable"
|
||||
max_open = 25
|
||||
max_idle = 25
|
||||
max_lifetime = "300s"
|
||||
|
|
@ -0,0 +1,57 @@
|
|||
version: '3.9'
|
||||
|
||||
services:
|
||||
listmonk-db:
|
||||
container_name: {{ customer }}-listmonk-db
|
||||
image: postgres:13
|
||||
restart: always
|
||||
volumes:
|
||||
- {{ customer }}-listmonk-postgresql:/var/lib/postgresql/data
|
||||
- {{ customer }}-listmonk-backups:/tmp/backups
|
||||
ports:
|
||||
- "127.0.0.1:3037:5432"
|
||||
environment:
|
||||
POSTGRES_DB: listmonk
|
||||
POSTGRES_USER: {{ listmonk_db_user }}
|
||||
POSTGRES_PASSWORD: {{ listmonk_db_password }}
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "pg_isready -U {{ listmonk_db_user }} -d listmonk"]
|
||||
interval: 10s
|
||||
timeout: 5s
|
||||
retries: 6
|
||||
networks:
|
||||
{{ customer }}-listmonk:
|
||||
ipv4_address: 172.20.6.2
|
||||
|
||||
listmonk-web:
|
||||
container_name: {{ customer }}-listmonk-web
|
||||
image: listmonk/listmonk:latest
|
||||
restart: always
|
||||
labels:
|
||||
- "diun.enable=true"
|
||||
command: [sh, -c, "yes | ./listmonk --install --config config.toml && ./listmonk --config config.toml"]
|
||||
volumes:
|
||||
- ./config.toml:/listmonk/config.toml
|
||||
- {{ customer }}-listmonk-backups:/tmp/backups
|
||||
ports:
|
||||
- "127.0.0.1:3006:9000"
|
||||
depends_on:
|
||||
- listmonk-db
|
||||
environment:
|
||||
TZ: Etc/UTC
|
||||
networks:
|
||||
{{ customer }}-listmonk:
|
||||
ipv4_address: 172.20.6.3
|
||||
|
||||
|
||||
networks:
|
||||
{{ customer }}-listmonk:
|
||||
ipam:
|
||||
driver: default
|
||||
config:
|
||||
- subnet: 172.20.6.0/28
|
||||
gateway: 172.20.6.1
|
||||
|
||||
volumes:
|
||||
{{ customer }}-listmonk-postgresql:
|
||||
{{ customer }}-listmonk-backups:
|
||||
|
|
@ -0,0 +1,32 @@
|
|||
version: '3'
|
||||
|
||||
services:
|
||||
minio:
|
||||
image: minio/minio:latest
|
||||
container_name: {{ customer }}-minio
|
||||
restart: always
|
||||
labels:
|
||||
- "diun.enable=true"
|
||||
volumes:
|
||||
- {{ customer }}-minio-data:/data
|
||||
environment:
|
||||
- MINIO_ROOT_USER={{ minio_root_user }}
|
||||
- MINIO_ROOT_PASSWORD={{ minio_root_password }}
|
||||
command: server /data --console-address ":9001"
|
||||
ports:
|
||||
- "0.0.0.0:3058:9000"
|
||||
- "0.0.0.0:3059:9001"
|
||||
networks:
|
||||
{{ customer }}-minio:
|
||||
ipv4_address: 172.20.26.2
|
||||
|
||||
networks:
|
||||
{{ customer }}-minio:
|
||||
ipam:
|
||||
driver: default
|
||||
config:
|
||||
- subnet: 172.20.26.0/28
|
||||
gateway: 172.20.26.1
|
||||
|
||||
volumes:
|
||||
{{ customer }}-minio-data:
|
||||
|
|
@ -0,0 +1,63 @@
|
|||
version: '3.8'
|
||||
|
||||
services:
|
||||
n8n-postgres:
|
||||
container_name: {{ customer }}-n8n-postgres
|
||||
restart: always
|
||||
image: postgres:16 #original: postgres:latest
|
||||
environment:
|
||||
- POSTGRES_DB=n8n
|
||||
- POSTGRES_USER={{ n8n_postgres_user }}
|
||||
- POSTGRES_PASSWORD={{ n8n_postgres_password }}
|
||||
volumes:
|
||||
- {{ customer }}-n8n-postgres:/var/lib/postgresql/data
|
||||
- {{ customer }}-n8n-backups:/tmp/backups
|
||||
networks:
|
||||
{{ customer }}-n8n:
|
||||
ipv4_address: 172.20.8.2
|
||||
|
||||
n8n:
|
||||
container_name: {{ customer }}-n8n
|
||||
restart: always
|
||||
image: docker.n8n.io/n8nio/n8n
|
||||
labels:
|
||||
- "diun.enable=true"
|
||||
ports:
|
||||
- "127.0.0.1:3025:5678"
|
||||
environment:
|
||||
- DB_TYPE=postgresdb
|
||||
- DB_POSTGRESDB_DATABASE=n8n
|
||||
- DB_POSTGRESDB_HOST=n8n-postgres
|
||||
- DB_POSTGRESDB_PORT=5432
|
||||
- DB_POSTGRESDB_USER={{ n8n_postgres_user }}
|
||||
#- DB_POSTGRESDB_SCHEMA=public
|
||||
- DB_POSTGRESDB_PASSWORD={{ n8n_postgres_password }}
|
||||
- N8N_EDITOR_BASE_URL=https://{{ domain_n8n }}
|
||||
- N8N_EMAIL_MODE=smtp
|
||||
- N8N_SMTP_SSL=false
|
||||
- N8N_SMTP_HOST=
|
||||
- N8N_SMTP_PORT=
|
||||
- N8N_SMTP_USER=
|
||||
- N8N_SMTP_PASS=
|
||||
- N8N_SMTP_SENDER=
|
||||
volumes:
|
||||
- {{ customer }}-n8n-storage:/home/node/.n8n
|
||||
- {{ customer }}-n8n-backups:/tmp/backups
|
||||
links:
|
||||
- n8n-postgres
|
||||
networks:
|
||||
{{ customer }}-n8n:
|
||||
ipv4_address: 172.20.8.3
|
||||
|
||||
networks:
|
||||
{{ customer }}-n8n:
|
||||
ipam:
|
||||
driver: default
|
||||
config:
|
||||
- subnet: 172.20.8.0/28
|
||||
gateway: 172.20.8.1
|
||||
|
||||
volumes:
|
||||
{{ customer }}-n8n-postgres:
|
||||
{{ customer }}-n8n-storage:
|
||||
{{ customer }}-n8n-backups:
|
||||
|
|
@ -0,0 +1,171 @@
|
|||
version: '3.9'
|
||||
|
||||
services:
|
||||
db:
|
||||
container_name: {{ customer }}-nextcloud-postgres
|
||||
image: postgres:16-alpine #original postgres:alpine
|
||||
restart: always
|
||||
volumes:
|
||||
- {{ customer }}-nextcloud-database:/var/lib/postgresql/data:Z
|
||||
- {{ customer }}-nextcloud-backups:/tmp/backups
|
||||
environment:
|
||||
POSTGRES_DB: nextcloud
|
||||
POSTGRES_USER: {{ nextcloud_postgres_user }}
|
||||
POSTGRES_PASSWORD: {{ nextcloud_postgres_password }}
|
||||
networks:
|
||||
{{ customer }}-nextcloud:
|
||||
ipv4_address: 172.20.9.2
|
||||
|
||||
redis:
|
||||
container_name: {{ customer }}-nextcloud-redis
|
||||
image: redis:alpine
|
||||
restart: always
|
||||
networks:
|
||||
{{ customer }}-nextcloud:
|
||||
ipv4_address: 172.20.9.3
|
||||
|
||||
app:
|
||||
container_name: {{ customer }}-nextcloud-app
|
||||
image: nextcloud:production-apache
|
||||
restart: always
|
||||
labels:
|
||||
- "diun.enable=true"
|
||||
ports:
|
||||
- '127.0.0.1:3023:80'
|
||||
volumes:
|
||||
- {{ customer }}-nextcloud-html:/var/www/html:z
|
||||
- /opt/letsbe/config/nextcloud:/var/www/html/config
|
||||
- /opt/letsbe/data/nextcloud:/var/www/html/data
|
||||
- {{ customer }}-nextcloud-backups:/tmp/backups
|
||||
environment:
|
||||
#Nextcloud
|
||||
POSTGRES_HOST: {{ customer }}-nextcloud-postgres
|
||||
REDIS_HOST: {{ customer }}-nextcloud-redis
|
||||
POSTGRES_DB: nextcloud
|
||||
POSTGRES_USER: {{ nextcloud_postgres_user }}
|
||||
POSTGRES_PASSWORD: {{ nextcloud_postgres_password }}
|
||||
# #SMTP
|
||||
# SMTP_HOST: 'mail.{{ domain }}'
|
||||
# SMTP_PORT: '587'
|
||||
# SMTP_NAME: 'system@{{ domain }}'
|
||||
# SMTP_PASSWORD: ''
|
||||
# MAIL_FROM_ADDRESS: 'system'
|
||||
# MAIL_DOMAIN: '{{ domain }}'
|
||||
#Admin
|
||||
NEXTCLOUD_ADMIN_USER: administrator@letsbe.biz
|
||||
NEXTCLOUD_ADMIN_PASSWORD: '{{ nextcloud_admin_password }}'
|
||||
#Config
|
||||
NEXTCLOUD_TRUSTED_DOMAINS: '{{ domain_nextcloud }} 127.0.0.1 0.0.0.0'
|
||||
TRUSTED_PROXIES: '{{ domain_nextcloud }} 127.0.0.1 0.0.0.0 172.*.*.*'
|
||||
OVERWRITECLIURL: https://{{ domain_nextcloud }}
|
||||
OVERWRITEPROTOCOL: https
|
||||
OVERWRITEHOST: {{ domain_nextcloud }}
|
||||
#APACHE_DISABLE_REWRITE_IP: 1
|
||||
depends_on:
|
||||
- db
|
||||
- redis
|
||||
networks:
|
||||
{{ customer }}-nextcloud:
|
||||
ipv4_address: 172.20.9.4
|
||||
|
||||
cron:
|
||||
container_name: {{ customer }}-nextcloud-cron
|
||||
image: nextcloud:production-apache
|
||||
restart: always
|
||||
volumes:
|
||||
- {{ customer }}-nextcloud-html:/var/www/html:z
|
||||
- /opt/letsbe/config/nextcloud:/var/www/html/config
|
||||
- /opt/letsbe/data/nextcloud:/var/www/html/data
|
||||
entrypoint: /cron.sh
|
||||
depends_on:
|
||||
- db
|
||||
- redis
|
||||
networks:
|
||||
{{ customer }}-nextcloud:
|
||||
ipv4_address: 172.20.9.5
|
||||
|
||||
collabora:
|
||||
image: collabora/code:latest
|
||||
container_name: {{ customer }}-nextcloud-collabora
|
||||
restart: always
|
||||
environment:
|
||||
- password={{ collabora_password }}
|
||||
- username={{ collabora_user }}
|
||||
- domain={{ domain_collabora }}
|
||||
- extra_params=--o:ssl.enable=true
|
||||
ports:
|
||||
- '127.0.0.1:3044:9980'
|
||||
networks:
|
||||
{{ customer }}-nextcloud:
|
||||
ipv4_address: 172.20.9.7
|
||||
|
||||
nextcloud-whiteboard-server:
|
||||
image: ghcr.io/nextcloud-releases/whiteboard:release
|
||||
ports:
|
||||
- '127.0.0.1:3060:3002'
|
||||
environment:
|
||||
NEXTCLOUD_URL: '{{ domain_nextcloud }}'
|
||||
JWT_SECRET_KEY: '{{ nextcloud_jwt_secret }}'
|
||||
networks:
|
||||
{{ customer }}-nextcloud:
|
||||
ipv4_address: 172.20.9.8
|
||||
|
||||
talk-hpb:
|
||||
container_name: {{ customer }}-nextcloud-talk-hpb
|
||||
image: ghcr.io/nextcloud-releases/aio-talk:latest
|
||||
restart: always
|
||||
environment:
|
||||
NC_DOMAIN: {{ domain_nextcloud }}
|
||||
TALK_PORT: "3478"
|
||||
TURN_SECRET: "{{ turn_secret }}"
|
||||
SIGNALING_SECRET: "{{ signaling_secret }}"
|
||||
INTERNAL_SECRET: "{{ internal_secret }}"
|
||||
ports:
|
||||
- "127.0.0.1:3061:8081"
|
||||
networks:
|
||||
{{ customer }}-nextcloud:
|
||||
ipv4_address: 172.20.9.6
|
||||
|
||||
coturn:
|
||||
image: instrumentisto/coturn:latest
|
||||
container_name: {{ customer }}-coturn
|
||||
restart: always
|
||||
ports:
|
||||
- "3478:3478/udp"
|
||||
- "3478:3478/tcp"
|
||||
- "49160-49200:49160-49200/udp"
|
||||
command:
|
||||
-n
|
||||
--log-file=stdout
|
||||
--fingerprint
|
||||
--realm={{ domain_nextcloud }}
|
||||
--external-ip={{ server_ip }}/172.20.9.9
|
||||
--listening-port=3478
|
||||
--min-port=49160
|
||||
--max-port=49200
|
||||
--use-auth-secret
|
||||
--static-auth-secret={{ turn_secret }}
|
||||
--no-multicast-peers
|
||||
--no-cli
|
||||
networks:
|
||||
{{ customer }}-nextcloud:
|
||||
ipv4_address: 172.20.9.9
|
||||
|
||||
networks:
|
||||
{{ customer }}-nextcloud:
|
||||
ipam:
|
||||
driver: default
|
||||
config:
|
||||
- subnet: 172.20.9.0/28
|
||||
gateway: 172.20.9.1
|
||||
|
||||
volumes:
|
||||
{{ customer }}-nextcloud-html:
|
||||
# driver: local
|
||||
# driver_opts:
|
||||
# size: 100g
|
||||
{{ customer }}-nextcloud-database:
|
||||
# driver: local
|
||||
# driver_opts:
|
||||
# size: 100g
|
||||
{{ customer }}-nextcloud-backups:
|
||||
|
|
@ -0,0 +1,55 @@
|
|||
version: '3.9'
|
||||
|
||||
services:
|
||||
nocodb:
|
||||
container_name: {{ customer }}-nocodb
|
||||
image: nocodb/nocodb:latest
|
||||
restart: always
|
||||
labels:
|
||||
- "diun.enable=true"
|
||||
environment:
|
||||
- NC_DB=pg://{{ customer }}-nocodb-db:5432?u=postgres&p={{ nocodb_postgres_password }}&d=nocodb
|
||||
volumes:
|
||||
- {{ customer }}-nocodb-data:/usr/app/data
|
||||
- {{ customer }}-nocodb-backups:/tmp/backups
|
||||
ports:
|
||||
- "127.0.0.1:3057:8080" # Host port 3057 -> Container port 8080
|
||||
depends_on:
|
||||
nocodb-db:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
{{ customer }}-nocodb:
|
||||
ipv4_address: 172.20.24.2
|
||||
|
||||
nocodb-db:
|
||||
container_name: {{ customer }}-nocodb-db
|
||||
image: postgres:16.6
|
||||
restart: always
|
||||
environment:
|
||||
POSTGRES_DB: nocodb
|
||||
POSTGRES_USER: postgres
|
||||
POSTGRES_PASSWORD: {{ nocodb_postgres_password }}
|
||||
volumes:
|
||||
- {{ customer }}-nocodb-postgres:/var/lib/postgresql/data
|
||||
- {{ customer }}-nocodb-backups:/tmp/backups
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "pg_isready -U $${POSTGRES_USER} -d $${POSTGRES_DB}"]
|
||||
interval: 5s
|
||||
timeout: 5s
|
||||
retries: 5
|
||||
networks:
|
||||
{{ customer }}-nocodb:
|
||||
ipv4_address: 172.20.24.3
|
||||
|
||||
networks:
|
||||
{{ customer }}-nocodb:
|
||||
ipam:
|
||||
driver: default
|
||||
config:
|
||||
- subnet: 172.20.24.0/28
|
||||
gateway: 172.20.24.1
|
||||
|
||||
volumes:
|
||||
{{ customer }}-nocodb-data:
|
||||
{{ customer }}-nocodb-postgres:
|
||||
{{ customer }}-nocodb-backups:
|
||||
|
|
@ -0,0 +1,55 @@
|
|||
version: '3.8'
|
||||
|
||||
services:
|
||||
odoo-web:
|
||||
container_name: {{ customer }}-odoo-web
|
||||
restart: always
|
||||
image: odoo:latest
|
||||
labels:
|
||||
- "diun.enable=true"
|
||||
depends_on:
|
||||
- odoo-postgres
|
||||
ports:
|
||||
- "127.0.0.1:3019:8069"
|
||||
environment:
|
||||
- HOST=odoo-postgres
|
||||
- USER={{ odoo_postgres_user }}
|
||||
- PASSWORD={{ odoo_postgres_password }}
|
||||
volumes:
|
||||
- {{ customer }}-odoo-web-data:/var/lib/odoo
|
||||
- {{ customer }}-odoo-web-config:/etc/odoo
|
||||
- {{ customer }}-odoo-web-addons:/mnt/extra-addons
|
||||
- {{ customer }}-odoo-backups:/tmp/backups
|
||||
networks:
|
||||
{{ customer }}-odoo:
|
||||
ipv4_address: 172.20.19.2
|
||||
|
||||
odoo-postgres:
|
||||
container_name: {{ customer }}-odoo-postgres
|
||||
image: postgres:15
|
||||
restart: always
|
||||
environment:
|
||||
POSTGRES_DB: postgres
|
||||
POSTGRES_USER: {{ odoo_postgres_user }}
|
||||
POSTGRES_PASSWORD: {{ odoo_postgres_password }}
|
||||
volumes:
|
||||
- {{ customer }}-odoo-postgres:/var/lib/postgresql/data/
|
||||
- {{ customer }}-odoo-backups:/tmp/backups
|
||||
networks:
|
||||
{{ customer }}-odoo:
|
||||
ipv4_address: 172.20.19.3
|
||||
|
||||
networks:
|
||||
{{ customer }}-odoo:
|
||||
ipam:
|
||||
driver: default
|
||||
config:
|
||||
- subnet: 172.20.19.0/28
|
||||
gateway: 172.20.19.1
|
||||
|
||||
volumes:
|
||||
{{ customer }}-odoo-postgres:
|
||||
{{ customer }}-odoo-web-data:
|
||||
{{ customer }}-odoo-web-config:
|
||||
{{ customer }}-odoo-web-addons:
|
||||
{{ customer }}-odoo-backups:
|
||||
|
|
@ -0,0 +1,67 @@
|
|||
version: '3.9'
|
||||
|
||||
services:
|
||||
orchestrator-db:
|
||||
container_name: {{ customer }}-orchestrator-db
|
||||
image: postgres:16-alpine
|
||||
restart: always
|
||||
environment:
|
||||
POSTGRES_USER: orchestrator
|
||||
POSTGRES_PASSWORD: {{ orchestrator_db_password }}
|
||||
POSTGRES_DB: orchestrator
|
||||
volumes:
|
||||
- {{ customer }}-orchestrator-db:/var/lib/postgresql/data
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "pg_isready -U orchestrator -d orchestrator"]
|
||||
interval: 5s
|
||||
timeout: 5s
|
||||
retries: 5
|
||||
networks:
|
||||
{{ customer }}-orchestrator:
|
||||
ipv4_address: 172.20.32.2
|
||||
|
||||
orchestrator-api:
|
||||
container_name: {{ customer }}-orchestrator-api
|
||||
image: code.letsbe.solutions/letsbe/orchestrator:latest
|
||||
restart: always
|
||||
labels:
|
||||
- "diun.enable=true"
|
||||
ports:
|
||||
- '127.0.0.1:8100:8000'
|
||||
command: ["sh", "-c", "alembic upgrade head && uvicorn app.main:app --host 0.0.0.0 --port 8000"]
|
||||
environment:
|
||||
DATABASE_URL: postgresql+asyncpg://orchestrator:{{ orchestrator_db_password }}@orchestrator-db:5432/orchestrator
|
||||
DEBUG: "false"
|
||||
APP_NAME: "LetsBe Orchestrator"
|
||||
ADMIN_API_KEY: {{ admin_api_key }}
|
||||
LOCAL_MODE: "true"
|
||||
LOCAL_AGENT_KEY: {{ local_agent_key }}
|
||||
HUB_URL: {{ hub_url }}
|
||||
HUB_API_KEY: {{ hub_api_key }}
|
||||
HUB_TELEMETRY_ENABLED: {{ hub_telemetry_enabled }}
|
||||
INSTANCE_ID: {{ instance_id }}
|
||||
LICENSE_KEY: {{ license_key }}
|
||||
depends_on:
|
||||
orchestrator-db:
|
||||
condition: service_healthy
|
||||
healthcheck:
|
||||
test: ["CMD", "python", "-c", "import urllib.request; urllib.request.urlopen('http://localhost:8000/health')"]
|
||||
interval: 5s
|
||||
timeout: 5s
|
||||
retries: 10
|
||||
start_period: 10s
|
||||
networks:
|
||||
{{ customer }}-orchestrator:
|
||||
ipv4_address: 172.20.32.3
|
||||
|
||||
networks:
|
||||
{{ customer }}-orchestrator:
|
||||
name: {{ customer }}-orchestrator
|
||||
ipam:
|
||||
driver: default
|
||||
config:
|
||||
- subnet: 172.20.32.0/28
|
||||
gateway: 172.20.32.1
|
||||
|
||||
volumes:
|
||||
{{ customer }}-orchestrator-db:
|
||||
|
|
@ -0,0 +1,165 @@
|
|||
---
|
||||
version: "3.5"
|
||||
|
||||
services:
|
||||
penpot-frontend:
|
||||
container_name: {{ customer }}-penpot-frontend
|
||||
image: "penpotapp/frontend:latest"
|
||||
restart: always
|
||||
labels:
|
||||
- "diun.enable=true"
|
||||
ports:
|
||||
- '127.0.0.1:3021:80'
|
||||
volumes:
|
||||
- {{ customer }}-penpot-assets:/opt/data/assets
|
||||
depends_on:
|
||||
- penpot-backend
|
||||
- penpot-exporter
|
||||
# labels:
|
||||
# - "traefik.enable=true"
|
||||
environment:
|
||||
- PENPOT_FLAGS=enable-registration enable-login-with-password
|
||||
networks:
|
||||
{{ customer }}-penpot:
|
||||
ipv4_address: 172.20.10.2
|
||||
|
||||
penpot-backend:
|
||||
container_name: {{ customer }}-penpot-backend
|
||||
image: "penpotapp/backend:latest"
|
||||
restart: always
|
||||
volumes:
|
||||
- {{ customer }}-penpot-assets:/opt/data/assets
|
||||
- {{ customer }}-penpot-backups:/tmp/backups
|
||||
depends_on:
|
||||
- penpot-postgres
|
||||
- penpot-redis
|
||||
environment:
|
||||
- PENPOT_FLAGS=enable-registration enable-login-with-password disable-email-verification enable-smtp enable-prepl-server
|
||||
- PENPOT_SECRET_KEY={{ penpot_secret_key }}
|
||||
- PENPOT_TELEMETRY_ENABLED=false
|
||||
# - PENPOT_PREPL_HOST=0.0.0.0
|
||||
- PENPOT_PUBLIC_URI=https://{{ domain_penpot }} #http://localhost:9001
|
||||
## Database
|
||||
- PENPOT_DATABASE_URI=postgresql://{{ customer }}-penpot-postgres/penpot
|
||||
- PENPOT_DATABASE_USERNAME={{ penpot_db_user }}
|
||||
- PENPOT_DATABASE_PASSWORD={{ penpot_db_password }}
|
||||
- PENPOT_REDIS_URI=redis://{{ customer }}-penpot-redis/0
|
||||
- PENPOT_ASSETS_STORAGE_BACKEND=assets-fs
|
||||
- PENPOT_STORAGE_ASSETS_FS_DIRECTORY=/opt/data/assets
|
||||
## S3
|
||||
# - AWS_ACCESS_KEY_ID=<KEY_ID>
|
||||
# - AWS_SECRET_ACCESS_KEY=<ACCESS_KEY>
|
||||
# - PENPOT_ASSETS_STORAGE_BACKEND=assets-s3
|
||||
# - PENPOT_STORAGE_ASSETS_S3_ENDPOINT=http://penpot-minio:9000
|
||||
# - PENPOT_STORAGE_ASSETS_S3_BUCKET=<BUKET_NAME>
|
||||
## SMTP
|
||||
- PENPOT_SMTP_DEFAULT_FROM=no-reply@{{ domain }}
|
||||
- PENPOT_SMTP_DEFAULT_REPLY_TO=support@{{ domain }}
|
||||
- PENPOT_SMTP_HOST=mail.{{ domain }}
|
||||
- PENPOT_SMTP_PORT=587
|
||||
- PENPOT_SMTP_USERNAME=
|
||||
- PENPOT_SMTP_PASSWORD=
|
||||
- PENPOT_SMTP_TLS=true
|
||||
- PENPOT_SMTP_SSL=false
|
||||
networks:
|
||||
{{ customer }}-penpot:
|
||||
ipv4_address: 172.20.10.3
|
||||
|
||||
penpot-exporter:
|
||||
container_name: {{ customer }}-penpot-exporter
|
||||
image: "penpotapp/exporter:latest"
|
||||
restart: always
|
||||
environment:
|
||||
- PENPOT_PUBLIC_URI=http://{{ customer }}-penpot-frontend
|
||||
- PENPOT_REDIS_URI=redis://{{ customer }}-penpot-redis/0
|
||||
networks:
|
||||
{{ customer }}-penpot:
|
||||
ipv4_address: 172.20.10.4
|
||||
|
||||
penpot-postgres:
|
||||
container_name: {{ customer }}-penpot-postgres
|
||||
image: "postgres:15"
|
||||
restart: always
|
||||
stop_signal: SIGINT
|
||||
volumes:
|
||||
- {{ customer }}-penpot-postgres:/var/lib/postgresql/data
|
||||
- {{ customer }}-penpot-backups:/tmp/backups
|
||||
environment:
|
||||
- POSTGRES_INITDB_ARGS=--data-checksums
|
||||
- POSTGRES_DB=penpot
|
||||
- POSTGRES_USER={{ penpot_db_user }}
|
||||
- POSTGRES_PASSWORD={{ penpot_db_password }}
|
||||
networks:
|
||||
{{ customer }}-penpot:
|
||||
ipv4_address: 172.20.10.5
|
||||
|
||||
penpot-redis:
|
||||
container_name: {{ customer }}-penpot-redis
|
||||
image: redis:7
|
||||
restart: always
|
||||
networks:
|
||||
{{ customer }}-penpot:
|
||||
ipv4_address: 172.20.10.6
|
||||
|
||||
# penpot-mailcatch:
|
||||
# container_name: {{ customer }}-penpot-mailcatch
|
||||
# image: sj26/mailcatcher:latest
|
||||
# restart: always
|
||||
# # expose:
|
||||
# # - '1025'
|
||||
# ports:
|
||||
# - "127.0.0.1:3048:1080"
|
||||
# networks:
|
||||
# {{ customer }}-penpot:
|
||||
# ipv4_address: 172.20.10.7
|
||||
|
||||
networks:
|
||||
{{ customer }}-penpot:
|
||||
ipam:
|
||||
driver: default
|
||||
config:
|
||||
- subnet: 172.20.10.0/28
|
||||
gateway: 172.20.10.1
|
||||
|
||||
# networks:
|
||||
# penpot:
|
||||
|
||||
volumes:
|
||||
{{ customer }}-penpot-assets:
|
||||
{{ customer }}-penpot-postgres:
|
||||
{{ customer }}-penpot-backups:
|
||||
|
||||
## Relevant flags for frontend:
|
||||
## - demo-users
|
||||
## - login-with-github
|
||||
## - login-with-gitlab
|
||||
## - login-with-google
|
||||
## - login-with-ldap
|
||||
## - login-with-oidc
|
||||
## - login-with-password
|
||||
## - registration
|
||||
## - webhooks
|
||||
##
|
||||
## You can read more about all available flags on:
|
||||
## https://help.penpot.app/technical-guide/configuration/#advanced-configuration
|
||||
|
||||
##Environment variables
|
||||
## Relevant flags for backend:
|
||||
## - demo-users
|
||||
## - email-verification
|
||||
## - log-emails
|
||||
## - log-invitation-tokens
|
||||
## - login-with-github
|
||||
## - login-with-gitlab
|
||||
## - login-with-google
|
||||
## - login-with-ldap
|
||||
## - login-with-oidc
|
||||
## - login-with-password
|
||||
## - registration
|
||||
## - secure-session-cookies
|
||||
## - smtp
|
||||
## - smtp-debug
|
||||
## - telemetry
|
||||
## - webhooks
|
||||
## - prepl-server
|
||||
## https://help.penpot.app/technical-guide/configuration/#advanced-configuration
|
||||
|
|
@ -0,0 +1,33 @@
|
|||
version: '3.9'
|
||||
|
||||
services:
|
||||
portainer:
|
||||
container_name: {{ customer }}-portainer
|
||||
image: portainer/portainer-ce:latest
|
||||
restart: always
|
||||
labels:
|
||||
- "diun.enable=true"
|
||||
ports:
|
||||
- '127.0.0.1:9000:9000'
|
||||
- '127.0.0.1:9443:9443'
|
||||
volumes:
|
||||
- /var/run/docker.sock:/var/run/docker.sock
|
||||
- {{ customer }}-portainer_data:/data
|
||||
- {{ customer }}-portainer-backups:/tmp/backups
|
||||
- /opt/letsbe/env/portainer_admin_password.txt:/tmp/portainer_admin_password.txt:ro
|
||||
command: --admin-password-file /tmp/portainer_admin_password.txt
|
||||
networks:
|
||||
{{ customer }}-portainer:
|
||||
ipv4_address: 172.20.20.2
|
||||
|
||||
networks:
|
||||
{{ customer }}-portainer:
|
||||
ipam:
|
||||
driver: default
|
||||
config:
|
||||
- subnet: 172.20.20.0/28
|
||||
gateway: 172.20.20.1
|
||||
|
||||
volumes:
|
||||
{{ customer }}-portainer_data:
|
||||
{{ customer }}-portainer-backups:
|
||||
|
|
@ -0,0 +1,47 @@
|
|||
version: '3.9'
|
||||
|
||||
services:
|
||||
poste:
|
||||
container_name: {{ customer }}-poste
|
||||
image: analogic/poste.io:latest
|
||||
restart: always
|
||||
labels:
|
||||
- "diun.enable=true"
|
||||
hostname: {{ domain_poste }}
|
||||
# network_mode: host
|
||||
volumes:
|
||||
- {{ customer }}-poste-data:/data
|
||||
- {{ customer }}-poste-backups:/tmp/backups
|
||||
ports:
|
||||
- "25:25"
|
||||
- "127.0.0.1:3003:80"
|
||||
- "127.0.0.1:3004:443"
|
||||
- "110:110"
|
||||
- "143:143"
|
||||
- "465:465"
|
||||
- "587:587"
|
||||
- "993:993"
|
||||
- "995:995"
|
||||
- "4190:4190"
|
||||
environment:
|
||||
TZ: Europe/Berlin
|
||||
HTTPS: ON
|
||||
DISABLE_CLAMAV: TRUE
|
||||
DISABLE_RSPAMD: TRUE
|
||||
DISABLE_ROUNDCUBE: TRUE
|
||||
VIRTUAL_HOST: {{ domain_poste }}
|
||||
networks:
|
||||
{{ customer }}-poste:
|
||||
ipv4_address: 172.20.11.2
|
||||
|
||||
networks:
|
||||
{{ customer }}-poste:
|
||||
ipam:
|
||||
driver: default
|
||||
config:
|
||||
- subnet: 172.20.11.0/28
|
||||
gateway: 172.20.11.1
|
||||
|
||||
volumes:
|
||||
{{ customer }}-poste-data:
|
||||
{{ customer }}-poste-backups:
|
||||
|
|
@ -0,0 +1,16 @@
|
|||
REDASH_HOST=https://{{ redash_domain }}
|
||||
REDASH_MAIL_SERVER=mail.{{ domain }}
|
||||
REDASH_MAIL_PORT=465
|
||||
REDASH_MAIL_USE_TLS=false
|
||||
REDASH_MAIL_USE_SSL=true
|
||||
REDASH_MAIL_USERNAME=noreply@{{ domain }}
|
||||
REDASH_MAIL_PASSWORD=
|
||||
REDASH_MAIL_DEFAULT_SENDER="Redash <noreply@{{ domain }}>"
|
||||
REDASH_SECRET_KEY={{ redash_secret_key }}
|
||||
REDASH_DATABASE_URL=postgresql://{{ redash_postgres_user }}:{{ redash_postgres_password }}@redash-postgres:5432/redash
|
||||
POSTGRES_USER={{ redash_postgres_user }}
|
||||
POSTGRES_PASSWORD={{ redash_postgres_password }}
|
||||
POSTGRES_DB=redash
|
||||
REDASH_COOKIE_SECRET={{ redash_cookie_secret }}
|
||||
REDASH_ENFORCE_HTTPS=true
|
||||
REDASH_REDIS_URL=redis://redash-redis:6379/0
|
||||
|
|
@ -0,0 +1,101 @@
|
|||
version: "3.8"
|
||||
|
||||
networks:
|
||||
redash_network:
|
||||
driver: bridge
|
||||
ipam:
|
||||
config:
|
||||
- subnet: 172.20.28.0/28
|
||||
gateway: 172.20.28.1
|
||||
|
||||
x-redash-service: &redash-service
|
||||
image: redash/redash:25.1.0
|
||||
depends_on:
|
||||
- postgres
|
||||
- redis
|
||||
env_file: /opt/letsbe/env/redash.env
|
||||
restart: always
|
||||
networks:
|
||||
redash_network:
|
||||
ipv4_address: 172.20.28.2
|
||||
|
||||
services:
|
||||
server:
|
||||
<<: *redash-service
|
||||
command: server
|
||||
labels:
|
||||
- "diun.enable=true"
|
||||
ports:
|
||||
- "3064:5000"
|
||||
environment:
|
||||
REDASH_WEB_WORKERS: 4
|
||||
container_name: {{ customer }}-redash-server
|
||||
networks:
|
||||
redash_network:
|
||||
ipv4_address: 172.20.28.3
|
||||
|
||||
scheduler:
|
||||
<<: *redash-service
|
||||
command: scheduler
|
||||
depends_on:
|
||||
- server
|
||||
container_name: {{ customer }}-redash-scheduler
|
||||
networks:
|
||||
redash_network:
|
||||
ipv4_address: 172.20.28.4
|
||||
|
||||
scheduled_worker:
|
||||
<<: *redash-service
|
||||
command: worker
|
||||
depends_on:
|
||||
- server
|
||||
environment:
|
||||
QUEUES: "scheduled_queries,schemas"
|
||||
WORKERS_COUNT: 1
|
||||
container_name: {{ customer }}-redash-scheduled-worker
|
||||
networks:
|
||||
redash_network:
|
||||
ipv4_address: 172.20.28.5
|
||||
|
||||
adhoc_worker:
|
||||
<<: *redash-service
|
||||
command: worker
|
||||
depends_on:
|
||||
- server
|
||||
environment:
|
||||
QUEUES: "queries"
|
||||
WORKERS_COUNT: 2
|
||||
container_name: {{ customer }}-redash-adhoc-worker
|
||||
networks:
|
||||
redash_network:
|
||||
ipv4_address: 172.20.28.6
|
||||
|
||||
redis:
|
||||
image: redis:7-alpine
|
||||
restart: unless-stopped
|
||||
container_name: redash-redis
|
||||
networks:
|
||||
redash_network:
|
||||
ipv4_address: 172.20.28.7
|
||||
|
||||
postgres:
|
||||
image: postgres:13-alpine
|
||||
env_file: /opt/letsbe/env/redash.env
|
||||
volumes:
|
||||
- ./postgres-data:/var/lib/postgresql/data
|
||||
restart: unless-stopped
|
||||
container_name: redash-postgres
|
||||
networks:
|
||||
redash_network:
|
||||
ipv4_address: 172.20.28.8
|
||||
|
||||
worker:
|
||||
<<: *redash-service
|
||||
command: worker
|
||||
environment:
|
||||
QUEUES: "periodic,emails,default"
|
||||
WORKERS_COUNT: 1
|
||||
container_name: {{ customer }}-redash-worker
|
||||
networks:
|
||||
redash_network:
|
||||
ipv4_address: 172.20.28.9
|
||||
|
|
@ -0,0 +1,61 @@
|
|||
version: "3.5"
|
||||
|
||||
services:
|
||||
squidex_mongo:
|
||||
container_name: {{ customer }}-squidex-mongo
|
||||
image: "mongo:6"
|
||||
restart: always
|
||||
volumes:
|
||||
- {{ customer }}-squidex-mongo:/data/db
|
||||
- {{ customer }}-squidex-backups:/tmp/backups
|
||||
networks:
|
||||
{{ customer }}-squidex:
|
||||
ipv4_address: 172.20.12.2
|
||||
|
||||
squidex_squidex:
|
||||
container_name: {{ customer }}-squidex-squidex
|
||||
image: "squidex/squidex:7"
|
||||
restart: always
|
||||
labels:
|
||||
- "diun.enable=true"
|
||||
ports:
|
||||
- "127.0.0.1:3002:80"
|
||||
environment:
|
||||
URLS__BASEURL: 'https://{{ domain_squidex }}'
|
||||
UI__ONLYADMINSCANCREATEAPPS: false
|
||||
UI__ONLYADMINSCANCREATETEAMS: false
|
||||
EVENTSTORE__TYPE: MongoDB
|
||||
EVENTSTORE__MONGODB__CONFIGURATION: mongodb://squidex_mongo
|
||||
STORE__MONGODB__CONFIGURATION: mongodb://squidex_mongo
|
||||
IDENTITY__ADMINEMAIL: {{ squidex_adminemail }}
|
||||
IDENTITY__ADMINPASSWORD: {{ squidex_adminpassword }}
|
||||
# - IDENTITY__GOOGLECLIENT=${SQUIDEX_GOOGLECLIENT}
|
||||
# - IDENTITY__GOOGLESECRET=${SQUIDEX_GOOGLESECRET}
|
||||
# - IDENTITY__GITHUBCLIENT=${SQUIDEX_GITHUBCLIENT}
|
||||
# - IDENTITY__GITHUBSECRET=${SQUIDEX_GITHUBSECRET}
|
||||
# - IDENTITY__MICROSOFTCLIENT=${SQUIDEX_MICROSOFTCLIENT}
|
||||
# - IDENTITY__MICROSOFTSECRET=${SQUIDEX_MICROSOFTSECRET}
|
||||
healthcheck:
|
||||
test: ["CMD", "curl", "-f", "http://localhost:80/healthz"]
|
||||
start_period: 60s
|
||||
depends_on:
|
||||
- squidex_mongo
|
||||
volumes:
|
||||
- {{ customer }}-squidex-assets:/app/Assets
|
||||
- {{ customer }}-squidex-backups:/tmp/backups
|
||||
networks:
|
||||
{{ customer }}-squidex:
|
||||
ipv4_address: 172.20.12.3
|
||||
|
||||
networks:
|
||||
{{ customer }}-squidex:
|
||||
ipam:
|
||||
driver: default
|
||||
config:
|
||||
- subnet: 172.20.12.0/28
|
||||
gateway: 172.20.12.1
|
||||
|
||||
volumes:
|
||||
{{ customer }}-squidex-mongo:
|
||||
{{ customer }}-squidex-assets:
|
||||
{{ customer }}-squidex-backups:
|
||||
|
|
@ -0,0 +1,158 @@
|
|||
services:
|
||||
agent:
|
||||
image: code.letsbe.solutions/letsbe/sysadmin-agent:latest
|
||||
container_name: {{ customer }}-agent
|
||||
|
||||
# Join orchestrator network for container-to-container communication
|
||||
networks:
|
||||
- {{ customer }}-orchestrator
|
||||
|
||||
# Enable host.docker.internal on Linux (for accessing host services)
|
||||
extra_hosts:
|
||||
- "host.docker.internal:host-gateway"
|
||||
|
||||
environment:
|
||||
# Required: Orchestrator connection
|
||||
# In LOCAL_MODE, connect via shared Docker network
|
||||
- ORCHESTRATOR_URL=http://{{ customer }}-orchestrator-api:8000
|
||||
|
||||
# ============================================================
|
||||
# AUTHENTICATION - Supports two modes (choose one)
|
||||
# ============================================================
|
||||
|
||||
# LOCAL_MODE: Single-tenant local deployment
|
||||
# When LOCAL_MODE=true, agent uses LOCAL_AGENT_KEY to register
|
||||
# via the /register-local endpoint (Phase 2 secure flow)
|
||||
- LOCAL_MODE=true
|
||||
- LOCAL_AGENT_KEY={{ local_agent_key }}
|
||||
|
||||
# Multi-tenant mode: Registration token from orchestrator
|
||||
# When LOCAL_MODE=false (default), agent uses REGISTRATION_TOKEN
|
||||
# to register via the standard /register endpoint
|
||||
# This token is obtained from the orchestrator's registration-tokens API
|
||||
- REGISTRATION_TOKEN={{ sysadmin_registration_token }}
|
||||
|
||||
# Note: After first registration, credentials are persisted to
|
||||
# ~/.letsbe-agent/credentials.json and tokens are no longer needed
|
||||
# ============================================================
|
||||
|
||||
# Timing (seconds)
|
||||
- HEARTBEAT_INTERVAL=${HEARTBEAT_INTERVAL:-30}
|
||||
- POLL_INTERVAL=${POLL_INTERVAL:-5}
|
||||
|
||||
# Logging
|
||||
- LOG_LEVEL=${LOG_LEVEL:-INFO}
|
||||
- LOG_JSON=${LOG_JSON:-true}
|
||||
|
||||
# Resilience
|
||||
- MAX_CONCURRENT_TASKS=${MAX_CONCURRENT_TASKS:-3}
|
||||
- BACKOFF_BASE=${BACKOFF_BASE:-1.0}
|
||||
- BACKOFF_MAX=${BACKOFF_MAX:-60.0}
|
||||
- CIRCUIT_BREAKER_THRESHOLD=${CIRCUIT_BREAKER_THRESHOLD:-5}
|
||||
- CIRCUIT_BREAKER_COOLDOWN=${CIRCUIT_BREAKER_COOLDOWN:-300}
|
||||
|
||||
# Security
|
||||
- ALLOWED_FILE_ROOT=${ALLOWED_FILE_ROOT:-/opt/letsbe}
|
||||
- MAX_FILE_SIZE=${MAX_FILE_SIZE:-10485760}
|
||||
- SHELL_TIMEOUT=${SHELL_TIMEOUT:-60}
|
||||
|
||||
# Playwright browser automation
|
||||
- PLAYWRIGHT_ARTIFACTS_DIR=/opt/letsbe/playwright-artifacts
|
||||
- PLAYWRIGHT_DEFAULT_TIMEOUT_MS=60000
|
||||
- PLAYWRIGHT_NAVIGATION_TIMEOUT_MS=120000
|
||||
|
||||
# MCP Browser Sidecar connection (for LLM-driven browser control)
|
||||
- MCP_BROWSER_URL=http://mcp-browser:8931
|
||||
- MCP_BROWSER_API_KEY={{ mcp_browser_api_key }}
|
||||
|
||||
volumes:
|
||||
# Docker socket for container management
|
||||
- /var/run/docker.sock:/var/run/docker.sock:ro
|
||||
|
||||
# Host directory mounts for real infrastructure access
|
||||
- /opt/letsbe/env:/opt/letsbe/env
|
||||
- /opt/letsbe/stacks:/opt/letsbe/stacks
|
||||
- /opt/letsbe/nginx:/opt/letsbe/nginx
|
||||
|
||||
# Credential persistence (survives restarts without re-registration)
|
||||
- agent_home:/home/agent/.letsbe-agent
|
||||
|
||||
# Playwright artifacts storage
|
||||
- playwright_artifacts:/opt/letsbe/playwright-artifacts
|
||||
|
||||
# Security options for Chromium sandboxing
|
||||
security_opt:
|
||||
- seccomp=./chromium-seccomp.json
|
||||
|
||||
# Run as root for Docker socket access
|
||||
# TODO: Use Docker group membership instead for better security
|
||||
user: root
|
||||
|
||||
restart: unless-stopped
|
||||
|
||||
# Resource limits (increased for Playwright browser automation)
|
||||
deploy:
|
||||
resources:
|
||||
limits:
|
||||
cpus: '1.5'
|
||||
memory: 1G
|
||||
reservations:
|
||||
cpus: '0.25'
|
||||
memory: 256M
|
||||
|
||||
mcp-browser:
|
||||
image: code.letsbe.solutions/letsbe/mcp-browser:latest
|
||||
container_name: {{ customer }}-mcp-browser
|
||||
|
||||
# Join orchestrator network (shared with agent)
|
||||
networks:
|
||||
- {{ customer }}-orchestrator
|
||||
|
||||
environment:
|
||||
# Session limits
|
||||
- MAX_SESSIONS=${MAX_SESSIONS:-3}
|
||||
- IDLE_TIMEOUT_SECONDS=${IDLE_TIMEOUT_SECONDS:-300}
|
||||
- MAX_SESSION_LIFETIME_SECONDS=${MAX_SESSION_LIFETIME_SECONDS:-1800}
|
||||
- MAX_ACTIONS_PER_SESSION=${MAX_ACTIONS_PER_SESSION:-50}
|
||||
|
||||
# Logging
|
||||
- LOG_LEVEL=${LOG_LEVEL:-INFO}
|
||||
- LOG_JSON=${LOG_JSON:-true}
|
||||
|
||||
# Screenshots
|
||||
- SCREENSHOTS_DIR=/screenshots
|
||||
|
||||
# Authentication
|
||||
- API_KEY={{ mcp_browser_api_key }}
|
||||
|
||||
volumes:
|
||||
# Screenshots storage
|
||||
- mcp_screenshots:/screenshots
|
||||
|
||||
# Security options for Chromium sandboxing
|
||||
security_opt:
|
||||
- seccomp=./chromium-seccomp.json
|
||||
|
||||
restart: unless-stopped
|
||||
|
||||
# Resource limits for browser automation
|
||||
deploy:
|
||||
resources:
|
||||
limits:
|
||||
cpus: '1.5'
|
||||
memory: 1G
|
||||
reservations:
|
||||
cpus: '0.25'
|
||||
memory: 256M
|
||||
|
||||
volumes:
|
||||
agent_home:
|
||||
name: {{ customer }}-agent-home
|
||||
playwright_artifacts:
|
||||
name: {{ customer }}-playwright-artifacts
|
||||
mcp_screenshots:
|
||||
name: {{ customer }}-mcp-screenshots
|
||||
|
||||
networks:
|
||||
{{ customer }}-orchestrator:
|
||||
external: true
|
||||
|
|
@ -0,0 +1,37 @@
|
|||
## Make sure to change this to your own random string of 32 characters (https://docs.typebot.io/self-hosting/deploy/docker#2-add-the-required-configuratio>
|
||||
ENCRYPTION_SECRET={{ typebot_encryption_secret }}
|
||||
|
||||
DATABASE_URL=postgresql://postgres:{{ typebot_postgres_password }}@{{ customer }}-typebot-db:5432/typebot
|
||||
|
||||
NODE_OPTIONS=--no-node-snapshot
|
||||
|
||||
NEXTAUTH_URL=https://{{ domain_botlab }}
|
||||
NEXT_PUBLIC_VIEWER_URL=https://{{ domain_bot_viewer }}
|
||||
|
||||
DEFAULT_WORKSPACE_PLAN=UNLIMITED
|
||||
DISABLE_SIGNUP=false
|
||||
|
||||
ADMIN_EMAIL=administrator@{{ domain }}
|
||||
## For more configuration options check out: https://docs.typebot.io/self-hosting/configuration
|
||||
|
||||
|
||||
## SMTP Configuration (Make noreply email account too)
|
||||
SMTP_USERNAME=noreply@{{ domain }}
|
||||
SMTP_PASSWORD=
|
||||
SMTP_HOST=mail.{{ domain }}
|
||||
SMTP_PORT=465
|
||||
SMTP_SECURE=true
|
||||
NEXT_PUBLIC_SMTP_FROM="{{ company_name }} <noreply@{{ domain }}>"
|
||||
SMTP_AUTH_DISABLED=false
|
||||
|
||||
|
||||
|
||||
## S3 Configuration for MinIO
|
||||
S3_ACCESS_KEY=#replace
|
||||
S3_SECRET_KEY=#replace
|
||||
S3_BUCKET=bots
|
||||
S3_PORT=
|
||||
S3_ENDPOINT={{ domain_s3 }}
|
||||
S3_SSL=true
|
||||
S3_REGION=eu-central
|
||||
S3_PUBLIC_CUSTOM_DOMAIN=https://{{ domain_s3 }}/bots
|
||||
|
|
@ -0,0 +1,60 @@
|
|||
version: '3.3'
|
||||
|
||||
volumes:
|
||||
{{ customer }}-typebot-db-data:
|
||||
|
||||
services:
|
||||
{{ customer }}-typebot-db:
|
||||
image: postgres:16
|
||||
restart: always
|
||||
volumes:
|
||||
- {{ customer }}-typebot-db-data:/var/lib/postgresql/data
|
||||
environment:
|
||||
- POSTGRES_DB=typebot
|
||||
- POSTGRES_PASSWORD={{ typebot_postgres_password }}
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "pg_isready -U postgres"]
|
||||
interval: 5s
|
||||
timeout: 5s
|
||||
retries: 5
|
||||
networks:
|
||||
{{ customer }}-typebot:
|
||||
ipv4_address: 172.20.25.2
|
||||
|
||||
typebot-builder:
|
||||
image: baptistearno/typebot-builder:latest
|
||||
restart: always
|
||||
labels:
|
||||
- "diun.enable=true"
|
||||
depends_on:
|
||||
{{ customer }}-typebot-db:
|
||||
condition: service_healthy
|
||||
ports:
|
||||
- '3061:3000'
|
||||
extra_hosts:
|
||||
- 'host.docker.internal:host-gateway'
|
||||
env_file: /opt/letsbe/env/typebot.env
|
||||
networks:
|
||||
{{ customer }}-typebot:
|
||||
ipv4_address: 172.20.25.3
|
||||
|
||||
typebot-viewer:
|
||||
image: baptistearno/typebot-viewer:latest
|
||||
depends_on:
|
||||
{{ customer }}-typebot-db:
|
||||
condition: service_healthy
|
||||
restart: always
|
||||
ports:
|
||||
- '3062:3000'
|
||||
env_file: /opt/letsbe/env/typebot.env
|
||||
networks:
|
||||
{{ customer }}-typebot:
|
||||
ipv4_address: 172.20.25.4
|
||||
|
||||
networks:
|
||||
{{ customer }}-typebot:
|
||||
ipam:
|
||||
driver: default
|
||||
config:
|
||||
- subnet: 172.20.25.0/28
|
||||
gateway: 172.20.25.1
|
||||
|
|
@ -0,0 +1,53 @@
|
|||
version: '3.9'
|
||||
|
||||
services:
|
||||
umami:
|
||||
container_name: {{ customer }}-umami
|
||||
image: ghcr.io/umami-software/umami:postgresql-latest
|
||||
restart: always
|
||||
labels:
|
||||
- "diun.enable=true"
|
||||
ports:
|
||||
- "127.0.0.1:3008:3000"
|
||||
environment:
|
||||
DATABASE_URL: postgresql://{{ umami_postgres_user }}:{{ umami_postgres_password }}@{{ customer }}-umami-db:5432/umami
|
||||
DATABASE_TYPE: postgresql
|
||||
APP_SECRET: '{{ umami_app_secret }}'
|
||||
networks:
|
||||
{{ customer }}-umami:
|
||||
ipv4_address: 172.20.13.2
|
||||
depends_on:
|
||||
umami-db:
|
||||
condition: service_healthy
|
||||
|
||||
umami-db:
|
||||
container_name: {{ customer }}-umami-db
|
||||
image: postgres:15-alpine
|
||||
environment:
|
||||
POSTGRES_DB: 'umami'
|
||||
POSTGRES_USER: '{{ umami_postgres_user }}'
|
||||
POSTGRES_PASSWORD: '{{ umami_postgres_password }}'
|
||||
volumes:
|
||||
- {{ customer }}-umami-postgres:/var/lib/postgresql/data
|
||||
- {{ customer }}-umami-backups:/tmp/backups
|
||||
restart: always
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "pg_isready -U $${POSTGRES_USER} -d $${POSTGRES_DB}"]
|
||||
interval: 5s
|
||||
timeout: 5s
|
||||
retries: 5
|
||||
networks:
|
||||
{{ customer }}-umami:
|
||||
ipv4_address: 172.20.13.3
|
||||
|
||||
networks:
|
||||
{{ customer }}-umami:
|
||||
ipam:
|
||||
driver: default
|
||||
config:
|
||||
- subnet: 172.20.13.0/28
|
||||
gateway: 172.20.13.1
|
||||
|
||||
volumes:
|
||||
{{ customer }}-umami-postgres:
|
||||
{{ customer }}-umami-backups:
|
||||
|
|
@ -0,0 +1,29 @@
|
|||
version: '3.9'
|
||||
|
||||
services:
|
||||
uptime-kuma:
|
||||
container_name: {{ customer }}-uptime-kuma
|
||||
image: louislam/uptime-kuma:latest
|
||||
restart: always
|
||||
labels:
|
||||
- "diun.enable=true"
|
||||
volumes:
|
||||
- {{ customer }}-uptimekuma-data:/app/data
|
||||
- {{ customer }}-uptimekuma-backups:/tmp/backups
|
||||
ports:
|
||||
- "127.0.0.1:3005:3001"
|
||||
networks:
|
||||
{{ customer }}-uptime-kuma:
|
||||
ipv4_address: 172.20.14.2
|
||||
|
||||
networks:
|
||||
{{ customer }}-uptime-kuma:
|
||||
ipam:
|
||||
driver: default
|
||||
config:
|
||||
- subnet: 172.20.14.0/28
|
||||
gateway: 172.20.14.1
|
||||
|
||||
volumes:
|
||||
{{ customer }}-uptimekuma-data:
|
||||
{{ customer }}-uptimekuma-backups:
|
||||
|
|
@ -0,0 +1,12 @@
|
|||
# Vaultwarden Environment Configuration
|
||||
# Copy to .env and configure
|
||||
|
||||
# Admin token for /admin panel access
|
||||
# Generate with: openssl rand -base64 48
|
||||
ADMIN_TOKEN=
|
||||
|
||||
# SMTP password for sending invite emails
|
||||
SMTP_PASSWORD=
|
||||
|
||||
# SSO Client Secret (when Keycloak is enabled)
|
||||
# SSO_CLIENT_SECRET=
|
||||
|
|
@ -0,0 +1,73 @@
|
|||
services:
|
||||
vaultwarden:
|
||||
container_name: {{ customer }}-vaultwarden
|
||||
image: vaultwarden/server:latest
|
||||
restart: unless-stopped
|
||||
environment:
|
||||
# Domain configuration
|
||||
DOMAIN: https://vault.{{ domain }}
|
||||
|
||||
# Admin panel - generate secure token: openssl rand -base64 48
|
||||
ADMIN_TOKEN: ${ADMIN_TOKEN}
|
||||
|
||||
# Signup controls - enable for initial setup, disable after first user created
|
||||
SIGNUPS_ALLOWED: "true"
|
||||
INVITATIONS_ALLOWED: "true"
|
||||
SHOW_PASSWORD_HINT: "false"
|
||||
|
||||
# SSO/OpenID Connect (Keycloak integration) - disabled for initial testing
|
||||
# Enable after Keycloak is configured with vaultwarden client
|
||||
SSO_ENABLED: "false"
|
||||
# SSO_ONLY: "false"
|
||||
# SSO_AUTHORITY: https://auth.{{ domain }}/realms/{{ customer }}
|
||||
# SSO_CLIENT_ID: vaultwarden
|
||||
# SSO_CLIENT_SECRET: ${SSO_CLIENT_SECRET}
|
||||
# SSO_PKCE: "true"
|
||||
|
||||
# SMTP configuration
|
||||
SMTP_HOST: mail.{{ domain }}
|
||||
SMTP_FROM: noreply@{{ domain }}
|
||||
SMTP_USERNAME: noreply@{{ domain }}
|
||||
SMTP_PASSWORD: ${SMTP_PASSWORD}
|
||||
SMTP_FROM_NAME: {{ customer | title }} Vault
|
||||
SMTP_PORT: 587
|
||||
SMTP_SECURITY: starttls
|
||||
SMTP_AUTH_MECHANISM: Login
|
||||
|
||||
# Database (SQLite by default, in volume)
|
||||
DATABASE_URL: /data/db.sqlite3
|
||||
|
||||
# Logging
|
||||
LOG_LEVEL: info
|
||||
EXTENDED_LOGGING: "true"
|
||||
|
||||
# Security
|
||||
DISABLE_ADMIN_TOKEN: "false"
|
||||
PASSWORD_ITERATIONS: 600000
|
||||
|
||||
# WebSocket for live sync
|
||||
WEBSOCKET_ENABLED: "true"
|
||||
|
||||
volumes:
|
||||
- {{ customer }}-vaultwarden-data:/data
|
||||
ports:
|
||||
- "127.0.0.1:3071:80"
|
||||
- "127.0.0.1:3072:3012"
|
||||
networks:
|
||||
{{ customer }}-vaultwarden:
|
||||
ipv4_address: 172.20.34.2
|
||||
labels:
|
||||
- "diun.enable=true"
|
||||
|
||||
networks:
|
||||
{{ customer }}-vaultwarden:
|
||||
name: {{ customer }}-vaultwarden
|
||||
driver: bridge
|
||||
ipam:
|
||||
driver: default
|
||||
config:
|
||||
- subnet: 172.20.34.0/28
|
||||
gateway: 172.20.34.1
|
||||
|
||||
volumes:
|
||||
{{ customer }}-vaultwarden-data:
|
||||
|
|
@ -0,0 +1,6 @@
|
|||
{$BASE_URL} {
|
||||
bind {$ADDRESS}
|
||||
reverse_proxy /ws/* http://lsp:3001
|
||||
# reverse_proxy /ws_mp/* http://multiplayer:3002
|
||||
reverse_proxy /* http://windmill_server:8000
|
||||
}
|
||||
|
|
@ -0,0 +1,166 @@
|
|||
version: "3.7"
|
||||
|
||||
services:
|
||||
windmill-db:
|
||||
container_name: {{ customer }}-windmill-db
|
||||
# deploy:
|
||||
# replicas: 1
|
||||
image: postgres:15
|
||||
restart: always
|
||||
volumes:
|
||||
- {{ customer }}-windmill-postgres:/var/lib/postgresql/data
|
||||
- {{ customer }}-windmill-backups:/tmp/backups
|
||||
ports:
|
||||
- "127.0.0.1:3038:5432"
|
||||
environment:
|
||||
POSTGRES_PASSWORD: '{{ windmill_database_password }}'
|
||||
POSTGRES_DB: windmill
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "pg_isready -U postgres"]
|
||||
interval: 10s
|
||||
timeout: 5s
|
||||
retries: 5
|
||||
networks:
|
||||
{{ customer }}-windmill:
|
||||
ipv4_address: 172.20.15.2
|
||||
|
||||
windmill_server:
|
||||
container_name: {{ customer }}-windmill-server
|
||||
image: ghcr.io/windmill-labs/windmill:main
|
||||
pull_policy: always
|
||||
# deploy:
|
||||
# replicas: 1
|
||||
restart: always
|
||||
labels:
|
||||
- "diun.enable=true"
|
||||
ports:
|
||||
- "127.0.0.1:3039:8000"
|
||||
environment:
|
||||
- DATABASE_URL=postgres://postgres:{{ windmill_database_password }}@windmill-db:5432/windmill?sslmode=disable
|
||||
- BASE_URL='https://{{ domain_windmill }}'
|
||||
- RUST_LOG=info
|
||||
- NUM_WORKERS=0
|
||||
- DISABLE_SERVER=false
|
||||
- METRICS_ADDR=false
|
||||
- REQUEST_SIZE_LIMIT=50097152
|
||||
#- LICENSE_KEY=${WM_LICENSE_KEY}
|
||||
depends_on:
|
||||
windmill-db:
|
||||
condition: service_healthy
|
||||
volumes:
|
||||
- ./oauth.json:/usr/src/app/oauth.json
|
||||
- {{ customer }}-windmill-backups:/tmp/backups
|
||||
networks:
|
||||
{{ customer }}-windmill:
|
||||
ipv4_address: 172.20.15.3
|
||||
|
||||
windmill_worker:
|
||||
container_name: {{ customer }}-windmill-worker
|
||||
image: ghcr.io/windmill-labs/windmill:main
|
||||
pull_policy: always
|
||||
# deploy:
|
||||
# replicas: 1
|
||||
restart: always
|
||||
environment:
|
||||
- DATABASE_URL=postgres://postgres:{{ windmill_database_password }}@windmill-db:5432/windmill?sslmode=disable
|
||||
- BASE_URL='https://{{ domain_windmill }}'
|
||||
- RUST_LOG=info
|
||||
- DISABLE_SERVER=true
|
||||
- KEEP_JOB_DIR=false
|
||||
- METRICS_ADDR=false
|
||||
- WORKER_TAGS=deno,python3,go,bash,powershell,dependency,flow,hub,other,bun
|
||||
#- LICENSE_KEY=${WM_LICENSE_KEY}
|
||||
depends_on:
|
||||
windmill-db:
|
||||
condition: service_healthy
|
||||
volumes:
|
||||
- /var/run/docker.sock:/var/run/docker.sock:ro
|
||||
- ./oauth.json:/usr/src/app/oauth.json
|
||||
- {{ customer }}-windmill-worker-cache:/tmp/windmill/cache
|
||||
networks:
|
||||
{{ customer }}-windmill:
|
||||
ipv4_address: 172.20.15.4
|
||||
|
||||
windmill_worker_native:
|
||||
container_name: {{ customer }}-windmill-worker-native
|
||||
image: ghcr.io/windmill-labs/windmill:main
|
||||
pull_policy: always
|
||||
# deploy:
|
||||
# replicas: 1
|
||||
# resources:
|
||||
# limits:
|
||||
# cpus: "0.25"
|
||||
# memory: 512M
|
||||
restart: always
|
||||
environment:
|
||||
- DATABASE_URL=postgres://postgres:{{ windmill_database_password }}@windmill-db:5432/windmill?sslmode=disable
|
||||
- BASE_URL='https://{{ domain_windmill }}'
|
||||
- RUST_LOG=info
|
||||
- DISABLE_SERVER=true
|
||||
- KEEP_JOB_DIR=false
|
||||
- METRICS_ADDR=false
|
||||
- NUM_WORKERS=4
|
||||
- WORKER_TAGS=nativets,postgresql,mysql,graphql,snowflake
|
||||
depends_on:
|
||||
windmill-db:
|
||||
condition: service_healthy
|
||||
volumes:
|
||||
# See Oauth (https://docs.windmill.dev/docs/misc/setup_oauth)
|
||||
- ./oauth.json:/usr/src/app/oauth.json
|
||||
networks:
|
||||
{{ customer }}-windmill:
|
||||
ipv4_address: 172.20.15.5
|
||||
|
||||
lsp:
|
||||
container_name: {{ customer }}-windmill-lsp
|
||||
image: ghcr.io/windmill-labs/windmill-lsp:latest
|
||||
restart: always
|
||||
ports:
|
||||
- "127.0.0.1:3041:3001"
|
||||
volumes:
|
||||
- {{ customer }}-windmill-lsp-cache:/root/.cache
|
||||
networks:
|
||||
{{ customer }}-windmill:
|
||||
ipv4_address: 172.20.15.6
|
||||
|
||||
multiplayer:
|
||||
container_name: {{ customer }}-windmill-multiplayer
|
||||
image: ghcr.io/windmill-labs/windmill-multiplayer:latest
|
||||
# deploy:
|
||||
# replicas: 0 # Set to 1 to enable multiplayer, only available on Enterprise Edition
|
||||
restart: always
|
||||
ports:
|
||||
- "127.0.0.1:3047:3002"
|
||||
networks:
|
||||
{{ customer }}-windmill:
|
||||
ipv4_address: 172.20.15.7
|
||||
|
||||
caddy:
|
||||
container_name: {{ customer }}-windmill-caddy
|
||||
image: caddy:2.5.2-alpine
|
||||
restart: always
|
||||
volumes:
|
||||
- ./Caddyfile:/etc/caddy/Caddyfile
|
||||
- {{ customer }}-windmill-backups:/tmp/backups
|
||||
ports:
|
||||
- "127.0.0.1:3014:80"
|
||||
environment:
|
||||
BASE_URL: ":80"
|
||||
networks:
|
||||
{{ customer }}-windmill:
|
||||
ipv4_address: 172.20.15.8
|
||||
|
||||
networks:
|
||||
{{ customer }}-windmill:
|
||||
ipam:
|
||||
driver: default
|
||||
config:
|
||||
- subnet: 172.20.15.0/28
|
||||
gateway: 172.20.15.1
|
||||
|
||||
volumes:
|
||||
{{ customer }}-windmill-worker-cache: null
|
||||
{{ customer }}-windmill-lsp-cache: null
|
||||
{{ customer }}-windmill-postgres:
|
||||
#{{ customer }}-windmill-data:
|
||||
{{ customer }}-windmill-backups:
|
||||
|
|
@ -0,0 +1,55 @@
|
|||
version: '3.9'
|
||||
|
||||
services:
|
||||
wordpress-mysql:
|
||||
container_name: {{ customer }}-wordpress-mysql
|
||||
image: mariadb:10.7.8
|
||||
restart: always
|
||||
ports:
|
||||
- "127.0.0.1:3053:3306"
|
||||
environment:
|
||||
MYSQL_ROOT_PASSWORD: {{ wordpresss_mariadb_root_password }}
|
||||
MYSQL_DATABASE: wordpress
|
||||
MYSQL_USER: {{ wordpress_db_user }}
|
||||
MYSQL_PASSWORD: {{ wordpress_db_password }}
|
||||
volumes:
|
||||
- {{ customer }}-wordpress-mariadb:/var/lib/mysql
|
||||
- {{ customer }}-wordpress-backups:/tmp/backups
|
||||
networks:
|
||||
{{ customer }}-wordpress:
|
||||
ipv4_address: 172.20.16.2
|
||||
|
||||
wordpress:
|
||||
container_name: {{ customer }}-wordpress
|
||||
image: wordpress:php8.2-apache
|
||||
restart: always
|
||||
labels:
|
||||
- "diun.enable=true"
|
||||
volumes:
|
||||
- {{ customer }}-wordpress-data:/var/www/html
|
||||
- {{ customer }}-wordpress-backups:/tmp/backups
|
||||
ports:
|
||||
- "127.0.0.1:3001:80"
|
||||
environment:
|
||||
WORDPRESS_DB_HOST: {{ customer }}-wordpress-mysql
|
||||
WORDPRESS_DB_USER: {{ wordpress_db_user }}
|
||||
WORDPRESS_DB_PASSWORD: {{ wordpress_db_password }}
|
||||
WORDPRESS_DB_NAME: wordpress
|
||||
depends_on:
|
||||
- wordpress-mysql
|
||||
networks:
|
||||
{{ customer }}-wordpress:
|
||||
ipv4_address: 172.20.16.3
|
||||
|
||||
networks:
|
||||
{{ customer }}-wordpress:
|
||||
ipam:
|
||||
driver: default
|
||||
config:
|
||||
- subnet: 172.20.16.0/28
|
||||
gateway: 172.20.16.1
|
||||
|
||||
volumes:
|
||||
{{ customer }}-wordpress-mariadb:
|
||||
{{ customer }}-wordpress-data:
|
||||
{{ customer }}-wordpress-backups:
|
||||
|
|
@ -0,0 +1,28 @@
|
|||
# LetsBe Backup Rclone Configuration Template
|
||||
#
|
||||
# Copy this file to /root/.config/rclone/rclone.conf and replace
|
||||
# the placeholder values with your S3-compatible storage credentials.
|
||||
#
|
||||
# For MinIO (self-hosted):
|
||||
# endpoint = https://s3.yourdomain.com
|
||||
#
|
||||
# For AWS S3:
|
||||
# provider = AWS
|
||||
# endpoint = (leave empty)
|
||||
# region = us-east-1
|
||||
#
|
||||
# For Backblaze B2:
|
||||
# provider = B2
|
||||
# endpoint = https://s3.us-west-002.backblazeb2.com
|
||||
#
|
||||
# After configuring, verify with:
|
||||
# rclone lsd backup:
|
||||
# rclone mkdir backup:letsbe-backups
|
||||
|
||||
[backup]
|
||||
type = s3
|
||||
provider = Minio
|
||||
env_auth = false
|
||||
access_key_id = ${BACKUP_S3_ACCESS_KEY}
|
||||
secret_access_key = ${BACKUP_S3_SECRET_KEY}
|
||||
endpoint = ${BACKUP_S3_ENDPOINT}
|
||||
|
|
@ -1 +0,0 @@
|
|||
Subproject commit 17f4cf765ccb83970c8c9aacf92c551b6470cdcf
|
||||
|
|
@ -0,0 +1,44 @@
|
|||
# Dependencies
|
||||
node_modules
|
||||
.pnp
|
||||
.pnp.js
|
||||
|
||||
# Build outputs
|
||||
.next
|
||||
out
|
||||
build
|
||||
dist
|
||||
|
||||
# Testing
|
||||
coverage
|
||||
|
||||
# Environment files (secrets!)
|
||||
.env
|
||||
.env.*
|
||||
!.env.example
|
||||
|
||||
# IDE
|
||||
.idea
|
||||
.vscode
|
||||
*.swp
|
||||
*.swo
|
||||
|
||||
# OS
|
||||
.DS_Store
|
||||
Thumbs.db
|
||||
|
||||
# Git
|
||||
.git
|
||||
.gitignore
|
||||
|
||||
# Logs
|
||||
*.log
|
||||
npm-debug.log*
|
||||
yarn-debug.log*
|
||||
yarn-error.log*
|
||||
|
||||
# Misc
|
||||
*.md
|
||||
!README.md
|
||||
LICENSE
|
||||
deploy/
|
||||
|
|
@ -0,0 +1,34 @@
|
|||
# LetsBe Hub Configuration
|
||||
|
||||
# Database
|
||||
DATABASE_URL=postgresql://hub:hub@db:5432/hub
|
||||
|
||||
# Admin API Key (CHANGE IN PRODUCTION!)
|
||||
ADMIN_API_KEY=change-me-in-production
|
||||
|
||||
# Debug mode
|
||||
DEBUG=false
|
||||
|
||||
# Telemetry retention (days)
|
||||
TELEMETRY_RETENTION_DAYS=90
|
||||
|
||||
# =============================================================================
|
||||
# Email (Resend)
|
||||
# =============================================================================
|
||||
# API key from https://resend.com
|
||||
# RESEND_API_KEY=re_xxxxxxxxxx
|
||||
# Sender email address (must be verified in Resend)
|
||||
# RESEND_FROM_EMAIL=noreply@yourdomain.com
|
||||
|
||||
# =============================================================================
|
||||
# Cron / Scheduled Tasks
|
||||
# =============================================================================
|
||||
# Secret used to authenticate cron job requests
|
||||
# Generate with: openssl rand -hex 32
|
||||
# CRON_SECRET=
|
||||
|
||||
# =============================================================================
|
||||
# Public API
|
||||
# =============================================================================
|
||||
# API key exposed to client-side code (non-sensitive, for rate limiting etc.)
|
||||
# PUBLIC_API_KEY=
|
||||
|
|
@ -0,0 +1,24 @@
|
|||
# Database
|
||||
DATABASE_URL="postgresql://letsbe:letsbe@localhost:5432/letsbe_hub"
|
||||
|
||||
# NextAuth.js
|
||||
NEXTAUTH_URL="http://localhost:3000"
|
||||
NEXTAUTH_SECRET="your-secret-key-here-change-in-production"
|
||||
|
||||
# Stripe (Phase 5)
|
||||
# STRIPE_SECRET_KEY="sk_test_..."
|
||||
# STRIPE_WEBHOOK_SECRET="whsec_..."
|
||||
|
||||
# Entri DNS API (Phase 3)
|
||||
# ENTRI_APP_ID="..."
|
||||
# ENTRI_SECRET="..."
|
||||
|
||||
# Runner Authentication
|
||||
RUNNER_TOKEN="change-me-in-production"
|
||||
|
||||
# Admin Setup
|
||||
ADMIN_EMAIL="admin@letsbe.solutions"
|
||||
ADMIN_PASSWORD="change-me-in-production"
|
||||
|
||||
# Hub Internal URL (for runners)
|
||||
HUB_INTERNAL_URL="http://localhost:3000"
|
||||
|
|
@ -0,0 +1,79 @@
|
|||
name: Build and Push Docker Image
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
- master
|
||||
tags:
|
||||
- 'v*'
|
||||
pull_request:
|
||||
branches:
|
||||
- main
|
||||
- master
|
||||
|
||||
env:
|
||||
REGISTRY: code.letsbe.solutions
|
||||
IMAGE_NAME: letsbe/hub
|
||||
|
||||
jobs:
|
||||
lint-and-typecheck:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Node.js
|
||||
uses: actions/setup-node@v4
|
||||
with:
|
||||
node-version: '20'
|
||||
|
||||
- name: Install dependencies
|
||||
run: npm install
|
||||
|
||||
- name: Generate Prisma client
|
||||
run: npx prisma generate
|
||||
|
||||
- name: Run TypeScript check
|
||||
run: npm run typecheck
|
||||
|
||||
- name: Run linter
|
||||
run: npm run lint --if-present
|
||||
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
needs: lint-and-typecheck
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
|
||||
- name: Log in to Gitea Container Registry
|
||||
if: github.event_name != 'pull_request'
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: ${{ env.REGISTRY }}
|
||||
username: ${{ gitea.actor }}
|
||||
password: ${{ secrets.REGISTRY_TOKEN }}
|
||||
|
||||
- name: Extract metadata (tags, labels)
|
||||
id: meta
|
||||
uses: docker/metadata-action@v5
|
||||
with:
|
||||
images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
|
||||
tags: |
|
||||
type=ref,event=branch
|
||||
type=ref,event=pr
|
||||
type=semver,pattern={{version}}
|
||||
type=semver,pattern={{major}}.{{minor}}
|
||||
type=raw,value=latest,enable={{is_default_branch}}
|
||||
|
||||
- name: Build and push Docker image
|
||||
uses: docker/build-push-action@v5
|
||||
with:
|
||||
context: .
|
||||
push: ${{ github.event_name != 'pull_request' }}
|
||||
tags: ${{ steps.meta.outputs.tags }}
|
||||
labels: ${{ steps.meta.outputs.labels }}
|
||||
|
|
@ -0,0 +1,57 @@
|
|||
# Dependencies
|
||||
node_modules/
|
||||
.pnp/
|
||||
.pnp.js
|
||||
|
||||
# Build outputs
|
||||
.next/
|
||||
out/
|
||||
dist/
|
||||
build/
|
||||
|
||||
# Testing
|
||||
coverage/
|
||||
|
||||
# Environment files
|
||||
.env
|
||||
.env.local
|
||||
.env.development.local
|
||||
.env.test.local
|
||||
.env.production.local
|
||||
deploy/.env
|
||||
!.env.example
|
||||
!.env.local.example
|
||||
!deploy/.env.example
|
||||
|
||||
# IDE
|
||||
.idea/
|
||||
.vscode/
|
||||
*.swp
|
||||
*.swo
|
||||
|
||||
# Debug
|
||||
npm-debug.log*
|
||||
yarn-debug.log*
|
||||
yarn-error.log*
|
||||
|
||||
# Typescript
|
||||
*.tsbuildinfo
|
||||
next-env.d.ts
|
||||
|
||||
# Misc
|
||||
.DS_Store
|
||||
*.pem
|
||||
Thumbs.db
|
||||
|
||||
# Serena
|
||||
.serena/
|
||||
|
||||
# Vercel
|
||||
.vercel
|
||||
|
||||
# Prisma
|
||||
prisma/*.db
|
||||
prisma/*.db-journal
|
||||
|
||||
# Job runtime data
|
||||
jobs/
|
||||
|
|
@ -0,0 +1,292 @@
|
|||
# CLAUDE.md — LetsBe Hub
|
||||
|
||||
## Purpose
|
||||
|
||||
You are the engineering assistant for the LetsBe Hub Dashboard.
|
||||
This is the admin dashboard and API for managing the LetsBe Cloud platform.
|
||||
|
||||
The Hub provides:
|
||||
|
||||
- **Admin Dashboard**: Next.js admin UI for platform management
|
||||
- **Customer Management**: Create/manage customers and subscriptions
|
||||
- **Order Management**: Process and track provisioning orders
|
||||
- **Server Monitoring**: View and manage tenant servers
|
||||
- **Netcup Integration**: Full server management via Netcup SCP API
|
||||
- **Token Usage Tracking**: Monitor AI token consumption
|
||||
|
||||
## Tech Stack
|
||||
|
||||
- **Next.js 15** (App Router)
|
||||
- **TypeScript** (strict mode)
|
||||
- **Prisma** (PostgreSQL ORM)
|
||||
- **TanStack Query** (React Query v5)
|
||||
- **Tailwind CSS** + shadcn/ui components
|
||||
- **NextAuth.js** (authentication)
|
||||
|
||||
## Project Structure
|
||||
|
||||
```
|
||||
src/
|
||||
├── app/ # Next.js App Router
|
||||
│ ├── admin/ # Admin dashboard pages
|
||||
│ │ ├── customers/ # Customer management
|
||||
│ │ │ └── [id]/ # Customer detail with order creation
|
||||
│ │ ├── orders/ # Order management
|
||||
│ │ │ └── [id]/ # Order detail with DNS, provisioning
|
||||
│ │ ├── servers/ # Server monitoring
|
||||
│ │ │ └── netcup/ # Netcup servers management
|
||||
│ │ │ └── [id]/ # Netcup server detail
|
||||
│ │ ├── settings/ # Admin settings
|
||||
│ │ └── layout.tsx # Admin layout with sidebar
|
||||
│ ├── api/v1/ # API routes
|
||||
│ │ ├── admin/ # Admin API endpoints
|
||||
│ │ │ ├── customers/ # Customer CRUD
|
||||
│ │ │ ├── orders/ # Order CRUD + provisioning
|
||||
│ │ │ ├── netcup/ # Netcup SCP API integration
|
||||
│ │ │ └── servers/ # Server management
|
||||
│ │ └── public/ # Public API endpoints
|
||||
│ └── (auth)/ # Authentication pages
|
||||
├── components/
|
||||
│ ├── admin/ # Admin-specific components
|
||||
│ │ ├── create-order-dialog.tsx # Order creation wizard
|
||||
│ │ ├── netcup-auth-setup.tsx # Netcup OAuth setup
|
||||
│ │ ├── netcup-server-link.tsx # Link orders to Netcup servers
|
||||
│ │ └── dns-verification-panel.tsx # DNS verification UI
|
||||
│ └── ui/ # Reusable UI components (shadcn/ui)
|
||||
├── hooks/ # React Query hooks
|
||||
│ ├── use-customers.ts # Customer data hooks
|
||||
│ ├── use-orders.ts # Order data hooks
|
||||
│ ├── use-netcup.ts # Netcup API hooks
|
||||
│ └── use-dns.ts # DNS verification hooks
|
||||
├── lib/ # Utilities and shared code
|
||||
│ ├── prisma.ts # Prisma client singleton
|
||||
│ └── services/ # Backend services
|
||||
│ ├── netcup-service.ts # Netcup SCP API client
|
||||
│ ├── dns-service.ts # DNS verification service
|
||||
│ └── settings-service.ts # System settings storage
|
||||
└── types/ # TypeScript type definitions
|
||||
```
|
||||
|
||||
## API Routes
|
||||
|
||||
### Admin Endpoints (authenticated)
|
||||
|
||||
```
|
||||
# Customers
|
||||
GET /api/v1/admin/customers # List customers
|
||||
GET /api/v1/admin/customers/[id] # Get customer detail
|
||||
PATCH /api/v1/admin/customers/[id] # Update customer
|
||||
|
||||
# Orders
|
||||
GET /api/v1/admin/orders # List orders
|
||||
POST /api/v1/admin/orders # Create order
|
||||
GET /api/v1/admin/orders/[id] # Get order detail
|
||||
PATCH /api/v1/admin/orders/[id] # Update order
|
||||
GET /api/v1/admin/orders/[id]/logs # Get provisioning logs (SSE)
|
||||
POST /api/v1/admin/orders/[id]/provision # Start provisioning
|
||||
|
||||
# DNS Verification
|
||||
GET /api/v1/admin/orders/[id]/dns # Get DNS status
|
||||
POST /api/v1/admin/orders/[id]/dns/verify # Trigger DNS verification
|
||||
POST /api/v1/admin/orders/[id]/dns/skip # Manual DNS override
|
||||
|
||||
# Servers
|
||||
GET /api/v1/admin/servers # List servers (derived from orders)
|
||||
|
||||
# Netcup Integration
|
||||
GET /api/v1/admin/netcup/auth # Get auth status / poll for token
|
||||
POST /api/v1/admin/netcup/auth # Initiate device auth flow
|
||||
DELETE /api/v1/admin/netcup/auth # Disconnect Netcup account
|
||||
GET /api/v1/admin/netcup/servers # List all Netcup servers
|
||||
GET /api/v1/admin/netcup/servers/[id] # Get server detail
|
||||
POST /api/v1/admin/netcup/servers/[id]/power # Power actions
|
||||
POST /api/v1/admin/netcup/servers/[id]/rescue # Rescue mode
|
||||
GET /api/v1/admin/netcup/servers/[id]/metrics # Performance metrics
|
||||
GET /api/v1/admin/netcup/servers/[id]/snapshots # List snapshots
|
||||
POST /api/v1/admin/netcup/servers/[id]/snapshots # Create snapshot
|
||||
|
||||
# Dashboard
|
||||
GET /api/v1/admin/dashboard/stats # Dashboard statistics
|
||||
```
|
||||
|
||||
## Netcup SCP Integration
|
||||
|
||||
The Hub integrates with Netcup's Server Control Panel API for full server management.
|
||||
|
||||
### Authentication
|
||||
Uses OAuth2 Device Flow:
|
||||
1. Hub initiates device auth, gets `user_code` and `verification_uri`
|
||||
2. User visits Netcup and enters the code
|
||||
3. Hub polls for token exchange
|
||||
4. Tokens stored in `SystemSettings` table
|
||||
5. Access tokens auto-refresh (5min expiry, offline refresh token)
|
||||
|
||||
### Capabilities
|
||||
- **Server List**: View all Netcup servers with live status
|
||||
- **Power Control**: ON/OFF/POWERCYCLE/RESET/POWEROFF
|
||||
- **Rescue Mode**: Activate/deactivate rescue system
|
||||
- **Metrics**: CPU, disk I/O, network throughput (up to 30 days)
|
||||
- **Snapshots**: Create, list, delete, revert snapshots
|
||||
- **Server Linking**: Link orders to Netcup servers by IP
|
||||
|
||||
### Key Service: `netcup-service.ts`
|
||||
```typescript
|
||||
// Core methods
|
||||
netcupService.initiateDeviceAuth() // Start OAuth flow
|
||||
netcupService.pollForToken(deviceCode) // Complete OAuth
|
||||
netcupService.getServers() // List with IPs from interfaces
|
||||
netcupService.getServer(id, liveInfo) // Detail with live status
|
||||
netcupService.powerAction(id, action) // Power control
|
||||
netcupService.getServerInterfaces(id) // Get IP addresses
|
||||
netcupService.getAllMetrics(id, hours) // CPU/disk/network metrics
|
||||
```
|
||||
|
||||
## Development Commands
|
||||
|
||||
```bash
|
||||
# Start database (required first)
|
||||
docker compose up -d
|
||||
|
||||
# Install dependencies
|
||||
npm install
|
||||
|
||||
# Start development server
|
||||
npm run dev
|
||||
|
||||
# Run database migrations
|
||||
npx prisma migrate dev
|
||||
|
||||
# Generate Prisma client
|
||||
npx prisma generate
|
||||
|
||||
# Seed database
|
||||
npm run db:seed
|
||||
|
||||
# Type checking
|
||||
npm run typecheck
|
||||
|
||||
# Build for production
|
||||
npm run build
|
||||
|
||||
# App available at http://localhost:3000
|
||||
```
|
||||
|
||||
## Key Patterns
|
||||
|
||||
### React Query Hooks
|
||||
|
||||
All data fetching uses React Query hooks in `src/hooks/`:
|
||||
- `useCustomers()`, `useCustomer(id)` - Customer data
|
||||
- `useOrders()`, `useOrder(id)` - Order data
|
||||
- `useServers()` - Server list
|
||||
- `useDashboardStats()` - Dashboard metrics
|
||||
- `useNetcupServers()`, `useNetcupServer(id)` - Netcup servers
|
||||
- `useNetcupAuth()` - Netcup authentication status
|
||||
- `useServerMetrics(id, hours)` - Server performance metrics
|
||||
- `useServerSnapshots(id)` - Server snapshots
|
||||
|
||||
Mutations follow the pattern:
|
||||
- `useCreateOrder()`, `useUpdateOrder()`
|
||||
- `useNetcupPowerAction()`, `useNetcupRescue()`
|
||||
- `useCreateSnapshot()`, `useDeleteSnapshot()`, `useRevertSnapshot()`
|
||||
- Automatic cache invalidation via `queryClient.invalidateQueries()`
|
||||
|
||||
### API Route Pattern
|
||||
|
||||
```typescript
|
||||
export async function GET(request: NextRequest) {
|
||||
// Auth check
|
||||
const session = await auth()
|
||||
if (!session || session.user.userType !== 'staff') {
|
||||
return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })
|
||||
}
|
||||
|
||||
// Parse query params
|
||||
const searchParams = request.nextUrl.searchParams
|
||||
|
||||
// Database query with Prisma
|
||||
const data = await prisma.model.findMany({...})
|
||||
|
||||
// Return JSON response
|
||||
return NextResponse.json(data)
|
||||
}
|
||||
```
|
||||
|
||||
### Component Pattern
|
||||
|
||||
```typescript
|
||||
'use client'
|
||||
|
||||
export function MyComponent() {
|
||||
const { data, isLoading, error } = useMyData()
|
||||
|
||||
if (isLoading) return <Skeleton />
|
||||
if (error) return <ErrorMessage error={error} />
|
||||
|
||||
return <div>...</div>
|
||||
}
|
||||
```
|
||||
|
||||
### Service Pattern
|
||||
|
||||
Backend services in `src/lib/services/`:
|
||||
```typescript
|
||||
class MyService {
|
||||
private static instance: MyService
|
||||
|
||||
static getInstance(): MyService {
|
||||
if (!MyService.instance) {
|
||||
MyService.instance = new MyService()
|
||||
}
|
||||
return MyService.instance
|
||||
}
|
||||
|
||||
async doSomething(): Promise<Result> {
|
||||
// Implementation
|
||||
}
|
||||
}
|
||||
|
||||
export const myService = MyService.getInstance()
|
||||
```
|
||||
|
||||
## Database Schema (Key Models)
|
||||
|
||||
```prisma
|
||||
model Customer {
|
||||
id String @id @default(cuid())
|
||||
name String
|
||||
email String @unique
|
||||
company String?
|
||||
orders Order[]
|
||||
}
|
||||
|
||||
model Order {
|
||||
id String @id @default(cuid())
|
||||
status OrderStatus
|
||||
domain String
|
||||
customerId String
|
||||
serverIp String?
|
||||
serverPassword String?
|
||||
netcupServerId String? # Linked Netcup server
|
||||
automationMode AutomationMode @default(MANUAL)
|
||||
customer Customer @relation(...)
|
||||
dnsVerification DnsVerification?
|
||||
}
|
||||
|
||||
model SystemSettings {
|
||||
id String @id @default(cuid())
|
||||
key String @unique
|
||||
value String @db.Text
|
||||
}
|
||||
```
|
||||
|
||||
## Coding Conventions
|
||||
|
||||
- Use `'use client'` directive for client components
|
||||
- All API routes return `NextResponse.json()`
|
||||
- Use Prisma for all database operations
|
||||
- Follow existing shadcn/ui component patterns
|
||||
- Use React Query for server state management
|
||||
- TypeScript strict mode - no `any` types
|
||||
- Services are singletons exported from `lib/services/`
|
||||
- Environment variables in `.env.local` (never commit)
|
||||
|
|
@ -0,0 +1,87 @@
|
|||
FROM node:20-alpine AS base
|
||||
|
||||
# Install dependencies only when needed
|
||||
FROM base AS deps
|
||||
RUN apk add --no-cache libc6-compat
|
||||
WORKDIR /app
|
||||
|
||||
# Install dependencies
|
||||
COPY package.json package-lock.json* ./
|
||||
RUN npm install
|
||||
|
||||
# Generate Prisma Client (Prisma 7 uses prisma.config.mjs for datasource URL)
|
||||
COPY prisma ./prisma/
|
||||
COPY prisma.config.mjs ./
|
||||
RUN npx prisma generate
|
||||
|
||||
# Rebuild the source code only when needed
|
||||
FROM base AS builder
|
||||
WORKDIR /app
|
||||
COPY --from=deps /app/node_modules ./node_modules
|
||||
COPY . .
|
||||
|
||||
# Ensure public directory exists
|
||||
RUN mkdir -p public
|
||||
|
||||
# Next.js telemetry
|
||||
ENV NEXT_TELEMETRY_DISABLED=1
|
||||
|
||||
RUN npm run build
|
||||
|
||||
# Production image, copy all the files and run next
|
||||
FROM base AS runner
|
||||
WORKDIR /app
|
||||
|
||||
ENV NODE_ENV=production
|
||||
ENV NEXT_TELEMETRY_DISABLED=1
|
||||
|
||||
# Install Docker CLI for spawning provisioning containers
|
||||
RUN apk add --no-cache docker-cli
|
||||
|
||||
RUN addgroup --system --gid 1001 nodejs
|
||||
RUN adduser --system --uid 1001 nextjs
|
||||
|
||||
# Add nextjs user to docker group for socket access
|
||||
# Note: The actual docker group GID might differ - using 999 as common default
|
||||
RUN addgroup -g 999 docker || true
|
||||
RUN addgroup nextjs docker || true
|
||||
|
||||
# Create jobs and logs directories for provisioning
|
||||
RUN mkdir -p /app/jobs /app/logs
|
||||
RUN chown -R nextjs:nodejs /app/jobs /app/logs
|
||||
|
||||
# Create public directory and copy contents if they exist
|
||||
RUN mkdir -p public
|
||||
COPY --from=builder /app/public/. ./public/
|
||||
|
||||
# Set the correct permission for prerender cache
|
||||
RUN mkdir .next
|
||||
RUN chown nextjs:nodejs .next
|
||||
|
||||
# Automatically leverage output traces to reduce image size
|
||||
COPY --from=builder --chown=nextjs:nodejs /app/.next/standalone ./
|
||||
COPY --from=builder --chown=nextjs:nodejs /app/.next/static ./.next/static
|
||||
|
||||
# Copy Prisma client and schema (for runtime + migrations)
|
||||
COPY --from=deps /app/node_modules/.prisma ./node_modules/.prisma
|
||||
COPY --from=deps /app/node_modules/@prisma ./node_modules/@prisma
|
||||
COPY prisma ./prisma/
|
||||
COPY prisma.config.mjs ./
|
||||
|
||||
# Install Prisma CLI globally for running migrations on startup
|
||||
# (copying just node_modules/prisma misses transitive deps like valibot)
|
||||
RUN npm install -g prisma@7
|
||||
|
||||
# Copy startup script (runs migrations before starting app)
|
||||
# Use tr to strip Windows CRLF line endings (more reliable than sed on Alpine)
|
||||
COPY startup.sh /tmp/startup.sh
|
||||
RUN tr -d '\r' < /tmp/startup.sh > startup.sh && chmod +x startup.sh && rm /tmp/startup.sh
|
||||
|
||||
USER nextjs
|
||||
|
||||
EXPOSE 3000
|
||||
|
||||
ENV PORT=3000
|
||||
ENV HOSTNAME="0.0.0.0"
|
||||
|
||||
CMD ["./startup.sh"]
|
||||
|
|
@ -0,0 +1,46 @@
|
|||
# LetsBe Hub Production Configuration
|
||||
# Copy this file to .env and fill in the values
|
||||
|
||||
# =============================================================================
|
||||
# REQUIRED - Must be set before deployment
|
||||
# =============================================================================
|
||||
|
||||
# Hub public URL (used for auth callbacks and runner communication)
|
||||
HUB_URL=https://hub.yourdomain.com
|
||||
|
||||
# Database password (generate a strong random password)
|
||||
POSTGRES_PASSWORD=CHANGE_ME_STRONG_PASSWORD_HERE
|
||||
|
||||
# NextAuth secret (generate with: openssl rand -base64 32)
|
||||
NEXTAUTH_SECRET=CHANGE_ME_GENERATE_WITH_OPENSSL_RAND_BASE64_32
|
||||
|
||||
# Credential encryption key (generate with: openssl rand -hex 32)
|
||||
CREDENTIAL_ENCRYPTION_KEY=CHANGE_ME_GENERATE_WITH_OPENSSL_RAND_HEX_32
|
||||
|
||||
# Settings encryption key (generate with: openssl rand -hex 32)
|
||||
SETTINGS_ENCRYPTION_KEY=CHANGE_ME_GENERATE_WITH_OPENSSL_RAND_HEX_32
|
||||
|
||||
# =============================================================================
|
||||
# OPTIONAL - Defaults are usually fine
|
||||
# =============================================================================
|
||||
|
||||
# Database settings
|
||||
POSTGRES_USER=letsbe_hub
|
||||
POSTGRES_DB=letsbe_hub
|
||||
|
||||
# Hub port (change if 3000 is occupied)
|
||||
HUB_PORT=3847
|
||||
|
||||
# Hub image tag (default: master)
|
||||
HUB_IMAGE_TAG=master
|
||||
|
||||
# Ansible Runner settings
|
||||
DOCKER_REGISTRY_URL=code.letsbe.solutions
|
||||
DOCKER_IMAGE_NAME=letsbe/ansible-runner
|
||||
DOCKER_IMAGE_TAG=master
|
||||
DOCKER_MAX_CONCURRENT=3
|
||||
|
||||
# Host paths for job configs (runner containers need access)
|
||||
# These directories will be created automatically by Docker
|
||||
JOBS_HOST_DIR=/opt/letsbe-hub/jobs
|
||||
LOGS_HOST_DIR=/opt/letsbe-hub/logs
|
||||
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue