Include full contents of all nested repositories

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
2026-02-27 16:25:02 +01:00
parent 14ff8fd54c
commit 2401ed446f
7271 changed files with 1310112 additions and 6 deletions

View File

@@ -0,0 +1,31 @@
## It is advisable to consult the documentation and use tools/deploy.sh to generate the passwords and keys, instead of filling them in manually.
AP_ENGINE_EXECUTABLE_PATH=dist/packages/engine/main.js
## Random Long Password (Optional for community edition)
AP_API_KEY={{ activepieces_api_key }}
## 256 bit encryption key, 32 hex character
AP_ENCRYPTION_KEY={{ activepieces_encryption_key }}
## JWT Secret
AP_JWT_SECRET={{ activepieces_jwt_secret }}
AP_ENVIRONMENT=prod
AP_FRONTEND_URL=https://{{ domain_activepieces }}
AP_WEBHOOK_TIMEOUT_SECONDS=30
AP_TRIGGER_DEFAULT_POLL_INTERVAL=5
AP_POSTGRES_DATABASE=activepieces
AP_POSTGRES_HOST=postgres
AP_POSTGRES_PORT=5432
AP_POSTGRES_USERNAME=activepieces-postgres
AP_POSTGRES_PASSWORD={{ activepieces_postgres_password }}
AP_EXECUTION_MODE=UNSANDBOXED
AP_REDIS_HOST=redis
AP_REDIS_PORT=6379
AP_FLOW_TIMEOUT_SECONDS=600
AP_TELEMETRY_ENABLED=true
AP_TEMPLATES_SOURCE_URL="https://cloud.activepieces.com/api/v1/flow-templates"
AP_PROJECT_RATE_LIMITER_ENABLED=false
AP_PIECES_SOURCE=DB
AP_FILE_STORAGE_LOCATION=DB

View File

@@ -0,0 +1,52 @@
version: '3.0'
services:
activepieces:
image: ghcr.io/activepieces/activepieces:0.39.2
container_name: {{ customer }}-activepieces
restart: unless-stopped
labels:
- "diun.enable=true"
## Enable the following line if you already use AP_EXECUTION_MODE with SANDBOXED or an old activepieces version, checking the breaking-change documentation for more details.
ports:
- '3056:80'
depends_on:
- postgres
- redis
env_file: /opt/letsbe/env/activepieces.env
networks:
{{ customer }}-activepieces:
ipv4_address: 172.20.27.2
postgres:
image: 'postgres:14.4'
container_name: activepieces-postgres
restart: unless-stopped
environment:
- 'POSTGRES_DB=${AP_POSTGRES_DATABASE}'
- 'POSTGRES_PASSWORD=${AP_POSTGRES_PASSWORD}'
- 'POSTGRES_USER=${AP_POSTGRES_USERNAME}'
volumes:
- activepieces_postgres_data:/var/lib/postgresql/data
networks:
{{ customer }}-activepieces:
ipv4_address: 172.20.27.3
redis:
image: 'redis:7.0.7'
container_name: activepieces-redis
restart: unless-stopped
volumes:
- 'activepieces_redis_data:/data'
networks:
{{ customer }}-activepieces:
ipv4_address: 172.20.27.4
volumes:
activepieces_postgres_data:
activepieces_redis_data:
networks:
{{ customer }}-activepieces:
ipam:
config:
- subnet: 172.20.27.0/28
gateway: 172.20.27.1

View File

@@ -0,0 +1,61 @@
# Set this value to 'agree' to accept our license:
# LICENSE: https://github.com/calendso/calendso/blob/main/LICENSE
#
# Summary of terms:
# - The codebase has to stay open source, whether it was modified or not
# - You can not repackage or sell the codebase
# - Acquire a commercial license to remove these terms by emailing: license@cal.com
NEXT_PUBLIC_LICENSE_CONSENT=
LICENSE=
# BASE_URL and NEXT_PUBLIC_APP_URL are both deprecated. Both are replaced with one variable, NEXT_PUBLIC_WEBAPP_URL
# BASE_URL=http://localhost:3000
# NEXT_PUBLIC_APP_URL=http://localhost:3000
NEXT_PUBLIC_WEBAPP_URL=https://{{ domain_calcom }}
# Configure NEXTAUTH_URL manually if needed, otherwise it will resolve to {NEXT_PUBLIC_WEBAPP_URL}/api/auth
# NEXTAUTH_URL=http://localhost:3000/api/auth
# It is highly recommended that the NEXTAUTH_SECRET must be overridden and very unique
# Use `openssl rand -base64 32` to generate a key
NEXTAUTH_SECRET={{ calcom_nextauth_secret }}
# Encryption key that will be used to encrypt CalDAV credentials, choose a random string, for example with `dd if=/dev/urandom bs=1K count=1 | md5sum`
CALENDSO_ENCRYPTION_KEY={{ calcom_encryption_key }}
# Deprecation note: JWT_SECRET is no longer used
# JWT_SECRET=secret
POSTGRES_USER={{ calcom_postgres_user }}
POSTGRES_PASSWORD={{ calcom_postgres_password }}
POSTGRES_DB=calcom
DATABASE_HOST={{ customer }}-calcom-postgres:5432
DATABASE_URL=postgresql://${POSTGRES_USER}:${POSTGRES_PASSWORD}@${DATABASE_HOST}/${POSTGRES_DB}
GOOGLE_API_CREDENTIALS={}
#Fix calcom db migration issues
DATABASE_DIRECT_URL=postgresql://${POSTGRES_USER}:${POSTGRES_PASSWORD}@${DATABASE_HOST}/${POSTGRES_DB}
# Set this to '1' if you don't want Cal to collect anonymous usage
CALCOM_TELEMETRY_DISABLED=1
# Used for the Office 365 / Outlook.com Calendar integration
MS_GRAPH_CLIENT_ID=
MS_GRAPH_CLIENT_SECRET=
# Used for the Zoom integration
ZOOM_CLIENT_ID=
ZOOM_CLIENT_SECRET=
# E-mail settings
# Configures the global From: header whilst sending emails.
EMAIL_FROM=system@{{ domain }}
SUPPORT_MAIL_ADDRESS=support@{{ domain }}
# Configure SMTP settings (@see https://nodemailer.com/smtp/).
EMAIL_SERVER_HOST=mail.{{ domain }}
EMAIL_SERVER_PORT=587
EMAIL_SERVER_USER=system@{{ domain }}
EMAIL_SERVER_PASSWORD=
#EMAIL_SERVER_SECURE=false
NODE_ENV=production

View File

@@ -0,0 +1,41 @@
services:
calcom-postgres:
container_name: {{ customer }}-calcom-postgres
image: postgres:16 #original postgres
restart: always
volumes:
- {{ customer }}-calcom-postgres:/var/lib/postgresql/data/
- {{ customer }}-calcom-backups:/tmp/backups
env_file: /opt/letsbe/env/calcom.env
networks:
{{ customer }}-calcom:
ipv4_address: 172.20.18.2
calcom:
container_name: {{ customer }}-calcom
image: calcom/cal.com:v5.9.8
restart: always
labels:
- "diun.enable=true"
ports:
- '127.0.0.1:3018:3000'
env_file: /opt/letsbe/env/calcom.env
environment:
- DATABASE_URL=postgresql://${POSTGRES_USER}:${POSTGRES_PASSWORD}@${DATABASE_HOST}/${POSTGRES_DB}
depends_on:
- calcom-postgres
networks:
{{ customer }}-calcom:
ipv4_address: 172.20.18.3
networks:
{{ customer }}-calcom:
ipam:
driver: default
config:
- subnet: 172.20.18.0/28
gateway: 172.20.18.1
volumes:
{{ customer }}-calcom-postgres:
{{ customer }}-calcom-backups:

View File

@@ -0,0 +1,239 @@
SECRET_KEY_BASE={{ chatwoot_secret_key_base }}
# Replace with the URL you are planning to use for your app
FRONTEND_URL=https://{{ domain_chatwoot }}
# To use a dedicated URL for help center pages
HELPCENTER_URL=https://{{ domain_chatwoot_helpdesk }}
# If the variable is set, all non-authenticated pages would fallback to the default locale.
# Whenever a new account is created, the default language will be DEFAULT_LOCALE instead of en
# DEFAULT_LOCALE=en
# If you plan to use CDN for your assets, set Asset CDN Host
ASSET_CDN_HOST=
# Force all access to the app over SSL, default is set to false
FORCE_SSL=false
# This lets you control new sign ups on your chatwoot installation
# true : default option, allows sign ups
# false : disables all the end points related to sign ups
# api_only: disables the UI for signup, but you can create sign ups via the account apis
ENABLE_ACCOUNT_SIGNUP=false
# Redis config
REDIS_URL=redis://:{{ chatwoot_redis_password }}@redis:6379
# If you are using docker-compose, set this variable's value to be any string,
# which will be the password for the redis service running inside the docker-compose
# to make it secure
REDIS_PASSWORD={{ chatwoot_redis_password }}
# Redis Sentinel can be used by passing list of sentinel host and ports e,g. sentinel_host1:port1,sentinel_host2:port2
REDIS_SENTINELS=
# Redis sentinel master name is required when using sentinel, default value is "mymaster".
# You can find list of master using "SENTINEL masters" command
REDIS_SENTINEL_MASTER_NAME=
# By default Chatwoot will pass REDIS_PASSWORD as the password value for sentinels
# Use the following environment variable to customize passwords for sentinels.
# Use empty string if sentinels are configured with out passwords
# REDIS_SENTINEL_PASSWORD=
# Redis premium breakage in heroku fix
# enable the following configuration
# ref: https://github.com/chatwoot/chatwoot/issues/2420
# REDIS_OPENSSL_VERIFY_MODE=none
# Postgres Database config variables
# You can leave POSTGRES_DATABASE blank. The default name of
# the database in the production environment is chatwoot_production
POSTGRES_DATABASE=chatwoot_production
POSTGRES_HOST=postgres
POSTGRES_USERNAME={{ chatwoot_postgres_username }}
POSTGRES_PASSWORD={{ chatwoot_postgres_password }}
RAILS_ENV=production
# Changes the Postgres query timeout limit. The default is 14 seconds. Modify only when required.
# POSTGRES_STATEMENT_TIMEOUT=14s
RAILS_MAX_THREADS=5
# The email from which all outgoing emails are sent
# could user either `email@yourdomain.com` or `BrandName <email@yourdomain.com>`
MAILER_SENDER_EMAIL={{ company_name }} <support@{{ domain }}>
#SMTP domain key is set up for HELO checking
SMTP_DOMAIN=mail.{{ domain }}
# Set the value to "mailhog" if using docker-compose for development environments,
# Set the value as "localhost" or your SMTP address in other environments
# If SMTP_ADDRESS is empty, Chatwoot would try to use sendmail(postfix)
SMTP_ADDRESS=mail.{{ domain }}
SMTP_PORT=587
# Optional, only set the following two values if the SMTP server requires authentication.
# Note: inline comments after a value are NOT stripped by Docker env_file parsing.
SMTP_USERNAME=support@{{ domain }}
SMTP_PASSWORD=
# plain,login,cram_md5
SMTP_AUTHENTICATION=login
SMTP_ENABLE_STARTTLS_AUTO=true
# Can be: 'none', 'peer', 'client_once', 'fail_if_no_peer_cert', see http://api.rubyonrails.org/classes/ActionMailer/Base.html
SMTP_OPENSSL_VERIFY_MODE=peer
# Comment out the following environment variables if required by your SMTP server
SMTP_TLS=true
SMTP_SSL=
# Mail Incoming
# This is the domain set for the reply emails when conversation continuity is enabled
MAILER_INBOUND_EMAIL_DOMAIN={{ domain }}
# Set this to appropriate ingress channel with regards to incoming emails
# Possible values are :
# relay for Exim, Postfix, Qmail
# mailgun for Mailgun
# mandrill for Mandrill
# postmark for Postmark
# sendgrid for Sendgrid
RAILS_INBOUND_EMAIL_SERVICE=relay
# Use one of the following based on the email ingress service
# Ref: https://edgeguides.rubyonrails.org/action_mailbox_basics.html
RAILS_INBOUND_EMAIL_PASSWORD={{ chatwoot_rails_inbound_email_password }}
MAILGUN_INGRESS_SIGNING_KEY=
MANDRILL_INGRESS_API_KEY=
# Storage
ACTIVE_STORAGE_SERVICE=local
# Amazon S3
# documentation: https://www.chatwoot.com/docs/configuring-s3-bucket-as-cloud-storage
S3_BUCKET_NAME=
AWS_ACCESS_KEY_ID=
AWS_SECRET_ACCESS_KEY=
AWS_REGION=
# Log settings
# Disable if you want to write logs to a file
RAILS_LOG_TO_STDOUT=true
LOG_LEVEL=info
LOG_SIZE=500
# Configure this environment variable if you want to use lograge instead of rails logger
#LOGRAGE_ENABLED=true
### This environment variables are only required if you are setting up social media channels
# Facebook
# documentation: https://www.chatwoot.com/docs/facebook-setup
FB_VERIFY_TOKEN=
FB_APP_SECRET=
FB_APP_ID=
# https://developers.facebook.com/docs/messenger-platform/instagram/get-started#app-dashboard
IG_VERIFY_TOKEN=
# Twitter
# documentation: https://www.chatwoot.com/docs/twitter-app-setup
TWITTER_APP_ID=
TWITTER_CONSUMER_KEY=
TWITTER_CONSUMER_SECRET=
TWITTER_ENVIRONMENT=
#slack integration
SLACK_CLIENT_ID=
SLACK_CLIENT_SECRET=
# Google OAuth
GOOGLE_OAUTH_CLIENT_ID=
GOOGLE_OAUTH_CLIENT_SECRET=
GOOGLE_OAUTH_CALLBACK_URL=
### Change this env variable only if you are using a custom build mobile app
## Mobile app env variables
IOS_APP_ID=L7YLMN4634.com.chatwoot.app
ANDROID_BUNDLE_ID=com.chatwoot.app
# https://developers.google.com/android/guides/client-auth (use keytool to print the fingerprint in the first section)
ANDROID_SHA256_CERT_FINGERPRINT=AC:73:8E:DE:EB:56:EA:CC:10:87:02:A7:65:37:7B:38:D4:5D:D4:53:F8:3B:FB:D3:C6:28:64:1D:AA:08:1E:D8
### Smart App Banner
# https://developer.apple.com/library/archive/documentation/AppleApplications/Reference/SafariWebContent/PromotingAppswithAppBanners/PromotingAppswithAppBanners.html
# You can find your app-id in https://itunesconnect.apple.com
#IOS_APP_IDENTIFIER=1495796682
## Push Notification
## generate a new key value here : https://d3v.one/vapid-key-generator/
# VAPID_PUBLIC_KEY=
# VAPID_PRIVATE_KEY=
#
# for mobile apps
# FCM_SERVER_KEY=
### APM and Error Monitoring configurations
## Elastic APM
## https://www.elastic.co/guide/en/apm/agent/ruby/current/getting-started-rails.html
# ELASTIC_APM_SERVER_URL=
# ELASTIC_APM_SECRET_TOKEN=
## Sentry
# SENTRY_DSN=
## LogRocket
# LOG_ROCKET_PROJECT_ID=xxxxx/some-project
# MICROSOFT CLARITY
# MS_CLARITY_TOKEN=xxxxxxxxx
## Scout
## https://scoutapm.com/docs/ruby/configuration
# SCOUT_KEY=YOURKEY
# SCOUT_NAME=YOURAPPNAME (Production)
# SCOUT_MONITOR=true
## NewRelic
# https://docs.newrelic.com/docs/agents/ruby-agent/configuration/ruby-agent-configuration/
# NEW_RELIC_LICENSE_KEY=
# Set this to true to allow newrelic apm to send logs.
# This is turned off by default.
# NEW_RELIC_APPLICATION_LOGGING_ENABLED=
## Datadog
## https://github.com/DataDog/dd-trace-rb/blob/master/docs/GettingStarted.md#environment-variables
# DD_TRACE_AGENT_URL=
# MaxMindDB API key to download GeoLite2 City database
# IP_LOOKUP_API_KEY=
## Rack Attack configuration
## To prevent and throttle abusive requests
# ENABLE_RACK_ATTACK=true
## Running chatwoot as an API only server
## setting this value to true will disable the frontend dashboard endpoints
# CW_API_ONLY_SERVER=false
## Development Only Config
# if you want to use letter_opener for local emails
# LETTER_OPENER=true
# meant to be used in github codespaces
# WEBPACKER_DEV_SERVER_PUBLIC=
# If you want to use official mobile app,
# the notifications would be relayed via a Chatwoot server
ENABLE_PUSH_RELAY_SERVER=true
# Stripe API key
STRIPE_SECRET_KEY=
STRIPE_WEBHOOK_SECRET=
# Set to true if you want to upload files to cloud storage using the signed url
# Make sure to follow https://edgeguides.rubyonrails.org/active_storage_overview.html#cross-origin-resource-sharing-cors-configuration on the cloud storage after setting this to true.
DIRECT_UPLOADS_ENABLED=
#MS OAUTH creds
AZURE_APP_ID=
AZURE_APP_SECRET=
## Advanced configurations
## Change these values to fine tune performance
# control the concurrency setting of sidekiq
# SIDEKIQ_CONCURRENCY=10
# AI powered features
## OpenAI key
OPENAI_API_KEY=
# Sentiment analysis model file path
SENTIMENT_FILE_PATH=

View File

@@ -0,0 +1,121 @@
version: '3'
services:
rails:
image: chatwoot/chatwoot:latest
container_name: {{ customer }}-chatwoot-rails
env_file: /opt/letsbe/env/chatwoot.env
restart: always
labels:
- "diun.enable=true"
depends_on:
- postgres
- redis
ports:
- '127.0.0.1:3011:3000'
environment:
- NODE_ENV=production
- RAILS_ENV=production
- INSTALLATION_ENV=docker
entrypoint: docker/entrypoints/rails.sh
command: ['bundle', 'exec', 'rails', 's', '-p', '3000', '-b', '0.0.0.0']
volumes:
- {{ customer }}-chatwoot-storage:/app/storage
- {{ customer }}-chatwoot-backups:/tmp/backups
networks:
{{ customer }}-chatwoot:
ipv4_address: 172.20.1.2
sidekiq:
container_name: {{ customer }}-chatwoot-sidekiq
image: chatwoot/chatwoot:latest
restart: always
env_file: /opt/letsbe/env/chatwoot.env
depends_on:
- postgres
- redis
environment:
- NODE_ENV=production
- RAILS_ENV=production
- INSTALLATION_ENV=docker
command: ['bundle', 'exec', 'sidekiq', '-C', 'config/sidekiq.yml']
volumes:
- {{ customer }}-chatwoot-storage:/app/storage
networks:
{{ customer }}-chatwoot:
ipv4_address: 172.20.1.3
postgres:
container_name: {{ customer }}-chatwoot-postgres
image: pgvector/pgvector:pg16
restart: always
ports:
- '127.0.0.1:3049:5432'
volumes:
- {{ customer }}-chatwoot-postgres:/var/lib/postgresql/data
- {{ customer }}-chatwoot-backups:/tmp/backups
environment:
- POSTGRES_DB=chatwoot_production
- POSTGRES_USER={{ chatwoot_postgres_username }}
# Please provide your own password.
- POSTGRES_PASSWORD={{ chatwoot_postgres_password }}
networks:
{{ customer }}-chatwoot:
ipv4_address: 172.20.1.4
redis:
image: redis:alpine
container_name: {{ customer }}-chatwoot-redis
restart: always
command: ["sh", "-c", "redis-server --requirepass \"$REDIS_PASSWORD\""]
env_file: /opt/letsbe/env/chatwoot.env
volumes:
- {{ customer }}-chatwoot-redis:/data
ports:
- '127.0.0.1:3050:6379'
networks:
{{ customer }}-chatwoot:
ipv4_address: 172.20.1.5
getmail:
image: python:3.12-alpine
container_name: {{ customer }}-chatwoot-getmail
restart: always
depends_on:
- rails
environment:
INGRESS_PASSWORD: ${RAILS_INBOUND_EMAIL_PASSWORD}
CHATWOOT_RELAY_URL: http://rails:3000/rails/action_mailbox/relay/inbound_emails
volumes:
- type: bind
source: /opt/letsbe/stacks/chatwoot/getmail
target: /opt/getmail
entrypoint: >
sh -c "
apk add --no-cache curl ca-certificates &&
pip install --no-cache-dir getmail6 &&
chmod +x /opt/getmail/import_mail_to_chatwoot || true &&
while true; do
for f in /opt/getmail/getmailrc /opt/getmail/getmailrc-*; do
[ -f \"$f\" ] || continue
getmail --getmaildir /opt/getmail --rcfile \"$(basename \"$f\")\" --quiet
done
sleep 60
done
"
networks:
{{ customer }}-chatwoot:
ipv4_address: 172.20.1.6
networks:
{{ customer }}-chatwoot:
ipam:
driver: default
config:
- subnet: 172.20.1.0/28
gateway: 172.20.1.1
volumes:
{{ customer }}-chatwoot-storage:
{{ customer }}-chatwoot-postgres:
{{ customer }}-chatwoot-redis:
{{ customer }}-chatwoot-backups:

View File

@@ -0,0 +1,21 @@
watch:
workers: 20
schedule: "0 */6 * * *"
jitter: 30s
firstCheckNotif: true
providers:
docker:
watchStopped: true
watchByDefault: false
notif:
mail:
host: mail.{{ domain }} # your mail server (Poste in your case)
port: 465 # SSL port
ssl: true
insecureSkipVerify: false
username: updates@{{ domain }} # change to your sender address
password: '##EmailPassword'  # use a strong app password; quoted so YAML does not treat '#' as a comment
from: updates@{{ domain }}
to: matt@letsbe.solutions

View File

@@ -0,0 +1,19 @@
version: "3.7"
services:
diun:
container_name: {{ customer }}-diun
image: crazymax/diun:latest
command: serve
labels:
- "diun.enable=true"
volumes:
- ./data:/data
- ./diun.yml:/diun.yml:ro
- /var/run/docker.sock:/var/run/docker.sock
environment:
- TZ=Europe/Paris
- DIUN_CONFIG=/diun.yml
- LOG_LEVEL=info
- LOG_JSON=false
restart: always

View File

@@ -0,0 +1,47 @@
# Database Settings
POSTGRES_USER={{ documenso_postgres_user }}
POSTGRES_PASSWORD={{ documenso_postgres_password }}
POSTGRES_DB=documenso_db
POSTGRES_PORT=5432
# Documenso App Settings
DOCUMENSO_PORT=3020
NEXTAUTH_URL=https://{{ domain_documenso }}
NEXTAUTH_SECRET={{ documenso_nextauth_secret }}
NEXT_PRIVATE_ENCRYPTION_KEY={{ documenso_encryption_key }}
NEXT_PRIVATE_ENCRYPTION_SECONDARY_KEY={{ documenso_encryption_secondary_key }}
NEXT_PRIVATE_GOOGLE_CLIENT_ID=
NEXT_PRIVATE_GOOGLE_CLIENT_SECRET=
NEXT_PUBLIC_WEBAPP_URL=https://{{ domain_documenso }}
NEXT_PUBLIC_MARKETING_URL=https://{{ domain }}
NEXT_PRIVATE_DATABASE_URL=postgres://{{ documenso_postgres_user }}:{{ documenso_postgres_password }}@{{ customer }}-documenso-db:5432/documenso_db
NEXT_PRIVATE_DIRECT_DATABASE_URL=postgres://{{ documenso_postgres_user }}:{{ documenso_postgres_password }}@{{ customer }}-documenso-db:5432/documenso_db
NEXT_PUBLIC_UPLOAD_TRANSPORT=db
NEXT_PRIVATE_UPLOAD_ENDPOINT=https://{{ domain_s3 }}
NEXT_PRIVATE_UPLOAD_FORCE_PATH_STYLE=true
NEXT_PRIVATE_UPLOAD_REGION=eu-central-1
NEXT_PRIVATE_UPLOAD_BUCKET=signatures
NEXT_PRIVATE_UPLOAD_ACCESS_KEY_ID={{ minio_root_user }}
NEXT_PRIVATE_UPLOAD_SECRET_ACCESS_KEY={{ minio_root_password }}
NEXT_PRIVATE_SMTP_TRANSPORT=smtp-auth
NEXT_PRIVATE_SMTP_HOST={{ domain_poste }}
NEXT_PRIVATE_SMTP_PORT=465
NEXT_PRIVATE_SMTP_USERNAME=noreply@{{ domain }}
NEXT_PRIVATE_SMTP_PASSWORD=
NEXT_PRIVATE_SMTP_APIKEY_USER=
NEXT_PRIVATE_SMTP_APIKEY=
NEXT_PRIVATE_SMTP_SECURE=true
NEXT_PRIVATE_SMTP_FROM_NAME="{{ company_name }} Signatures"
NEXT_PRIVATE_SMTP_FROM_ADDRESS=noreply@{{ domain }}
NEXT_PRIVATE_SMTP_SERVICE=
NEXT_PRIVATE_RESEND_API_KEY=
NEXT_PRIVATE_MAILCHANNELS_API_KEY=
NEXT_PRIVATE_MAILCHANNELS_ENDPOINT=
NEXT_PRIVATE_MAILCHANNELS_DKIM_DOMAIN=
NEXT_PRIVATE_MAILCHANNELS_DKIM_SELECTOR=
NEXT_PRIVATE_MAILCHANNELS_DKIM_PRIVATE_KEY=
NEXT_PUBLIC_DOCUMENT_SIZE_UPLOAD_LIMIT=50MB
NEXT_PUBLIC_POSTHOG_KEY=
NEXT_PUBLIC_DISABLE_SIGNUP=true
NEXT_PRIVATE_SIGNING_LOCAL_FILE_PATH=/opt/documenso/certificate.p12
NEXT_PRIVATE_SIGNING_PASSPHRASE=

View File

@@ -0,0 +1,60 @@
version: "3.8"
services:
database:
container_name: {{ customer }}-documenso-db
image: postgres:15
restart: always
env_file:
- /opt/letsbe/env/documenso.env
environment:
- POSTGRES_USER={{ documenso_postgres_user }}
- POSTGRES_PASSWORD={{ documenso_postgres_password }}
- POSTGRES_DB=documenso_db
healthcheck:
test: ['CMD-SHELL', 'pg_isready -U {{ documenso_postgres_user }} -d documenso_db']
interval: 10s
timeout: 5s
retries: 5
volumes:
- {{ customer }}-documenso-database:/var/lib/postgresql/data
- {{ customer }}-documenso-backups:/tmp/backups
networks:
{{ customer }}-documenso:
ipv4_address: 172.20.29.2
ports:
- "127.0.0.1:5432:5432"
documenso:
container_name: {{ customer }}-documenso-app
image: documenso/documenso:latest
restart: always
depends_on:
database:
condition: service_healthy
env_file:
- /opt/letsbe/env/documenso.env
environment:
- PORT=3020
- NEXT_PRIVATE_INTERNAL_WEBAPP_URL=https://{{ domain_documenso }}
ports:
- "127.0.0.1:3020:3020"
volumes:
- /opt/documenso/certificate.p12:/opt/documenso/certificate.p12
- /etc/localtime:/etc/localtime:ro
networks:
{{ customer }}-documenso:
ipv4_address: 172.20.29.3
labels:
- "diun.enable=true"
networks:
{{ customer }}-documenso:
driver: bridge
ipam:
config:
- subnet: 172.20.29.0/28
volumes:
{{ customer }}-documenso-database:
{{ customer }}-documenso-backups:

View File

@@ -0,0 +1,53 @@
version: '3.8'
services:
ghost:
container_name: {{ customer }}-ghost
image: ghost:alpine
restart: always
ports:
- "127.0.0.1:2368:2368"
environment:
# see https://ghost.org/docs/config/#configuration-options
database__client: mysql
database__connection__host: {{ customer }}-ghost-db
database__connection__user: root
database__connection__password: {{ ghost_mysql_password }}
database__connection__database: ghost
url: https://{{ domain_ghost }}
volumes:
- {{ customer }}-ghost-data:/var/lib/ghost/content
- ./config.production.json:/var/lib/ghost/config.production.json
networks:
{{ customer }}-ghost:
ipv4_address: 172.20.30.2
depends_on:
- ghost-db
labels:
- "diun.enable=true"
ghost-db:
container_name: {{ customer }}-ghost-db
image: mysql:8.0
restart: always
environment:
MYSQL_ROOT_PASSWORD: {{ ghost_mysql_password }}
MYSQL_DATABASE: ghost
volumes:
- {{ customer }}-ghost-db:/var/lib/mysql
- {{ customer }}-ghost-backups:/tmp/backups
networks:
{{ customer }}-ghost:
ipv4_address: 172.20.30.3
networks:
{{ customer }}-ghost:
driver: bridge
ipam:
config:
- subnet: 172.20.30.0/28
volumes:
{{ customer }}-ghost-data:
{{ customer }}-ghost-db:
{{ customer }}-ghost-backups:

View File

@@ -0,0 +1,37 @@
version: '3.9'
services:
drone_gitea:
container_name: {{ customer }}-drone_gitea
image: drone/drone:2
restart: always
labels:
- "diun.enable=true"
volumes:
- {{ customer }}-drone-gitea-data:/data
- {{ customer }}-drone-gitea-backups:/tmp/backups
ports:
- "127.0.0.1:3009:80"
# - "127.0.0.1:3010:443"
environment:
DRONE_GITEA_SERVER: 'https://{{ domain_gitea }}'
DRONE_GITEA_CLIENT_ID: ''
DRONE_GITEA_CLIENT_SECRET: ''
DRONE_RPC_SECRET: '{{ drone_gitea_rpc_secret }}'
DRONE_SERVER_HOST: '{{ domain_gitea_drone }}'
DRONE_SERVER_PROTO: https
networks:
{{ customer }}-drone-gitea:
ipv4_address: 172.20.2.2
networks:
{{ customer }}-drone-gitea:
ipam:
driver: default
config:
- subnet: 172.20.2.0/28
gateway: 172.20.2.1
volumes:
{{ customer }}-drone-gitea-data:
{{ customer }}-drone-gitea-backups:

View File

@@ -0,0 +1,76 @@
### - POSTGRES - ###
version: '3.9'
services:
gitea:
container_name: {{ customer }}-gitea
image: gitea/gitea:latest
restart: always
labels:
- "diun.enable=true"
volumes:
- {{ customer }}-gitea-data:/data
- {{ customer }}-gitea-backups:/tmp/backups
- /etc/timezone:/etc/timezone:ro
- /etc/localtime:/etc/localtime:ro
ports:
- "127.0.0.1:3007:3000"
- "3036:22"
environment:
USER_UID: 1000
USER_GID: 1000
GITEA__database__DB_TYPE: postgres
GITEA__database__HOST: {{ customer }}-gitea-db:5432
GITEA__database__NAME: 'gitea'
GITEA__database__USER: '{{ gitea_postgres_user }}'
GITEA__database__PASSWD: '{{ gitea_postgres_password }}'
networks:
{{ customer }}-gitea:
ipv4_address: 172.20.3.2
depends_on:
- gitea-db
gitea-db:
container_name: {{ customer }}-gitea-db
image: postgres:14
restart: always
environment:
POSTGRES_USER: '{{ gitea_postgres_user }}'
POSTGRES_PASSWORD: '{{ gitea_postgres_password }}'
POSTGRES_DB: 'gitea'
volumes:
- {{ customer }}-gitea-postgres:/var/lib/postgresql/data
- {{ customer }}-gitea-backups:/tmp/backups
networks:
{{ customer }}-gitea:
ipv4_address: 172.20.3.3
# runner:
# image: gitea/act_runner:latest-dind-rootless
# restart: always
# privileged: true
# volumes:
# - {{ customer }}-gitea-runner:/data
# environment:
# - GITEA_INSTANCE_URL=https://{{ domain_gitea }}
# - DOCKER_HOST=unix:///var/run/user/1000/docker.sock
# - GITEA_RUNNER_REGISTRATION_TOKEN=<registration token>
# networks:
# {{ customer }}-gitea:
# ipv4_address: 172.20.3.5
# depends_on:
# - gitea
networks:
{{ customer }}-gitea:
ipam:
driver: default
config:
- subnet: 172.20.3.0/28
gateway: 172.20.3.1
volumes:
{{ customer }}-gitea-data:
{{ customer }}-gitea-postgres:
{{ customer }}-gitea-runner:
{{ customer }}-gitea-backups:

View File

@@ -0,0 +1,109 @@
version: "3.8"
services:
postgres:
container_name: {{ customer }}-glitchtip-postgres
image: postgres:15
ports:
- "127.0.0.1:3046:5432"
environment:
#POSTGRES_HOST_AUTH_METHOD: "trust" # Consider removing this and setting a password
POSTGRES_PASSWORD: '{{ glitchtip_database_password }}'
restart: always
volumes:
- {{ customer }}-glitchtip-postgres:/var/lib/postgresql/data
- {{ customer }}-glitchtip-backups:/tmp/backups
networks:
{{ customer }}-glitchtip:
ipv4_address: 172.20.4.2
redis:
image: redis
container_name: {{ customer }}-glitchtip_redis
restart: always
networks:
{{ customer }}-glitchtip:
ipv4_address: 172.20.4.3
web:
container_name: {{ customer }}-glitchtip-web
image: glitchtip/glitchtip
labels:
- "diun.enable=true"
depends_on:
- postgres
- redis
ports:
- "127.0.0.1:3017:8000"
environment:
DATABASE_URL: 'postgres://postgres:{{ glitchtip_database_password }}@{{ customer }}-glitchtip-postgres:5432/postgres'
SECRET_KEY: '{{ glitchtip_secret_key }}' # best to run openssl rand -hex 32
PORT: 8000
#EMAIL_URL: 'consolemail://email:password@smtp-url:port' # Example smtp://email:password@smtp_url:port https://glitchtip.com/documentation/install#configuration
GLITCHTIP_DOMAIN: 'https://{{ domain_glitchtip }}' # Change this to your domain
DEFAULT_FROM_EMAIL: 'no-reply@{{ domain }}' # Change this to your email
#CELERY_WORKER_AUTOSCALE: "1,2" # Scale between 1 and 3 to prevent excessive memory usage. Change it or remove to set it to the number of cpu cores.
#CELERY_WORKER_MAX_TASKS_PER_CHILD: "10000"
restart: always
volumes:
- {{ customer }}-glitchtip-uploads:/code/uploads
- {{ customer }}-glitchtip-backups:/tmp/backups
networks:
{{ customer }}-glitchtip:
ipv4_address: 172.20.4.4
worker:
container_name: {{ customer }}-glitchtip-worker
image: glitchtip/glitchtip
command: ./bin/run-celery-with-beat.sh
depends_on:
- postgres
- redis
environment:
DATABASE_URL: 'postgres://postgres:{{ glitchtip_database_password }}@{{ customer }}-glitchtip-postgres:5432/postgres'
SECRET_KEY: '{{ glitchtip_secret_key }}' # best to run openssl rand -hex 32
PORT: 8000
#EMAIL_URL: 'consolemail://email:password@smtp-url:port' # Example smtp://email:password@smtp_url:port https://glitchtip.com/documentation/install#configuration
GLITCHTIP_DOMAIN: 'https://{{ domain_glitchtip }}' # Change this to your domain
DEFAULT_FROM_EMAIL: 'no-reply@{{ domain }}' # Change this to your email
#CELERY_WORKER_AUTOSCALE: "1,2" # Scale between 1 and 3 to prevent excessive memory usage. Change it or remove to set it to the number of cpu cores.
#CELERY_WORKER_MAX_TASKS_PER_CHILD: "10000"
restart: always
volumes:
- {{ customer }}-glitchtip-uploads:/code/uploads
networks:
{{ customer }}-glitchtip:
ipv4_address: 172.20.4.5
migrate:
container_name: {{ customer }}-glitchtip-migrate
image: glitchtip/glitchtip
depends_on:
- postgres
- redis
command: "./manage.py migrate"
environment:
DATABASE_URL: 'postgres://postgres:{{ glitchtip_database_password }}@{{ customer }}-glitchtip-postgres:5432/postgres'
SECRET_KEY: '{{ glitchtip_secret_key }}' # best to run openssl rand -hex 32
PORT: 8000
#EMAIL_URL: 'consolemail://email:password@smtp-url:port' # Example smtp://email:password@smtp_url:port https://glitchtip.com/documentation/install#configuration
GLITCHTIP_DOMAIN: 'https://{{ domain_glitchtip }}' # Change this to your domain
DEFAULT_FROM_EMAIL: 'no-reply@{{ domain }}' # Change this to your email
#CELERY_WORKER_AUTOSCALE: "1,2" # Scale between 1 and 3 to prevent excessive memory usage. Change it or remove to set it to the number of cpu cores.
#CELERY_WORKER_MAX_TASKS_PER_CHILD: "10000"
networks:
{{ customer }}-glitchtip:
ipv4_address: 172.20.4.6
networks:
{{ customer }}-glitchtip:
ipam:
driver: default
config:
- subnet: 172.20.4.0/28
gateway: 172.20.4.1
volumes:
{{ customer }}-glitchtip-postgres:
{{ customer }}-glitchtip-uploads:
{{ customer }}-glitchtip-backups:

View File

@@ -0,0 +1,29 @@
version: '3.9'
services:
html:
container_name: {{ customer }}-html-website
image: nginx
restart: always
labels:
- "diun.enable=true"
volumes:
- {{ customer }}-html:/usr/share/nginx/html:ro
- {{ customer }}-html-backups:/tmp/backups
ports:
- "127.0.0.1:3000:80"
networks:
{{ customer }}-html:
ipv4_address: 172.20.5.2
networks:
{{ customer }}-html:
ipam:
driver: default
config:
- subnet: 172.20.5.0/28
gateway: 172.20.5.1
volumes:
{{ customer }}-html:
{{ customer }}-html-backups:

View File

@@ -0,0 +1,10 @@
KEYCLOAK_ADMIN=admin
KEYCLOAK_ADMIN_PASSWORD={{ keycloak_admin_password }}
KC_DB=postgres
KC_DB_URL=jdbc:postgresql://{{ customer }}-keycloak-db:5432/keycloak
KC_DB_USERNAME=keycloak
KC_DB_PASSWORD={{ keycloak_postgres_password }}
KC_HOSTNAME_STRICT=false
KC_PROXY=edge
KC_HTTP_RELATIVE_PATH=/
KC_HEALTH_ENABLED=true

View File

@@ -0,0 +1,54 @@
version: '3.8'
services:
postgres:
container_name: {{ customer }}-keycloak-db
image: postgres:14
restart: always
volumes:
- {{ customer }}-keycloak-postgres:/var/lib/postgresql/data
- {{ customer }}-keycloak-backups:/tmp/backups
environment:
POSTGRES_DB: keycloak
POSTGRES_USER: keycloak
POSTGRES_PASSWORD: {{ keycloak_postgres_password }}
networks:
{{ customer }}-keycloak:
ipv4_address: 172.20.31.2
keycloak:
container_name: {{ customer }}-keycloak
image: quay.io/keycloak/keycloak:latest
restart: always
command: start
environment:
KC_DB: postgres
KC_DB_URL: jdbc:postgresql://{{ customer }}-keycloak-db:5432/keycloak
KC_DB_USERNAME: keycloak
KC_DB_PASSWORD: {{ keycloak_postgres_password }}
KEYCLOAK_ADMIN: admin
KEYCLOAK_ADMIN_PASSWORD: {{ keycloak_admin_password }}
KC_HOSTNAME_STRICT: "false"
KC_PROXY: edge
KC_HTTP_RELATIVE_PATH: /
KC_HEALTH_ENABLED: "true"
depends_on:
- postgres
ports:
- "127.0.0.1:8080:8080"
networks:
{{ customer }}-keycloak:
ipv4_address: 172.20.31.3
labels:
- "diun.enable=true"
networks:
{{ customer }}-keycloak:
driver: bridge
ipam:
config:
- subnet: 172.20.31.0/28
volumes:
{{ customer }}-keycloak-postgres:
{{ customer }}-keycloak-backups:

View File

@@ -0,0 +1,574 @@
#=====================================================================#
# LibreChat Configuration #
#=====================================================================#
# Please refer to the reference documentation for assistance #
# with configuring your LibreChat environment. #
# #
# https://www.librechat.ai/docs/configuration/dotenv #
#=====================================================================#
#==================================================#
# Server Configuration #
#==================================================#
HOST=0.0.0.0
PORT=3080
MONGO_URI=mongodb://127.0.0.1:27017/LibreChat
DOMAIN_CLIENT=https://{{ domain_librechat }}
DOMAIN_SERVER=https://{{ domain_librechat }}
NO_INDEX=true
# Use the address that is at most n number of hops away from the Express application.
# req.socket.remoteAddress is the first hop, and the rest are looked for in the X-Forwarded-For header from right to left.
# A value of 0 means that the first untrusted address would be req.socket.remoteAddress, i.e. there is no reverse proxy.
# Defaulted to 1.
TRUST_PROXY=1
#===============#
# JSON Logging #
#===============#
# Use when process console logs in cloud deployment like GCP/AWS
CONSOLE_JSON=false
#===============#
# Debug Logging #
#===============#
DEBUG_LOGGING=true
DEBUG_CONSOLE=false
#=============#
# Permissions #
#=============#
# UID=1000
# GID=1000
#===============#
# Configuration #
#===============#
# Use an absolute path, a relative path, or a URL
# CONFIG_PATH="/alternative/path/to/librechat.yaml"
#===================================================#
# Endpoints #
#===================================================#
# ENDPOINTS=openAI,assistants,azureOpenAI,google,gptPlugins,anthropic
PROXY=
#===================================#
# Known Endpoints - librechat.yaml #
#===================================#
# https://www.librechat.ai/docs/configuration/librechat_yaml/ai_endpoints
# ANYSCALE_API_KEY=
# APIPIE_API_KEY=
# COHERE_API_KEY=
# DEEPSEEK_API_KEY=
# DATABRICKS_API_KEY=
# FIREWORKS_API_KEY=
# GROQ_API_KEY=
# HUGGINGFACE_TOKEN=
# MISTRAL_API_KEY=
# OPENROUTER_KEY=
# PERPLEXITY_API_KEY=
# SHUTTLEAI_API_KEY=
# TOGETHERAI_API_KEY=
# UNIFY_API_KEY=
# XAI_API_KEY=
#============#
# Anthropic #
#============#
ANTHROPIC_API_KEY=user_provided
# ANTHROPIC_MODELS=claude-3-7-sonnet-latest,claude-3-7-sonnet-20250219,claude-3-5-haiku-20241022,claude-3-5-sonnet-20241022,claude-3-5-sonnet-latest,claude-3-5-sonnet-20240620,claude-3-opus-20240229,claude-3-sonnet-20240229,claude-3-haiku-20240307,claude-2.1,claude-2,claude-1.2,claude-1,claude-1-100k,claude-instant-1,claude-instant-1-100k
# ANTHROPIC_REVERSE_PROXY=
#============#
# Azure #
#============#
# Note: these variables are DEPRECATED
# Use the `librechat.yaml` configuration for `azureOpenAI` instead
# You may also continue to use them if you opt out of using the `librechat.yaml` configuration
# AZURE_OPENAI_DEFAULT_MODEL=gpt-3.5-turbo # Deprecated
# AZURE_OPENAI_MODELS=gpt-3.5-turbo,gpt-4 # Deprecated
# AZURE_USE_MODEL_AS_DEPLOYMENT_NAME=TRUE # Deprecated
# AZURE_API_KEY= # Deprecated
# AZURE_OPENAI_API_INSTANCE_NAME= # Deprecated
# AZURE_OPENAI_API_DEPLOYMENT_NAME= # Deprecated
# AZURE_OPENAI_API_VERSION= # Deprecated
# AZURE_OPENAI_API_COMPLETIONS_DEPLOYMENT_NAME= # Deprecated
# AZURE_OPENAI_API_EMBEDDINGS_DEPLOYMENT_NAME= # Deprecated
# PLUGINS_USE_AZURE="true" # Deprecated
#=================#
# AWS Bedrock #
#=================#
# BEDROCK_AWS_DEFAULT_REGION=us-east-1 # A default region must be provided
# BEDROCK_AWS_ACCESS_KEY_ID=someAccessKey
# BEDROCK_AWS_SECRET_ACCESS_KEY=someSecretAccessKey
# BEDROCK_AWS_SESSION_TOKEN=someSessionToken
# Note: This example list is not meant to be exhaustive. If omitted, all known, supported model IDs will be included for you.
# BEDROCK_AWS_MODELS=anthropic.claude-3-5-sonnet-20240620-v1:0,meta.llama3-1-8b-instruct-v1:0
# See all Bedrock model IDs here: https://docs.aws.amazon.com/bedrock/latest/userguide/model-ids.html#model-ids-arns
# Notes on specific models:
# The following models are not support due to not supporting streaming:
# ai21.j2-mid-v1
# The following models are not support due to not supporting conversation history:
# ai21.j2-ultra-v1, cohere.command-text-v14, cohere.command-light-text-v14
#============#
# Google #
#============#
GOOGLE_KEY=
# GOOGLE_REVERSE_PROXY=
# Some reverse proxies do not support the X-goog-api-key header, uncomment to pass the API key in Authorization header instead.
# GOOGLE_AUTH_HEADER=true
# Gemini API (AI Studio)
# GOOGLE_MODELS=gemini-2.5-pro-exp-03-25,gemini-2.0-flash-exp,gemini-2.0-flash-thinking-exp-1219,gemini-exp-1121,gemini-exp-1114,gemini-1.5-flash-latest,gemini-1.0-pro,gemini-1.0-pro-001,gemini-1.0-pro-latest,gemini-1.0-pro-vision-latest,gemini-1.5-pro-latest,gemini-pro,gemini-pro-vision
# Vertex AI
# GOOGLE_MODELS=gemini-1.5-flash-preview-0514,gemini-1.5-pro-preview-0514,gemini-1.0-pro-vision-001,gemini-1.0-pro-002,gemini-1.0-pro-001,gemini-pro-vision,gemini-1.0-pro
# GOOGLE_TITLE_MODEL=gemini-pro
# GOOGLE_LOC=us-central1
# Google Safety Settings
# NOTE: These settings apply to both Vertex AI and Gemini API (AI Studio)
#
# For Vertex AI:
# To use the BLOCK_NONE setting, you need either:
# (a) Access through an allowlist via your Google account team, or
# (b) Switch to monthly invoiced billing: https://cloud.google.com/billing/docs/how-to/invoiced-billing
#
# For Gemini API (AI Studio):
# BLOCK_NONE is available by default, no special account requirements.
#
# Available options: BLOCK_NONE, BLOCK_ONLY_HIGH, BLOCK_MEDIUM_AND_ABOVE, BLOCK_LOW_AND_ABOVE
#
# GOOGLE_SAFETY_SEXUALLY_EXPLICIT=BLOCK_ONLY_HIGH
# GOOGLE_SAFETY_HATE_SPEECH=BLOCK_ONLY_HIGH
# GOOGLE_SAFETY_HARASSMENT=BLOCK_ONLY_HIGH
# GOOGLE_SAFETY_DANGEROUS_CONTENT=BLOCK_ONLY_HIGH
# GOOGLE_SAFETY_CIVIC_INTEGRITY=BLOCK_ONLY_HIGH
#============#
# OpenAI #
#============#
OPENAI_API_KEY=
# OPENAI_MODELS=o1,o1-mini,o1-preview,gpt-4o,gpt-4.5-preview,chatgpt-4o-latest,gpt-4o-mini,gpt-3.5-turbo-0125,gpt-3.5-turbo-0301,gpt-3.5-turbo,gpt-4,gpt-4-0613,gpt-4-vision-preview,gpt-3.5-turbo-0613,gpt-3.5-turbo-16k-0613,gpt-4-0125-preview,gpt-4-turbo-preview,gpt-4-1106-preview,gpt-3.5-turbo-1106,gpt-3.5-turbo-instruct,gpt-3.5-turbo-instruct-0914,gpt-3.5-turbo-16k
DEBUG_OPENAI=false
# TITLE_CONVO=false
# OPENAI_TITLE_MODEL=gpt-4o-mini
# OPENAI_SUMMARIZE=true
# OPENAI_SUMMARY_MODEL=gpt-4o-mini
# OPENAI_FORCE_PROMPT=true
# OPENAI_REVERSE_PROXY=
# OPENAI_ORGANIZATION=
#====================#
# Assistants API #
#====================#
ASSISTANTS_API_KEY=
# ASSISTANTS_BASE_URL=
# ASSISTANTS_MODELS=gpt-4o,gpt-4o-mini,gpt-3.5-turbo-0125,gpt-3.5-turbo-16k-0613,gpt-3.5-turbo-16k,gpt-3.5-turbo,gpt-4,gpt-4-0314,gpt-4-32k-0314,gpt-4-0613,gpt-3.5-turbo-0613,gpt-3.5-turbo-1106,gpt-4-0125-preview,gpt-4-turbo-preview,gpt-4-1106-preview
#==========================#
# Azure Assistants API #
#==========================#
# Note: You should map your credentials with custom variables according to your Azure OpenAI Configuration
# The models for Azure Assistants are also determined by your Azure OpenAI configuration.
# More info, including how to enable use of Assistants with Azure here:
# https://www.librechat.ai/docs/configuration/librechat_yaml/ai_endpoints/azure#using-assistants-with-azure
#============#
# Plugins #
#============#
# PLUGIN_MODELS=gpt-4o,gpt-4o-mini,gpt-4,gpt-4-turbo-preview,gpt-4-0125-preview,gpt-4-1106-preview,gpt-4-0613,gpt-3.5-turbo,gpt-3.5-turbo-0125,gpt-3.5-turbo-1106,gpt-3.5-turbo-0613
DEBUG_PLUGINS=true
CREDS_KEY={{ librechat_creds_key }}
CREDS_IV={{ librechat_creds_iv }}
# Azure AI Search
#-----------------
AZURE_AI_SEARCH_SERVICE_ENDPOINT=
AZURE_AI_SEARCH_INDEX_NAME=
AZURE_AI_SEARCH_API_KEY=
AZURE_AI_SEARCH_API_VERSION=
AZURE_AI_SEARCH_SEARCH_OPTION_QUERY_TYPE=
AZURE_AI_SEARCH_SEARCH_OPTION_TOP=
AZURE_AI_SEARCH_SEARCH_OPTION_SELECT=
# DALL·E
#----------------
DALLE_API_KEY=
DALLE3_API_KEY=
DALLE2_API_KEY=
# DALLE3_SYSTEM_PROMPT=
# DALLE2_SYSTEM_PROMPT=
# DALLE_REVERSE_PROXY=
# DALLE3_BASEURL=
# DALLE2_BASEURL=
# DALL·E (via Azure OpenAI)
# Note: requires some of the variables above to be set
#----------------
# DALLE3_AZURE_API_VERSION=
# DALLE2_AZURE_API_VERSION=
# Flux
#-----------------
FLUX_API_BASE_URL=https://api.us1.bfl.ai
# FLUX_API_BASE_URL = 'https://api.bfl.ml';
# Get your API key at https://api.us1.bfl.ai/auth/profile
# FLUX_API_KEY=
# Google
#-----------------
GOOGLE_SEARCH_API_KEY=
GOOGLE_CSE_ID=
# YOUTUBE
#-----------------
YOUTUBE_API_KEY=
# SerpAPI
#-----------------
SERPAPI_API_KEY=
# Stable Diffusion
#-----------------
SD_WEBUI_URL=http://host.docker.internal:7860
# Tavily
#-----------------
TAVILY_API_KEY=
# Traversaal
#-----------------
TRAVERSAAL_API_KEY=
# WolframAlpha
#-----------------
WOLFRAM_APP_ID=
# Zapier
#-----------------
ZAPIER_NLA_API_KEY=
#==================================================#
# Search #
#==================================================#
SEARCH=true
MEILI_NO_ANALYTICS=true
MEILI_HOST=http://0.0.0.0:7700
MEILI_MASTER_KEY={{ librechat_meili_master_key }}
# Optional: Disable indexing, useful in a multi-node setup
# where only one instance should perform an index sync.
# MEILI_NO_SYNC=true
#==================================================#
# Speech to Text & Text to Speech #
#==================================================#
STT_API_KEY=
TTS_API_KEY=
#==================================================#
# RAG #
#==================================================#
# More info: https://www.librechat.ai/docs/configuration/rag_api
#RAG_OPENAI_BASEURL=http://localhost:8000
RAG_OPENAI_API_KEY=
RAG_USE_FULL_CONTEXT=true
EMBEDDINGS_PROVIDER=openai
EMBEDDINGS_MODEL=text-embedding-3-small
POSTGRES_DB=librechat
POSTGRES_USER={{ librechat_postgres_user }}
POSTGRES_PASSWORD={{ librechat_postgres_password }}
DEBUG_RAG_API=true
#===================================================#
# User System #
#===================================================#
#========================#
# Moderation #
#========================#
OPENAI_MODERATION=false
OPENAI_MODERATION_API_KEY=
# OPENAI_MODERATION_REVERSE_PROXY=
BAN_VIOLATIONS=true
BAN_DURATION=1000 * 60 * 60 * 2
BAN_INTERVAL=20
LOGIN_VIOLATION_SCORE=1
REGISTRATION_VIOLATION_SCORE=1
CONCURRENT_VIOLATION_SCORE=1
MESSAGE_VIOLATION_SCORE=1
NON_BROWSER_VIOLATION_SCORE=20
LOGIN_MAX=7
LOGIN_WINDOW=5
REGISTER_MAX=5
REGISTER_WINDOW=60
LIMIT_CONCURRENT_MESSAGES=true
CONCURRENT_MESSAGE_MAX=2
LIMIT_MESSAGE_IP=true
MESSAGE_IP_MAX=40
MESSAGE_IP_WINDOW=1
LIMIT_MESSAGE_USER=false
MESSAGE_USER_MAX=40
MESSAGE_USER_WINDOW=1
ILLEGAL_MODEL_REQ_SCORE=5
#========================#
# Balance #
#========================#
# CHECK_BALANCE=false
# START_BALANCE=20000 # note: the number of tokens that will be credited after registration.
#========================#
# Registration and Login #
#========================#
ALLOW_EMAIL_LOGIN=true
ALLOW_REGISTRATION=true
ALLOW_SOCIAL_LOGIN=true
ALLOW_SOCIAL_REGISTRATION=true
ALLOW_PASSWORD_RESET=false
# ALLOW_ACCOUNT_DELETION=true # note: enabled by default if omitted/commented out
ALLOW_UNVERIFIED_EMAIL_LOGIN=true
SESSION_EXPIRY=1000 * 60 * 15
REFRESH_TOKEN_EXPIRY=(1000 * 60 * 60 * 24) * 7
JWT_SECRET={{ librechat_jwt_secret }}
JWT_REFRESH_SECRET={{ librechat_jwt_refresh_secret }}
# Discord
DISCORD_CLIENT_ID=
DISCORD_CLIENT_SECRET=
DISCORD_CALLBACK_URL=/oauth/discord/callback
# Facebook
FACEBOOK_CLIENT_ID=
FACEBOOK_CLIENT_SECRET=
FACEBOOK_CALLBACK_URL=/oauth/facebook/callback
# GitHub
GITHUB_CLIENT_ID=
GITHUB_CLIENT_SECRET=
GITHUB_CALLBACK_URL=/oauth/github/callback
# GitHub Enterprise
# GITHUB_ENTERPRISE_BASE_URL=
# GITHUB_ENTERPRISE_USER_AGENT=
# Google
GOOGLE_CLIENT_ID=
GOOGLE_CLIENT_SECRET=
GOOGLE_CALLBACK_URL=/oauth/google/callback
# Apple
APPLE_CLIENT_ID=
APPLE_TEAM_ID=
APPLE_KEY_ID=
APPLE_PRIVATE_KEY_PATH=
APPLE_CALLBACK_URL=/oauth/apple/callback
# OpenID
OPENID_CLIENT_ID=
OPENID_CLIENT_SECRET=
OPENID_ISSUER=
OPENID_SESSION_SECRET=
OPENID_SCOPE="openid profile email"
OPENID_CALLBACK_URL=/oauth/openid/callback
OPENID_REQUIRED_ROLE=
OPENID_REQUIRED_ROLE_TOKEN_KIND=
OPENID_REQUIRED_ROLE_PARAMETER_PATH=
# Set to determine which user info property returned from OpenID Provider to store as the User's username
OPENID_USERNAME_CLAIM=
# Set to determine which user info property returned from OpenID Provider to store as the User's name
OPENID_NAME_CLAIM=
OPENID_BUTTON_LABEL=
OPENID_IMAGE_URL=
# Set to true to automatically redirect to the OpenID provider when a user visits the login page
# This will bypass the login form completely for users, only use this if OpenID is your only authentication method
OPENID_AUTO_REDIRECT=false
# LDAP
LDAP_URL=
LDAP_BIND_DN=
LDAP_BIND_CREDENTIALS=
LDAP_USER_SEARCH_BASE=
#LDAP_SEARCH_FILTER="mail="
LDAP_CA_CERT_PATH=
# LDAP_TLS_REJECT_UNAUTHORIZED=
# LDAP_STARTTLS=
# LDAP_LOGIN_USES_USERNAME=true
# LDAP_ID=
# LDAP_USERNAME=
# LDAP_EMAIL=
# LDAP_FULL_NAME=
#========================#
# Email Password Reset #
#========================#
EMAIL_SERVICE=
EMAIL_HOST=
EMAIL_PORT=25
EMAIL_ENCRYPTION=
EMAIL_ENCRYPTION_HOSTNAME=
EMAIL_ALLOW_SELFSIGNED=
EMAIL_USERNAME=
EMAIL_PASSWORD=
EMAIL_FROM_NAME=
EMAIL_FROM=noreply@librechat.ai
#========================#
# Firebase CDN #
#========================#
FIREBASE_API_KEY=
FIREBASE_AUTH_DOMAIN=
FIREBASE_PROJECT_ID=
FIREBASE_STORAGE_BUCKET=
FIREBASE_MESSAGING_SENDER_ID=
FIREBASE_APP_ID=
#========================#
# S3 AWS Bucket #
#========================#
AWS_ENDPOINT_URL=
AWS_ACCESS_KEY_ID=
AWS_SECRET_ACCESS_KEY=
AWS_REGION=
AWS_BUCKET_NAME=
#========================#
# Azure Blob Storage #
#========================#
AZURE_STORAGE_CONNECTION_STRING=
AZURE_STORAGE_PUBLIC_ACCESS=false
AZURE_CONTAINER_NAME=files
#========================#
# Shared Links #
#========================#
ALLOW_SHARED_LINKS=true
ALLOW_SHARED_LINKS_PUBLIC=true
#==============================#
# Static File Cache Control #
#==============================#
# Leave commented out to use defaults: 1 day (86400 seconds) for s-maxage and 2 days (172800 seconds) for max-age
# NODE_ENV must be set to production for these to take effect
# STATIC_CACHE_MAX_AGE=172800
# STATIC_CACHE_S_MAX_AGE=86400
# If you have another service in front of your LibreChat doing compression, disable express based compression here
# DISABLE_COMPRESSION=true
#===================================================#
# UI #
#===================================================#
APP_TITLE={{ customer }} AI
CUSTOM_FOOTER=
HELP_AND_FAQ_URL=https://{{ domain }}
# SHOW_BIRTHDAY_ICON=true
# Google tag manager id
#ANALYTICS_GTM_ID=user provided google tag manager id
#===============#
# REDIS Options #
#===============#
# REDIS_URI=10.10.10.10:6379
# USE_REDIS=true
# USE_REDIS_CLUSTER=true
# REDIS_CA=/path/to/ca.crt
#==================================================#
# Others #
#==================================================#
# You should leave the following commented out #
# NODE_ENV=
# E2E_USER_EMAIL=
# E2E_USER_PASSWORD=
#=====================================================#
# Cache Headers #
#=====================================================#
# Headers that control caching of the index.html #
# Default configuration prevents caching to ensure #
# users always get the latest version. Customize #
# only if you understand caching implications. #
# INDEX_HTML_CACHE_CONTROL=no-cache, no-store, must-revalidate
# INDEX_HTML_PRAGMA=no-cache
# INDEX_HTML_EXPIRES=0
# no-cache: Forces validation with server before using cached version
# no-store: Prevents storing the response entirely
# must-revalidate: Prevents using stale content when offline
#=====================================================#
# OpenWeather #
#=====================================================#
OPENWEATHER_API_KEY=

View File

@@ -0,0 +1,96 @@
services:
api:
# build:
# context: .
# dockerfile: Dockerfile.multi
# target: api-build
image: ghcr.io/danny-avila/librechat:v0.7.8
container_name: {{ customer }}-librechat
labels:
- "diun.enable=true"
ports:
      - "127.0.0.1:3080:3080"
depends_on:
- mongodb
- rag_api
restart: always
user: "${UID}:${GID}"
extra_hosts:
- "host.docker.internal:host-gateway"
env_file:
- /opt/letsbe/env/librechat.env
environment:
- HOST=0.0.0.0
- NODE_ENV=production
- MONGO_URI=mongodb://mongodb:27017/LibreChat
- MEILI_HOST=http://meilisearch:7700
- RAG_PORT=${RAG_PORT:-8000}
- RAG_API_URL=http://rag_api:${RAG_PORT:-8000}
- EMAIL_HOST=mail.{{ domain }}
- EMAIL_PORT=587
- EMAIL_ENCRYPTION=starttls
- EMAIL_USERNAME=noreply@{{ domain }}
      - EMAIL_PASSWORD={{ librechat_email_password }}
      - EMAIL_FROM_NAME={{ customer }} AI
- EMAIL_FROM=noreply@{{ domain }}
volumes:
- type: bind
source: ./librechat.yaml
target: /app/librechat.yaml
- ./images:/app/client/public/images
- ./uploads:/app/uploads
- ./logs:/app/api/logs
mongodb:
container_name: librechat-mongodb
# ports: # Uncomment this to access mongodb from outside docker, not safe in deployment
# - 27018:27017
image: mongo
restart: always
user: "${UID}:${GID}"
volumes:
- ./data-node:/data/db
command: mongod --noauth
meilisearch:
container_name: librechat-meilisearch
image: getmeili/meilisearch:v1.12.3
restart: always
user: "${UID}:${GID}"
# ports: # Uncomment this to access meilisearch from outside docker
# - 7700:7700 # if exposing these ports, make sure your master key is not the default value
env_file:
- /opt/letsbe/env/librechat.env
environment:
- MEILI_HOST=http://meilisearch:7700
- MEILI_NO_ANALYTICS=true
volumes:
- ./meili_data_v1.12:/meili_data
vectordb:
image: ankane/pgvector:latest
environment:
POSTGRES_DB: librechat
      POSTGRES_USER: "{{ librechat_postgres_user }}"
      POSTGRES_PASSWORD: "{{ librechat_postgres_password }}"
restart: always
volumes:
- pgdata2:/var/lib/postgresql/data
rag_api:
image: ghcr.io/danny-avila/librechat-rag-api-dev-lite:latest
environment:
- DB_HOST=vectordb
- RAG_PORT=8000
restart: always
depends_on:
- vectordb
env_file:
- /opt/letsbe/env/librechat.env
ports:
      - "127.0.0.1:8000:8000"
networks:
# Declare the same network name as in the ActivePieces file, but mark as external
{{ customer }}-activepieces:
external: true
volumes:
pgdata2:

View File

@@ -0,0 +1,318 @@
# For more information, see the Configuration Guide:
# https://www.librechat.ai/docs/configuration/librechat_yaml
# Configuration version (required)
version: 1.2.1
# Cache settings: Set to true to enable caching
cache: true
# File strategy s3/firebase
# fileStrategy: "s3"
# Custom interface configuration
interface:
customWelcome: "Welcome to {{ customer }} AI! Enjoy your experience."
# Privacy policy settings
privacyPolicy:
externalUrl: 'https://librechat.ai/privacy-policy'
openNewTab: true
# Terms of service
termsOfService:
externalUrl: 'https://librechat.ai/tos'
openNewTab: true
modalAcceptance: true
modalTitle: "Terms of Service for LibreChat"
modalContent: |
# Terms and Conditions for LibreChat
*Effective Date: February 18, 2024*
      Welcome to LibreChat, the informational website for the open-source AI chat platform, available at https://librechat.ai. These Terms of Service ("Terms") govern your use of our website and the services we offer. By accessing or using the Website, you agree to be bound by these Terms and our Privacy Policy, accessible at https://librechat.ai/privacy.
## 1. Ownership
Upon purchasing a package from LibreChat, you are granted the right to download and use the code for accessing an admin panel for LibreChat. While you own the downloaded code, you are expressly prohibited from reselling, redistributing, or otherwise transferring the code to third parties without explicit permission from LibreChat.
## 2. User Data
We collect personal data, such as your name, email address, and payment information, as described in our Privacy Policy. This information is collected to provide and improve our services, process transactions, and communicate with you.
## 3. Non-Personal Data Collection
The Website uses cookies to enhance user experience, analyze site usage, and facilitate certain functionalities. By using the Website, you consent to the use of cookies in accordance with our Privacy Policy.
## 4. Use of the Website
You agree to use the Website only for lawful purposes and in a manner that does not infringe the rights of, restrict, or inhibit anyone else's use and enjoyment of the Website. Prohibited behavior includes harassing or causing distress or inconvenience to any person, transmitting obscene or offensive content, or disrupting the normal flow of dialogue within the Website.
## 5. Governing Law
These Terms shall be governed by and construed in accordance with the laws of the United States, without giving effect to any principles of conflicts of law.
## 6. Changes to the Terms
We reserve the right to modify these Terms at any time. We will notify users of any changes by email. Your continued use of the Website after such changes have been notified will constitute your consent to such changes.
## 7. Contact Information
If you have any questions about these Terms, please contact us at contact@librechat.ai.
By using the Website, you acknowledge that you have read these Terms of Service and agree to be bound by them.
endpointsMenu: true
modelSelect: true
parameters: true
sidePanel: true
presets: true
prompts: true
bookmarks: true
multiConvo: true
agents: true
# Example Registration Object Structure (optional)
registration:
socialLogins: ['github', 'google', 'discord', 'openid', 'facebook', 'apple']
# allowedDomains:
# - "gmail.com"
# Example Balance settings
# balance:
# enabled: false
# startBalance: 20000
# autoRefillEnabled: false
# refillIntervalValue: 30
# refillIntervalUnit: 'days'
# refillAmount: 10000
speech:
tts:
openai:
url: 'https://api.openai.com/v1'
apiKey: ''
model: 'tts-1-hd'
voices: ['alloy']
#
stt:
openai:
url: 'https://api.openai.com/v1'
apiKey: ''
model: 'whisper-1'
# rateLimits:
# fileUploads:
# ipMax: 100
# ipWindowInMinutes: 60 # Rate limit window for file uploads per IP
# userMax: 50
# userWindowInMinutes: 60 # Rate limit window for file uploads per user
# conversationsImport:
# ipMax: 100
# ipWindowInMinutes: 60 # Rate limit window for conversation imports per IP
# userMax: 50
# userWindowInMinutes: 60 # Rate limit window for conversation imports per user
# Example Actions Object Structure
actions:
allowedDomains:
- "swapi.dev"
- "librechat.ai"
- "google.com"
- "{{ domain_librechat }}"
- "{{ domain_activepieces }}"
# Example MCP Servers Object Structure
# mcpServers:
# everything:
# # type: sse # type can optionally be omitted
# url: http://localhost:3001/sse
# timeout: 60000 # 1 minute timeout for this server, this is the default timeout for MCP servers.
# puppeteer:
# type: stdio
# command: npx
# args:
# - -y
# - "@modelcontextprotocol/server-puppeteer"
# timeout: 300000 # 5 minutes timeout for this server
# filesystem:
# # type: stdio
# command: npx
# args:
# - -y
# - "@modelcontextprotocol/server-filesystem"
# - /home/user/LibreChat/
# iconPath: /home/user/LibreChat/client/public/assets/logo.svg
# mcp-obsidian:
# command: npx
# args:
# - -y
# - "mcp-obsidian"
# - /path/to/obsidian/vault
#mcpServers:
#PortNimaraAI:
#type: sse
#url: "https://automation.portnimara.com/api/v1/mcp/d6br5VnJuHUPuzpFUGJEo/sse"
#command: npx
#args:
# - -y
# - mcp-remote
# - "https://automation.portnimara.com/api/v1/mcp/d6br5VnJuHUPuzpFUGJEo/sse"
# Definition of custom endpoints
endpoints:
# assistants:
# disableBuilder: false # Disable Assistants Builder Interface by setting to `true`
# pollIntervalMs: 3000 # Polling interval for checking assistant updates
# timeoutMs: 180000 # Timeout for assistant operations
# # Should only be one or the other, either `supportedIds` or `excludedIds`
# supportedIds: ["asst_supportedAssistantId1", "asst_supportedAssistantId2"]
# # excludedIds: ["asst_excludedAssistantId"]
# # Only show assistants that the user created or that were created externally (e.g. in Assistants playground).
# # privateAssistants: false # Does not work with `supportedIds` or `excludedIds`
# # (optional) Models that support retrieval, will default to latest known OpenAI models that support the feature
# retrievalModels: ["gpt-4-turbo-preview"]
# # (optional) Assistant Capabilities available to all users. Omit the ones you wish to exclude. Defaults to list below.
# capabilities: ["code_interpreter", "retrieval", "actions", "tools", "image_vision"]
# agents:
# # (optional) Default recursion depth for agents, defaults to 25
# recursionLimit: 50
# # (optional) Max recursion depth for agents, defaults to 25
# maxRecursionLimit: 100
# # (optional) Disable the builder interface for agents
# disableBuilder: false
# # (optional) Agent Capabilities available to all users. Omit the ones you wish to exclude. Defaults to list below.
# capabilities: ["execute_code", "file_search", "actions", "tools"]
custom:
# Groq Example
- name: 'groq'
apiKey: '${GROQ_API_KEY}'
baseURL: 'https://api.groq.com/openai/v1/'
models:
default:
[
'llama3-70b-8192',
'llama3-8b-8192',
'llama2-70b-4096',
'mixtral-8x7b-32768',
'gemma-7b-it',
]
fetch: false
titleConvo: true
titleModel: 'mixtral-8x7b-32768'
modelDisplayLabel: 'groq'
# Mistral AI Example
- name: 'Mistral' # Unique name for the endpoint
# For `apiKey` and `baseURL`, you can use environment variables that you define.
# recommended environment variables:
apiKey: '${MISTRAL_API_KEY}'
baseURL: 'https://api.mistral.ai/v1'
# Models configuration
models:
# List of default models to use. At least one value is required.
default: ['mistral-tiny', 'mistral-small', 'mistral-medium']
# Fetch option: Set to true to fetch models from API.
fetch: true # Defaults to false.
# Optional configurations
# Title Conversation setting
titleConvo: true # Set to true to enable title conversation
# Title Method: Choose between "completion" or "functions".
# titleMethod: "completion" # Defaults to "completion" if omitted.
# Title Model: Specify the model to use for titles.
titleModel: 'mistral-tiny' # Defaults to "gpt-3.5-turbo" if omitted.
# Summarize setting: Set to true to enable summarization.
# summarize: false
# Summary Model: Specify the model to use if summarization is enabled.
# summaryModel: "mistral-tiny" # Defaults to "gpt-3.5-turbo" if omitted.
# Force Prompt setting: If true, sends a `prompt` parameter instead of `messages`.
# forcePrompt: false
# The label displayed for the AI model in messages.
modelDisplayLabel: 'Mistral' # Default is "AI" when not set.
# Add additional parameters to the request. Default params will be overwritten.
# addParams:
# safe_prompt: true # This field is specific to Mistral AI: https://docs.mistral.ai/api/
# Drop Default params parameters from the request. See default params in guide linked below.
# NOTE: For Mistral, it is necessary to drop the following parameters or you will encounter a 422 Error:
dropParams: ['stop', 'user', 'frequency_penalty', 'presence_penalty']
# OpenRouter Example
- name: 'OpenRouter'
# For `apiKey` and `baseURL`, you can use environment variables that you define.
# recommended environment variables:
apiKey: '${OPENROUTER_KEY}'
baseURL: 'https://openrouter.ai/api/v1'
models:
default: ['meta-llama/llama-3-70b-instruct']
fetch: true
titleConvo: true
titleModel: 'meta-llama/llama-3-70b-instruct'
# Recommended: Drop the stop parameter from the request as Openrouter models use a variety of stop tokens.
dropParams: ['stop']
modelDisplayLabel: 'OpenRouter'
# Portkey AI Example
- name: "Portkey"
apiKey: "dummy"
baseURL: 'https://api.portkey.ai/v1'
headers:
x-portkey-api-key: '${PORTKEY_API_KEY}'
x-portkey-virtual-key: '${PORTKEY_OPENAI_VIRTUAL_KEY}'
models:
default: ['gpt-4o-mini', 'gpt-4o', 'chatgpt-4o-latest']
fetch: true
titleConvo: true
titleModel: 'current_model'
summarize: false
summaryModel: 'current_model'
forcePrompt: false
modelDisplayLabel: 'Portkey'
iconURL: https://images.crunchbase.com/image/upload/c_pad,f_auto,q_auto:eco,dpr_1/rjqy7ghvjoiu4cd1xjbf
# fileConfig:
# endpoints:
# assistants:
# fileLimit: 5
# fileSizeLimit: 10 # Maximum size for an individual file in MB
# totalSizeLimit: 50 # Maximum total size for all files in a single request in MB
# supportedMimeTypes:
# - "image/.*"
# - "application/pdf"
# openAI:
# disabled: false # Disables file uploading to the OpenAI endpoint
# default:
# totalSizeLimit: 20
# YourCustomEndpointName:
# fileLimit: 2
# fileSizeLimit: 5
# serverFileSizeLimit: 100 # Global server file size limit in MB
# avatarSizeLimit: 2 # Limit for user avatar image size in MB
# # See the Custom Configuration Guide for more information on Assistants Config:
# # https://www.librechat.ai/docs/configuration/librechat_yaml/object_structure/assistants_endpoint
#modelSpecs:
# (optional) force the UI to only ever pick from this list
#enforce: false
#prioritize: true
#list:
# - name: "port-nimara-agent"
#label: "{{ customer }} AI"
#default: true # ← makes it the default on new chats
#description: "Agent"
#preset:
#endpoint: "agents" # ← use the Agents endpoint
#agent_id: "" # ← your actual agents ID

View File

@@ -0,0 +1,15 @@
[app]
address = "0.0.0.0:9000"
admin_username = "{{ listmonk_admin_username }}"
admin_password = "{{ listmonk_admin_password }}"
[db]
host = "{{ customer }}-listmonk-db"
port = 5432
user = "{{ listmonk_db_user }}"
password = "{{ listmonk_db_password }}"
database = "listmonk"
ssl_mode = "disable"
max_open = 25
max_idle = 25
max_lifetime = "300s"

View File

@@ -0,0 +1,57 @@
version: '3.9'
services:
listmonk-db:
container_name: {{ customer }}-listmonk-db
image: postgres:13
restart: always
volumes:
- {{ customer }}-listmonk-postgresql:/var/lib/postgresql/data
- {{ customer }}-listmonk-backups:/tmp/backups
ports:
- "127.0.0.1:3037:5432"
environment:
POSTGRES_DB: listmonk
      POSTGRES_USER: "{{ listmonk_db_user }}"
      POSTGRES_PASSWORD: "{{ listmonk_db_password }}"
healthcheck:
test: ["CMD-SHELL", "pg_isready -U {{ listmonk_db_user }} -d listmonk"]
interval: 10s
timeout: 5s
retries: 6
networks:
{{ customer }}-listmonk:
ipv4_address: 172.20.6.2
listmonk-web:
container_name: {{ customer }}-listmonk-web
image: listmonk/listmonk:latest
restart: always
labels:
- "diun.enable=true"
command: [sh, -c, "yes | ./listmonk --install --config config.toml && ./listmonk --config config.toml"]
volumes:
- ./config.toml:/listmonk/config.toml
- {{ customer }}-listmonk-backups:/tmp/backups
ports:
- "127.0.0.1:3006:9000"
depends_on:
- listmonk-db
environment:
TZ: Etc/UTC
networks:
{{ customer }}-listmonk:
ipv4_address: 172.20.6.3
networks:
{{ customer }}-listmonk:
ipam:
driver: default
config:
- subnet: 172.20.6.0/28
gateway: 172.20.6.1
volumes:
{{ customer }}-listmonk-postgresql:
{{ customer }}-listmonk-backups:

View File

@@ -0,0 +1,32 @@
version: '3'
services:
minio:
image: minio/minio:latest
container_name: {{ customer }}-minio
restart: always
labels:
- "diun.enable=true"
volumes:
- {{ customer }}-minio-data:/data
environment:
- MINIO_ROOT_USER={{ minio_root_user }}
- MINIO_ROOT_PASSWORD={{ minio_root_password }}
command: server /data --console-address ":9001"
ports:
- "0.0.0.0:3058:9000"
- "0.0.0.0:3059:9001"
networks:
{{ customer }}-minio:
ipv4_address: 172.20.26.2
networks:
{{ customer }}-minio:
ipam:
driver: default
config:
- subnet: 172.20.26.0/28
gateway: 172.20.26.1
volumes:
{{ customer }}-minio-data:

View File

@@ -0,0 +1,63 @@
version: '3.8'
services:
  n8n-postgres:
    container_name: {{ customer }}-n8n-postgres
    restart: always
    image: postgres:16 #original: postgres:latest
    environment:
      - POSTGRES_DB=n8n
      - POSTGRES_USER={{ n8n_postgres_user }}
      - POSTGRES_PASSWORD={{ n8n_postgres_password }}
    volumes:
      - {{ customer }}-n8n-postgres:/var/lib/postgresql/data
      - {{ customer }}-n8n-backups:/tmp/backups
    networks:
      {{ customer }}-n8n:
        ipv4_address: 172.20.8.2
  n8n:
    container_name: {{ customer }}-n8n
    restart: always
    image: docker.n8n.io/n8nio/n8n
    labels:
      - "diun.enable=true"
    ports:
      - "127.0.0.1:3025:5678"
    environment:
      - DB_TYPE=postgresdb
      - DB_POSTGRESDB_DATABASE=n8n
      # Resolved via the shared user-defined network (service name DNS).
      - DB_POSTGRESDB_HOST=n8n-postgres
      - DB_POSTGRESDB_PORT=5432
      - DB_POSTGRESDB_USER={{ n8n_postgres_user }}
      #- DB_POSTGRESDB_SCHEMA=public
      - DB_POSTGRESDB_PASSWORD={{ n8n_postgres_password }}
      - N8N_EDITOR_BASE_URL=https://{{ domain_n8n }}
      # SMTP settings are intentionally left blank in the template; fill per
      # deployment or n8n's email features stay disabled.
      - N8N_EMAIL_MODE=smtp
      - N8N_SMTP_SSL=false
      - N8N_SMTP_HOST=
      - N8N_SMTP_PORT=
      - N8N_SMTP_USER=
      - N8N_SMTP_PASS=
      - N8N_SMTP_SENDER=
    volumes:
      - {{ customer }}-n8n-storage:/home/node/.n8n
      - {{ customer }}-n8n-backups:/tmp/backups
    # Replaces the legacy `links:` entry: on a user-defined network the service
    # name already resolves, and depends_on keeps the start ordering links gave.
    depends_on:
      - n8n-postgres
    networks:
      {{ customer }}-n8n:
        ipv4_address: 172.20.8.3
networks:
  {{ customer }}-n8n:
    ipam:
      driver: default
      config:
        - subnet: 172.20.8.0/28
          gateway: 172.20.8.1
volumes:
  {{ customer }}-n8n-postgres:
  {{ customer }}-n8n-storage:
  {{ customer }}-n8n-backups:

View File

@@ -0,0 +1,171 @@
version: '3.9'
services:
  db:
    container_name: {{ customer }}-nextcloud-postgres
    image: postgres:16-alpine #original postgres:alpine
    restart: always
    volumes:
      - {{ customer }}-nextcloud-database:/var/lib/postgresql/data:Z
      - {{ customer }}-nextcloud-backups:/tmp/backups
    environment:
      POSTGRES_DB: nextcloud
      POSTGRES_USER: {{ nextcloud_postgres_user }}
      POSTGRES_PASSWORD: {{ nextcloud_postgres_password }}
    networks:
      {{ customer }}-nextcloud:
        ipv4_address: 172.20.9.2
  redis:
    container_name: {{ customer }}-nextcloud-redis
    image: redis:alpine
    restart: always
    networks:
      {{ customer }}-nextcloud:
        ipv4_address: 172.20.9.3
  app:
    container_name: {{ customer }}-nextcloud-app
    image: nextcloud:production-apache
    restart: always
    labels:
      - "diun.enable=true"
    ports:
      - '127.0.0.1:3023:80'
    volumes:
      - {{ customer }}-nextcloud-html:/var/www/html:z
      - /opt/letsbe/config/nextcloud:/var/www/html/config
      - /opt/letsbe/data/nextcloud:/var/www/html/data
      - {{ customer }}-nextcloud-backups:/tmp/backups
    environment:
      #Nextcloud
      POSTGRES_HOST: {{ customer }}-nextcloud-postgres
      REDIS_HOST: {{ customer }}-nextcloud-redis
      POSTGRES_DB: nextcloud
      POSTGRES_USER: {{ nextcloud_postgres_user }}
      POSTGRES_PASSWORD: {{ nextcloud_postgres_password }}
      # #SMTP
      # SMTP_HOST: 'mail.{{ domain }}'
      # SMTP_PORT: '587'
      # SMTP_NAME: 'system@{{ domain }}'
      # SMTP_PASSWORD: ''
      # MAIL_FROM_ADDRESS: 'system'
      # MAIL_DOMAIN: '{{ domain }}'
      #Admin
      NEXTCLOUD_ADMIN_USER: administrator@letsbe.biz
      NEXTCLOUD_ADMIN_PASSWORD: '{{ nextcloud_admin_password }}'
      #Config
      NEXTCLOUD_TRUSTED_DOMAINS: '{{ domain_nextcloud }} 127.0.0.1 0.0.0.0'
      # Nextcloud expects IP addresses / CIDR ranges here, not hostnames or
      # globs — the previous '172.*.*.*' glob is not a valid entry. Trust the
      # loopback proxy and this stack's own subnet.
      TRUSTED_PROXIES: '127.0.0.1 172.20.9.0/28'
      OVERWRITECLIURL: https://{{ domain_nextcloud }}
      OVERWRITEPROTOCOL: https
      OVERWRITEHOST: {{ domain_nextcloud }}
      #APACHE_DISABLE_REWRITE_IP: 1
    depends_on:
      - db
      - redis
    networks:
      {{ customer }}-nextcloud:
        ipv4_address: 172.20.9.4
  cron:
    container_name: {{ customer }}-nextcloud-cron
    image: nextcloud:production-apache
    restart: always
    volumes:
      - {{ customer }}-nextcloud-html:/var/www/html:z
      - /opt/letsbe/config/nextcloud:/var/www/html/config
      - /opt/letsbe/data/nextcloud:/var/www/html/data
    entrypoint: /cron.sh
    depends_on:
      - db
      - redis
    networks:
      {{ customer }}-nextcloud:
        ipv4_address: 172.20.9.5
  collabora:
    image: collabora/code:latest
    container_name: {{ customer }}-nextcloud-collabora
    restart: always
    environment:
      - password={{ collabora_password }}
      - username={{ collabora_user }}
      - domain={{ domain_collabora }}
      - extra_params=--o:ssl.enable=true
    ports:
      - '127.0.0.1:3044:9980'
    networks:
      {{ customer }}-nextcloud:
        ipv4_address: 172.20.9.7
  nextcloud-whiteboard-server:
    # container_name and restart added for consistency with the other services
    # in this stack (was previously anonymous and not restarted on failure).
    container_name: {{ customer }}-nextcloud-whiteboard
    image: ghcr.io/nextcloud-releases/whiteboard:release
    restart: always
    ports:
      - '127.0.0.1:3060:3002'
    environment:
      # The whiteboard server needs the full base URL including scheme to
      # reach the Nextcloud instance (bare hostname was previously passed).
      NEXTCLOUD_URL: 'https://{{ domain_nextcloud }}'
      JWT_SECRET_KEY: '{{ nextcloud_jwt_secret }}'
    networks:
      {{ customer }}-nextcloud:
        ipv4_address: 172.20.9.8
  talk-hpb:
    container_name: {{ customer }}-nextcloud-talk-hpb
    image: ghcr.io/nextcloud-releases/aio-talk:latest
    restart: always
    environment:
      NC_DOMAIN: {{ domain_nextcloud }}
      TALK_PORT: "3478"
      TURN_SECRET: "{{ turn_secret }}"
      SIGNALING_SECRET: "{{ signaling_secret }}"
      INTERNAL_SECRET: "{{ internal_secret }}"
    ports:
      - "127.0.0.1:3061:8081"
    networks:
      {{ customer }}-nextcloud:
        ipv4_address: 172.20.9.6
  coturn:
    image: instrumentisto/coturn:latest
    container_name: {{ customer }}-coturn
    restart: always
    ports:
      - "3478:3478/udp"
      - "3478:3478/tcp"
      - "49160-49200:49160-49200/udp"
    # external-ip maps the public address to this container's fixed stack IP.
    command:
      -n
      --log-file=stdout
      --fingerprint
      --realm={{ domain_nextcloud }}
      --external-ip={{ server_ip }}/172.20.9.9
      --listening-port=3478
      --min-port=49160
      --max-port=49200
      --use-auth-secret
      --static-auth-secret={{ turn_secret }}
      --no-multicast-peers
      --no-cli
    networks:
      {{ customer }}-nextcloud:
        ipv4_address: 172.20.9.9
networks:
  {{ customer }}-nextcloud:
    ipam:
      driver: default
      config:
        - subnet: 172.20.9.0/28
          gateway: 172.20.9.1
volumes:
  {{ customer }}-nextcloud-html:
  # driver: local
  # driver_opts:
  # size: 100g
  {{ customer }}-nextcloud-database:
  # driver: local
  # driver_opts:
  # size: 100g
  {{ customer }}-nextcloud-backups:

View File

@@ -0,0 +1,55 @@
version: '3.9'
services:
  nocodb:
    container_name: {{ customer }}-nocodb
    image: nocodb/nocodb:latest
    restart: always
    labels:
      - "diun.enable=true"
    environment:
      # NC_DB uses nocodb's pg:// connection-string format; the password is
      # embedded in the URL, so keep this file out of world-readable paths.
      - NC_DB=pg://{{ customer }}-nocodb-db:5432?u=postgres&p={{ nocodb_postgres_password }}&d=nocodb
    volumes:
      - {{ customer }}-nocodb-data:/usr/app/data
      - {{ customer }}-nocodb-backups:/tmp/backups
    ports:
      - "127.0.0.1:3057:8080" # Host port 3057 -> Container port 8080
    depends_on:
      nocodb-db:
        condition: service_healthy
    networks:
      {{ customer }}-nocodb:
        ipv4_address: 172.20.24.2
  nocodb-db:
    container_name: {{ customer }}-nocodb-db
    image: postgres:16.6
    restart: always
    environment:
      POSTGRES_DB: nocodb
      POSTGRES_USER: postgres
      POSTGRES_PASSWORD: {{ nocodb_postgres_password }}
    volumes:
      - {{ customer }}-nocodb-postgres:/var/lib/postgresql/data
      - {{ customer }}-nocodb-backups:/tmp/backups
    healthcheck:
      # $$ escapes compose interpolation so the shell expands the variables.
      test: ["CMD-SHELL", "pg_isready -U $${POSTGRES_USER} -d $${POSTGRES_DB}"]
      interval: 5s
      timeout: 5s
      retries: 5
    networks:
      {{ customer }}-nocodb:
        ipv4_address: 172.20.24.3
networks:
  {{ customer }}-nocodb:
    ipam:
      driver: default
      config:
        - subnet: 172.20.24.0/28
          gateway: 172.20.24.1
volumes:
  {{ customer }}-nocodb-data:
  {{ customer }}-nocodb-postgres:
  {{ customer }}-nocodb-backups:

View File

@@ -0,0 +1,55 @@
version: '3.8'
services:
  odoo-web:
    container_name: {{ customer }}-odoo-web
    restart: always
    image: odoo:latest
    labels:
      - "diun.enable=true"
    depends_on:
      - odoo-postgres
    ports:
      - "127.0.0.1:3019:8069"
    environment:
      # HOST/USER/PASSWORD are the variable names the official odoo image
      # reads for its database connection.
      - HOST=odoo-postgres
      - USER={{ odoo_postgres_user }}
      - PASSWORD={{ odoo_postgres_password }}
    volumes:
      - {{ customer }}-odoo-web-data:/var/lib/odoo
      - {{ customer }}-odoo-web-config:/etc/odoo
      - {{ customer }}-odoo-web-addons:/mnt/extra-addons
      - {{ customer }}-odoo-backups:/tmp/backups
    networks:
      {{ customer }}-odoo:
        ipv4_address: 172.20.19.2
  odoo-postgres:
    container_name: {{ customer }}-odoo-postgres
    image: postgres:15
    restart: always
    environment:
      # POSTGRES_DB stays "postgres": Odoo creates and manages its own
      # databases through this maintenance connection.
      POSTGRES_DB: postgres
      POSTGRES_USER: {{ odoo_postgres_user }}
      POSTGRES_PASSWORD: {{ odoo_postgres_password }}
    volumes:
      - {{ customer }}-odoo-postgres:/var/lib/postgresql/data/
      - {{ customer }}-odoo-backups:/tmp/backups
    networks:
      {{ customer }}-odoo:
        ipv4_address: 172.20.19.3
networks:
  {{ customer }}-odoo:
    ipam:
      driver: default
      config:
        - subnet: 172.20.19.0/28
          gateway: 172.20.19.1
volumes:
  {{ customer }}-odoo-postgres:
  {{ customer }}-odoo-web-data:
  {{ customer }}-odoo-web-config:
  {{ customer }}-odoo-web-addons:
  {{ customer }}-odoo-backups:

View File

@@ -0,0 +1,67 @@
version: '3.9'
services:
  orchestrator-db:
    container_name: {{ customer }}-orchestrator-db
    image: postgres:16-alpine
    restart: always
    environment:
      POSTGRES_USER: orchestrator
      POSTGRES_PASSWORD: {{ orchestrator_db_password }}
      POSTGRES_DB: orchestrator
    volumes:
      - {{ customer }}-orchestrator-db:/var/lib/postgresql/data
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U orchestrator -d orchestrator"]
      interval: 5s
      timeout: 5s
      retries: 5
    networks:
      {{ customer }}-orchestrator:
        ipv4_address: 172.20.32.2
  orchestrator-api:
    container_name: {{ customer }}-orchestrator-api
    image: code.letsbe.solutions/letsbe/orchestrator:latest
    restart: always
    labels:
      - "diun.enable=true"
    ports:
      - '127.0.0.1:8100:8000'
    # Runs DB migrations before starting the API server.
    command: ["sh", "-c", "alembic upgrade head && uvicorn app.main:app --host 0.0.0.0 --port 8000"]
    environment:
      DATABASE_URL: postgresql+asyncpg://orchestrator:{{ orchestrator_db_password }}@orchestrator-db:5432/orchestrator
      DEBUG: "false"
      APP_NAME: "LetsBe Orchestrator"
      ADMIN_API_KEY: {{ admin_api_key }}
      LOCAL_MODE: "true"
      LOCAL_AGENT_KEY: {{ local_agent_key }}
      HUB_URL: {{ hub_url }}
      HUB_API_KEY: {{ hub_api_key }}
      # NOTE(review): unquoted Jinja value — if hub_telemetry_enabled renders
      # as true/false it becomes a YAML boolean; confirm the app accepts that.
      HUB_TELEMETRY_ENABLED: {{ hub_telemetry_enabled }}
      INSTANCE_ID: {{ instance_id }}
      LICENSE_KEY: {{ license_key }}
    depends_on:
      orchestrator-db:
        condition: service_healthy
    healthcheck:
      # Uses python/urllib instead of curl so no extra binary is required.
      test: ["CMD", "python", "-c", "import urllib.request; urllib.request.urlopen('http://localhost:8000/health')"]
      interval: 5s
      timeout: 5s
      retries: 10
      start_period: 10s
    networks:
      {{ customer }}-orchestrator:
        ipv4_address: 172.20.32.3
networks:
  {{ customer }}-orchestrator:
    # Explicit name so other stacks (e.g. the sysadmin-agent) can join it as
    # an external network.
    name: {{ customer }}-orchestrator
    ipam:
      driver: default
      config:
        - subnet: 172.20.32.0/28
          gateway: 172.20.32.1
volumes:
  {{ customer }}-orchestrator-db:

View File

@@ -0,0 +1,165 @@
---
version: "3.5"
services:
  penpot-frontend:
    container_name: {{ customer }}-penpot-frontend
    image: "penpotapp/frontend:latest"
    restart: always
    labels:
      - "diun.enable=true"
    ports:
      - '127.0.0.1:3021:80'
    volumes:
      - {{ customer }}-penpot-assets:/opt/data/assets
    depends_on:
      - penpot-backend
      - penpot-exporter
    # labels:
    #   - "traefik.enable=true"
    environment:
      # NOTE(review): open registration is enabled — confirm that is intended
      # for this deployment.
      - PENPOT_FLAGS=enable-registration enable-login-with-password
    networks:
      {{ customer }}-penpot:
        ipv4_address: 172.20.10.2
  penpot-backend:
    container_name: {{ customer }}-penpot-backend
    image: "penpotapp/backend:latest"
    restart: always
    volumes:
      - {{ customer }}-penpot-assets:/opt/data/assets
      - {{ customer }}-penpot-backups:/tmp/backups
    depends_on:
      - penpot-postgres
      - penpot-redis
    environment:
      - PENPOT_FLAGS=enable-registration enable-login-with-password disable-email-verification enable-smtp enable-prepl-server
      - PENPOT_SECRET_KEY={{ penpot_secret_key }}
      - PENPOT_TELEMETRY_ENABLED=false
      # - PENPOT_PREPL_HOST=0.0.0.0
      - PENPOT_PUBLIC_URI=https://{{ domain_penpot }} #http://localhost:9001
      ## Database
      - PENPOT_DATABASE_URI=postgresql://{{ customer }}-penpot-postgres/penpot
      - PENPOT_DATABASE_USERNAME={{ penpot_db_user }}
      - PENPOT_DATABASE_PASSWORD={{ penpot_db_password }}
      - PENPOT_REDIS_URI=redis://{{ customer }}-penpot-redis/0
      - PENPOT_ASSETS_STORAGE_BACKEND=assets-fs
      - PENPOT_STORAGE_ASSETS_FS_DIRECTORY=/opt/data/assets
      ## S3
      # - AWS_ACCESS_KEY_ID=<KEY_ID>
      # - AWS_SECRET_ACCESS_KEY=<ACCESS_KEY>
      # - PENPOT_ASSETS_STORAGE_BACKEND=assets-s3
      # - PENPOT_STORAGE_ASSETS_S3_ENDPOINT=http://penpot-minio:9000
      # - PENPOT_STORAGE_ASSETS_S3_BUCKET=<BUKET_NAME>
      ## SMTP
      # NOTE(review): SMTP username/password are left blank in the template;
      # fill them per deployment or mail sending will fail.
      - PENPOT_SMTP_DEFAULT_FROM=no-reply@{{ domain }}
      - PENPOT_SMTP_DEFAULT_REPLY_TO=support@{{ domain }}
      - PENPOT_SMTP_HOST=mail.{{ domain }}
      - PENPOT_SMTP_PORT=587
      - PENPOT_SMTP_USERNAME=
      - PENPOT_SMTP_PASSWORD=
      - PENPOT_SMTP_TLS=true
      - PENPOT_SMTP_SSL=false
    networks:
      {{ customer }}-penpot:
        ipv4_address: 172.20.10.3
  penpot-exporter:
    container_name: {{ customer }}-penpot-exporter
    image: "penpotapp/exporter:latest"
    restart: always
    environment:
      # The exporter renders via the internal frontend, not the public URL.
      - PENPOT_PUBLIC_URI=http://{{ customer }}-penpot-frontend
      - PENPOT_REDIS_URI=redis://{{ customer }}-penpot-redis/0
    networks:
      {{ customer }}-penpot:
        ipv4_address: 172.20.10.4
  penpot-postgres:
    container_name: {{ customer }}-penpot-postgres
    image: "postgres:15"
    restart: always
    # SIGINT triggers PostgreSQL "fast shutdown" for quicker, clean stops.
    stop_signal: SIGINT
    volumes:
      - {{ customer }}-penpot-postgres:/var/lib/postgresql/data
      - {{ customer }}-penpot-backups:/tmp/backups
    environment:
      - POSTGRES_INITDB_ARGS=--data-checksums
      - POSTGRES_DB=penpot
      - POSTGRES_USER={{ penpot_db_user }}
      - POSTGRES_PASSWORD={{ penpot_db_password }}
    networks:
      {{ customer }}-penpot:
        ipv4_address: 172.20.10.5
  penpot-redis:
    container_name: {{ customer }}-penpot-redis
    image: redis:7
    restart: always
    networks:
      {{ customer }}-penpot:
        ipv4_address: 172.20.10.6
  # penpot-mailcatch:
  #   container_name: {{ customer }}-penpot-mailcatch
  #   image: sj26/mailcatcher:latest
  #   restart: always
  #   # expose:
  #   #   - '1025'
  #   ports:
  #     - "127.0.0.1:3048:1080"
  #   networks:
  #     {{ customer }}-penpot:
  #       ipv4_address: 172.20.10.7
networks:
  {{ customer }}-penpot:
    ipam:
      driver: default
      config:
        - subnet: 172.20.10.0/28
          gateway: 172.20.10.1
# networks:
#   penpot:
volumes:
  {{ customer }}-penpot-assets:
  {{ customer }}-penpot-postgres:
  {{ customer }}-penpot-backups:
## Relevant flags for frontend:
## - demo-users
## - login-with-github
## - login-with-gitlab
## - login-with-google
## - login-with-ldap
## - login-with-oidc
## - login-with-password
## - registration
## - webhooks
##
## You can read more about all available flags on:
## https://help.penpot.app/technical-guide/configuration/#advanced-configuration
##Environment variables
## Relevant flags for backend:
## - demo-users
## - email-verification
## - log-emails
## - log-invitation-tokens
## - login-with-github
## - login-with-gitlab
## - login-with-google
## - login-with-ldap
## - login-with-oidc
## - login-with-password
## - registration
## - secure-session-cookies
## - smtp
## - smtp-debug
## - telemetry
## - webhooks
## - prepl-server
## https://help.penpot.app/technical-guide/configuration/#advanced-configuration

View File

@@ -0,0 +1,33 @@
version: '3.9'
services:
  portainer:
    container_name: {{ customer }}-portainer
    image: portainer/portainer-ce:latest
    restart: always
    labels:
      - "diun.enable=true"
    ports:
      - '127.0.0.1:9000:9000'
      - '127.0.0.1:9443:9443'
    volumes:
      # Docker socket gives Portainer full control over the host's Docker
      # daemon — effectively root on the host; keep this UI loopback-only.
      - /var/run/docker.sock:/var/run/docker.sock
      - {{ customer }}-portainer_data:/data
      - {{ customer }}-portainer-backups:/tmp/backups
      - /opt/letsbe/env/portainer_admin_password.txt:/tmp/portainer_admin_password.txt:ro
    # Pre-seeds the admin password from the mounted file.
    command: --admin-password-file /tmp/portainer_admin_password.txt
    networks:
      {{ customer }}-portainer:
        ipv4_address: 172.20.20.2
networks:
  {{ customer }}-portainer:
    ipam:
      driver: default
      config:
        - subnet: 172.20.20.0/28
          gateway: 172.20.20.1
volumes:
  {{ customer }}-portainer_data:
  {{ customer }}-portainer-backups:

View File

@@ -0,0 +1,47 @@
version: '3.9'
services:
  poste:
    container_name: {{ customer }}-poste
    image: analogic/poste.io:latest
    restart: always
    labels:
      - "diun.enable=true"
    hostname: {{ domain_poste }}
    # network_mode: host
    volumes:
      - {{ customer }}-poste-data:/data
      - {{ customer }}-poste-backups:/tmp/backups
    ports:
      # Mail protocol ports are published on all interfaces; the web UI (80/443)
      # stays loopback-only behind the reverse proxy.
      - "25:25"
      - "127.0.0.1:3003:80"
      - "127.0.0.1:3004:443"
      - "110:110"
      - "143:143"
      - "465:465"
      - "587:587"
      - "993:993"
      - "995:995"
      - "4190:4190"
    environment:
      TZ: Europe/Berlin
      # Quoted on purpose: plain ON/TRUE are YAML 1.1 booleans and would reach
      # the container as "true" instead of the literal strings poste.io expects.
      HTTPS: "ON"
      DISABLE_CLAMAV: "TRUE"
      DISABLE_RSPAMD: "TRUE"
      DISABLE_ROUNDCUBE: "TRUE"
      VIRTUAL_HOST: {{ domain_poste }}
    networks:
      {{ customer }}-poste:
        ipv4_address: 172.20.11.2
networks:
  {{ customer }}-poste:
    ipam:
      driver: default
      config:
        - subnet: 172.20.11.0/28
          gateway: 172.20.11.1
volumes:
  {{ customer }}-poste-data:
  {{ customer }}-poste-backups:

View File

@@ -0,0 +1,16 @@
# Redash environment — shared by the redash app services AND the postgres
# container (POSTGRES_* keys) via a common env_file.
REDASH_HOST=https://{{ redash_domain }}
# Mail: implicit-TLS on 465 (SSL on, STARTTLS off).
REDASH_MAIL_SERVER=mail.{{ domain }}
REDASH_MAIL_PORT=465
REDASH_MAIL_USE_TLS=false
REDASH_MAIL_USE_SSL=true
REDASH_MAIL_USERNAME=noreply@{{ domain }}
# TODO: set per deployment — left blank in the template.
REDASH_MAIL_PASSWORD=
REDASH_MAIL_DEFAULT_SENDER="Redash <noreply@{{ domain }}>"
REDASH_SECRET_KEY={{ redash_secret_key }}
REDASH_DATABASE_URL=postgresql://{{ redash_postgres_user }}:{{ redash_postgres_password }}@redash-postgres:5432/redash
POSTGRES_USER={{ redash_postgres_user }}
POSTGRES_PASSWORD={{ redash_postgres_password }}
POSTGRES_DB=redash
REDASH_COOKIE_SECRET={{ redash_cookie_secret }}
REDASH_ENFORCE_HTTPS=true
REDASH_REDIS_URL=redis://redash-redis:6379/0

View File

@@ -0,0 +1,101 @@
version: "3.8"
networks:
  redash_network:
    driver: bridge
    ipam:
      config:
        - subnet: 172.20.28.0/28
          gateway: 172.20.28.1
# Shared base for all Redash containers. Note: no `networks` key here —
# the previous anchor carried ipv4_address 172.20.28.2, which every service
# overrode anyway; inheriting a fixed IP from an anchor would make any new
# service silently collide on that address.
x-redash-service: &redash-service
  image: redash/redash:25.1.0
  depends_on:
    - postgres
    - redis
  env_file: /opt/letsbe/env/redash.env
  restart: always
services:
  server:
    <<: *redash-service
    command: server
    labels:
      - "diun.enable=true"
    ports:
      # NOTE(review): bound on all interfaces, unlike most stacks here which
      # bind 127.0.0.1 behind the reverse proxy — confirm this is intended.
      - "3064:5000"
    environment:
      REDASH_WEB_WORKERS: 4
    container_name: {{ customer }}-redash-server
    networks:
      redash_network:
        ipv4_address: 172.20.28.3
  scheduler:
    <<: *redash-service
    command: scheduler
    depends_on:
      - server
    container_name: {{ customer }}-redash-scheduler
    networks:
      redash_network:
        ipv4_address: 172.20.28.4
  scheduled_worker:
    <<: *redash-service
    command: worker
    depends_on:
      - server
    environment:
      QUEUES: "scheduled_queries,schemas"
      WORKERS_COUNT: 1
    container_name: {{ customer }}-redash-scheduled-worker
    networks:
      redash_network:
        ipv4_address: 172.20.28.5
  adhoc_worker:
    <<: *redash-service
    command: worker
    depends_on:
      - server
    environment:
      QUEUES: "queries"
      WORKERS_COUNT: 2
    container_name: {{ customer }}-redash-adhoc-worker
    networks:
      redash_network:
        ipv4_address: 172.20.28.6
  redis:
    image: redis:7-alpine
    restart: unless-stopped
    # NOTE(review): container_name lacks the {{ customer }} prefix used
    # elsewhere; renaming would be cosmetic only (redash.env points at the
    # service/container via "redash-redis"), but it breaks the one-stack-per-
    # host naming convention — revisit together with the env file.
    container_name: redash-redis
    networks:
      redash_network:
        ipv4_address: 172.20.28.7
  postgres:
    image: postgres:13-alpine
    env_file: /opt/letsbe/env/redash.env
    volumes:
      # NOTE(review): bind mount relative to the compose file, unlike the
      # named volumes used by every other stack — confirm backups cover it.
      - ./postgres-data:/var/lib/postgresql/data
    restart: unless-stopped
    container_name: redash-postgres
    networks:
      redash_network:
        ipv4_address: 172.20.28.8
  worker:
    <<: *redash-service
    command: worker
    environment:
      QUEUES: "periodic,emails,default"
      WORKERS_COUNT: 1
    container_name: {{ customer }}-redash-worker
    networks:
      redash_network:
        ipv4_address: 172.20.28.9

View File

@@ -0,0 +1,61 @@
version: "3.5"
services:
  squidex_mongo:
    container_name: {{ customer }}-squidex-mongo
    image: "mongo:6"
    restart: always
    volumes:
      - {{ customer }}-squidex-mongo:/data/db
      - {{ customer }}-squidex-backups:/tmp/backups
    networks:
      {{ customer }}-squidex:
        ipv4_address: 172.20.12.2
  squidex_squidex:
    container_name: {{ customer }}-squidex-squidex
    image: "squidex/squidex:7"
    restart: always
    labels:
      - "diun.enable=true"
    ports:
      - "127.0.0.1:3002:80"
    environment:
      URLS__BASEURL: 'https://{{ domain_squidex }}'
      # Unquoted booleans are coerced to the strings "false" by compose.
      UI__ONLYADMINSCANCREATEAPPS: false
      UI__ONLYADMINSCANCREATETEAMS: false
      EVENTSTORE__TYPE: MongoDB
      # "squidex_mongo" resolves via service-name DNS on the shared network.
      EVENTSTORE__MONGODB__CONFIGURATION: mongodb://squidex_mongo
      STORE__MONGODB__CONFIGURATION: mongodb://squidex_mongo
      IDENTITY__ADMINEMAIL: {{ squidex_adminemail }}
      IDENTITY__ADMINPASSWORD: {{ squidex_adminpassword }}
      # - IDENTITY__GOOGLECLIENT=${SQUIDEX_GOOGLECLIENT}
      # - IDENTITY__GOOGLESECRET=${SQUIDEX_GOOGLESECRET}
      # - IDENTITY__GITHUBCLIENT=${SQUIDEX_GITHUBCLIENT}
      # - IDENTITY__GITHUBSECRET=${SQUIDEX_GITHUBSECRET}
      # - IDENTITY__MICROSOFTCLIENT=${SQUIDEX_MICROSOFTCLIENT}
      # - IDENTITY__MICROSOFTSECRET=${SQUIDEX_MICROSOFTSECRET}
    healthcheck:
      # NOTE(review): relies on curl being present inside the squidex image —
      # verify, otherwise the container will flap as unhealthy.
      test: ["CMD", "curl", "-f", "http://localhost:80/healthz"]
      start_period: 60s
    depends_on:
      - squidex_mongo
    volumes:
      - {{ customer }}-squidex-assets:/app/Assets
      - {{ customer }}-squidex-backups:/tmp/backups
    networks:
      {{ customer }}-squidex:
        ipv4_address: 172.20.12.3
networks:
  {{ customer }}-squidex:
    ipam:
      driver: default
      config:
        - subnet: 172.20.12.0/28
          gateway: 172.20.12.1
volumes:
  {{ customer }}-squidex-mongo:
  {{ customer }}-squidex-assets:
  {{ customer }}-squidex-backups:

View File

@@ -0,0 +1,158 @@
services:
  agent:
    image: code.letsbe.solutions/letsbe/sysadmin-agent:latest
    container_name: {{ customer }}-agent
    # Join orchestrator network for container-to-container communication
    networks:
      - {{ customer }}-orchestrator
    # Enable host.docker.internal on Linux (for accessing host services)
    extra_hosts:
      - "host.docker.internal:host-gateway"
    environment:
      # Required: Orchestrator connection
      # In LOCAL_MODE, connect via shared Docker network
      - ORCHESTRATOR_URL=http://{{ customer }}-orchestrator-api:8000
      # ============================================================
      # AUTHENTICATION - Supports two modes (choose one)
      # ============================================================
      # LOCAL_MODE: Single-tenant local deployment
      # When LOCAL_MODE=true, agent uses LOCAL_AGENT_KEY to register
      # via the /register-local endpoint (Phase 2 secure flow)
      - LOCAL_MODE=true
      - LOCAL_AGENT_KEY={{ local_agent_key }}
      # Multi-tenant mode: Registration token from orchestrator
      # When LOCAL_MODE=false (default), agent uses REGISTRATION_TOKEN
      # to register via the standard /register endpoint
      # This token is obtained from the orchestrator's registration-tokens API
      - REGISTRATION_TOKEN={{ sysadmin_registration_token }}
      # Note: After first registration, credentials are persisted to
      # ~/.letsbe-agent/credentials.json and tokens are no longer needed
      # ============================================================
      # Timing (seconds)
      - HEARTBEAT_INTERVAL=${HEARTBEAT_INTERVAL:-30}
      - POLL_INTERVAL=${POLL_INTERVAL:-5}
      # Logging
      - LOG_LEVEL=${LOG_LEVEL:-INFO}
      - LOG_JSON=${LOG_JSON:-true}
      # Resilience
      - MAX_CONCURRENT_TASKS=${MAX_CONCURRENT_TASKS:-3}
      - BACKOFF_BASE=${BACKOFF_BASE:-1.0}
      - BACKOFF_MAX=${BACKOFF_MAX:-60.0}
      - CIRCUIT_BREAKER_THRESHOLD=${CIRCUIT_BREAKER_THRESHOLD:-5}
      - CIRCUIT_BREAKER_COOLDOWN=${CIRCUIT_BREAKER_COOLDOWN:-300}
      # Security
      - ALLOWED_FILE_ROOT=${ALLOWED_FILE_ROOT:-/opt/letsbe}
      - MAX_FILE_SIZE=${MAX_FILE_SIZE:-10485760}
      - SHELL_TIMEOUT=${SHELL_TIMEOUT:-60}
      # Playwright browser automation
      - PLAYWRIGHT_ARTIFACTS_DIR=/opt/letsbe/playwright-artifacts
      - PLAYWRIGHT_DEFAULT_TIMEOUT_MS=60000
      - PLAYWRIGHT_NAVIGATION_TIMEOUT_MS=120000
      # MCP Browser Sidecar connection (for LLM-driven browser control)
      - MCP_BROWSER_URL=http://mcp-browser:8931
      - MCP_BROWSER_API_KEY={{ mcp_browser_api_key }}
    volumes:
      # Docker socket for container management
      - /var/run/docker.sock:/var/run/docker.sock:ro
      # Host directory mounts for real infrastructure access
      - /opt/letsbe/env:/opt/letsbe/env
      - /opt/letsbe/stacks:/opt/letsbe/stacks
      - /opt/letsbe/nginx:/opt/letsbe/nginx
      # Credential persistence (survives restarts without re-registration)
      # NOTE(review): mounted at /home/agent, but the container runs as root
      # (user: root below) whose HOME is normally /root — confirm the agent
      # actually writes its credentials under /home/agent/.letsbe-agent.
      - agent_home:/home/agent/.letsbe-agent
      # Playwright artifacts storage
      - playwright_artifacts:/opt/letsbe/playwright-artifacts
    # Security options for Chromium sandboxing
    # NOTE(review): relative path is resolved against the compose project
    # directory — verify chromium-seccomp.json ships alongside this file.
    security_opt:
      - seccomp=./chromium-seccomp.json
    # Run as root for Docker socket access
    # TODO: Use Docker group membership instead for better security
    user: root
    restart: unless-stopped
    # Resource limits (increased for Playwright browser automation)
    deploy:
      resources:
        limits:
          cpus: '1.5'
          memory: 1G
        reservations:
          cpus: '0.25'
          memory: 256M
  mcp-browser:
    image: code.letsbe.solutions/letsbe/mcp-browser:latest
    container_name: {{ customer }}-mcp-browser
    # Join orchestrator network (shared with agent)
    networks:
      - {{ customer }}-orchestrator
    environment:
      # Session limits
      - MAX_SESSIONS=${MAX_SESSIONS:-3}
      - IDLE_TIMEOUT_SECONDS=${IDLE_TIMEOUT_SECONDS:-300}
      - MAX_SESSION_LIFETIME_SECONDS=${MAX_SESSION_LIFETIME_SECONDS:-1800}
      - MAX_ACTIONS_PER_SESSION=${MAX_ACTIONS_PER_SESSION:-50}
      # Logging
      - LOG_LEVEL=${LOG_LEVEL:-INFO}
      - LOG_JSON=${LOG_JSON:-true}
      # Screenshots
      - SCREENSHOTS_DIR=/screenshots
      # Authentication
      - API_KEY={{ mcp_browser_api_key }}
    volumes:
      # Screenshots storage
      - mcp_screenshots:/screenshots
    # Security options for Chromium sandboxing
    security_opt:
      - seccomp=./chromium-seccomp.json
    restart: unless-stopped
    # Resource limits for browser automation
    deploy:
      resources:
        limits:
          cpus: '1.5'
          memory: 1G
        reservations:
          cpus: '0.25'
          memory: 256M
volumes:
  agent_home:
    name: {{ customer }}-agent-home
  playwright_artifacts:
    name: {{ customer }}-playwright-artifacts
  mcp_screenshots:
    name: {{ customer }}-mcp-screenshots
networks:
  # Created by the orchestrator stack (which sets this explicit name).
  {{ customer }}-orchestrator:
    external: true

View File

@@ -0,0 +1,37 @@
## Make sure to change this to your own random string of 32 characters (https://docs.typebot.io/self-hosting/deploy/docker#2-add-the-required-configuratio>
ENCRYPTION_SECRET={{ typebot_encryption_secret }}
DATABASE_URL=postgresql://postgres:{{ typebot_postgres_password }}@{{ customer }}-typebot-db:5432/typebot
NODE_OPTIONS=--no-node-snapshot
NEXTAUTH_URL=https://{{ domain_botlab }}
NEXT_PUBLIC_VIEWER_URL=https://{{ domain_bot_viewer }}
DEFAULT_WORKSPACE_PLAN=UNLIMITED
DISABLE_SIGNUP=false
ADMIN_EMAIL=administrator@{{ domain }}
## For more configuration options check out: https://docs.typebot.io/self-hosting/configuration
## SMTP Configuration (Make noreply email account too)
SMTP_USERNAME=noreply@{{ domain }}
SMTP_PASSWORD=
SMTP_HOST=mail.{{ domain }}
SMTP_PORT=465
SMTP_SECURE=true
NEXT_PUBLIC_SMTP_FROM="{{ company_name }} <noreply@{{ domain }}>"
SMTP_AUTH_DISABLED=false
## S3 Configuration for MinIO
## TODO: fill in the MinIO access/secret keys per deployment (see the minio
## stack). Left empty on purpose: the previous "=#replace" placeholder is read
## as the literal value "#replace" by some dotenv loaders instead of a comment.
S3_ACCESS_KEY=
S3_SECRET_KEY=
S3_BUCKET=bots
S3_PORT=
S3_ENDPOINT={{ domain_s3 }}
S3_SSL=true
S3_REGION=eu-central
S3_PUBLIC_CUSTOM_DOMAIN=https://{{ domain_s3 }}/bots

View File

@@ -0,0 +1,60 @@
version: '3.3'
volumes:
  {{ customer }}-typebot-db-data:
services:
  {{ customer }}-typebot-db:
    image: postgres:16
    restart: always
    volumes:
      - {{ customer }}-typebot-db-data:/var/lib/postgresql/data
    environment:
      - POSTGRES_DB=typebot
      - POSTGRES_PASSWORD={{ typebot_postgres_password }}
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U postgres"]
      interval: 5s
      timeout: 5s
      retries: 5
    networks:
      {{ customer }}-typebot:
        ipv4_address: 172.20.25.2
  typebot-builder:
    image: baptistearno/typebot-builder:latest
    restart: always
    labels:
      - "diun.enable=true"
    depends_on:
      {{ customer }}-typebot-db:
        condition: service_healthy
    ports:
      # NOTE(review): host port 3061 is already taken by the nextcloud stack's
      # talk-hpb service (127.0.0.1:3061) — on a host running both stacks one
      # of them will fail to bind. Also bound on all interfaces, unlike the
      # 127.0.0.1 convention used elsewhere. Renumbering needs the matching
      # nginx vhost updated, so only flagged here.
      - '3061:3000'
    extra_hosts:
      - 'host.docker.internal:host-gateway'
    env_file: /opt/letsbe/env/typebot.env
    networks:
      {{ customer }}-typebot:
        ipv4_address: 172.20.25.3
  typebot-viewer:
    image: baptistearno/typebot-viewer:latest
    # diun label added for consistency: every other :latest image in these
    # stacks is watched for updates.
    labels:
      - "diun.enable=true"
    depends_on:
      {{ customer }}-typebot-db:
        condition: service_healthy
    restart: always
    ports:
      - '3062:3000'
    env_file: /opt/letsbe/env/typebot.env
    networks:
      {{ customer }}-typebot:
        ipv4_address: 172.20.25.4
networks:
  {{ customer }}-typebot:
    ipam:
      driver: default
      config:
        - subnet: 172.20.25.0/28
          gateway: 172.20.25.1

View File

@@ -0,0 +1,53 @@
version: '3.9'
services:
  umami:
    container_name: {{ customer }}-umami
    image: ghcr.io/umami-software/umami:postgresql-latest
    restart: always
    labels:
      - "diun.enable=true"
    ports:
      - "127.0.0.1:3008:3000"
    environment:
      DATABASE_URL: postgresql://{{ umami_postgres_user }}:{{ umami_postgres_password }}@{{ customer }}-umami-db:5432/umami
      DATABASE_TYPE: postgresql
      APP_SECRET: '{{ umami_app_secret }}'
    networks:
      {{ customer }}-umami:
        ipv4_address: 172.20.13.2
    depends_on:
      umami-db:
        condition: service_healthy
  umami-db:
    container_name: {{ customer }}-umami-db
    image: postgres:15-alpine
    environment:
      POSTGRES_DB: 'umami'
      POSTGRES_USER: '{{ umami_postgres_user }}'
      POSTGRES_PASSWORD: '{{ umami_postgres_password }}'
    volumes:
      - {{ customer }}-umami-postgres:/var/lib/postgresql/data
      - {{ customer }}-umami-backups:/tmp/backups
    restart: always
    healthcheck:
      # $$ escapes compose interpolation so the shell expands the variables.
      test: ["CMD-SHELL", "pg_isready -U $${POSTGRES_USER} -d $${POSTGRES_DB}"]
      interval: 5s
      timeout: 5s
      retries: 5
    networks:
      {{ customer }}-umami:
        ipv4_address: 172.20.13.3
networks:
  {{ customer }}-umami:
    ipam:
      driver: default
      config:
        - subnet: 172.20.13.0/28
          gateway: 172.20.13.1
volumes:
  {{ customer }}-umami-postgres:
  {{ customer }}-umami-backups:

View File

@@ -0,0 +1,29 @@
version: '3.9'
services:
  uptime-kuma:
    container_name: {{ customer }}-uptime-kuma
    image: louislam/uptime-kuma:latest
    restart: always
    labels:
      - "diun.enable=true"
    volumes:
      # Uptime Kuma keeps its SQLite DB and config under /app/data.
      - {{ customer }}-uptimekuma-data:/app/data
      - {{ customer }}-uptimekuma-backups:/tmp/backups
    ports:
      - "127.0.0.1:3005:3001"
    networks:
      {{ customer }}-uptime-kuma:
        ipv4_address: 172.20.14.2
networks:
  {{ customer }}-uptime-kuma:
    ipam:
      driver: default
      config:
        - subnet: 172.20.14.0/28
          gateway: 172.20.14.1
volumes:
  {{ customer }}-uptimekuma-data:
  {{ customer }}-uptimekuma-backups:

View File

@@ -0,0 +1,12 @@
# Vaultwarden Environment Configuration
# Copy to .env and configure
# Admin token for /admin panel access
# Generate with: openssl rand -base64 48
ADMIN_TOKEN=
# SMTP password for sending invite emails (noreply@<domain> account)
SMTP_PASSWORD=
# SSO Client Secret (when Keycloak is enabled)
# SSO_CLIENT_SECRET=

View File

@@ -0,0 +1,73 @@
services:
  vaultwarden:
    container_name: {{ customer }}-vaultwarden
    image: vaultwarden/server:latest
    restart: unless-stopped
    environment:
      # Domain configuration
      DOMAIN: https://vault.{{ domain }}
      # Admin panel - generate secure token: openssl rand -base64 48
      ADMIN_TOKEN: ${ADMIN_TOKEN}
      # Signup controls - enable for initial setup, disable after first user created
      SIGNUPS_ALLOWED: "true"
      INVITATIONS_ALLOWED: "true"
      SHOW_PASSWORD_HINT: "false"
      # SSO/OpenID Connect (Keycloak integration) - disabled for initial testing
      # Enable after Keycloak is configured with vaultwarden client
      SSO_ENABLED: "false"
      # SSO_ONLY: "false"
      # SSO_AUTHORITY: https://auth.{{ domain }}/realms/{{ customer }}
      # SSO_CLIENT_ID: vaultwarden
      # SSO_CLIENT_SECRET: ${SSO_CLIENT_SECRET}
      # SSO_PKCE: "true"
      # SMTP configuration
      SMTP_HOST: mail.{{ domain }}
      SMTP_FROM: noreply@{{ domain }}
      SMTP_USERNAME: noreply@{{ domain }}
      SMTP_PASSWORD: ${SMTP_PASSWORD}
      # Quoted: an unquoted templated scalar followed by extra words is fragile
      # YAML — quoting keeps the rendered "<Customer> Vault" a plain string.
      SMTP_FROM_NAME: "{{ customer | title }} Vault"
      SMTP_PORT: 587
      SMTP_SECURITY: starttls
      SMTP_AUTH_MECHANISM: Login
      # Database (SQLite by default, in volume)
      DATABASE_URL: /data/db.sqlite3
      # Logging
      LOG_LEVEL: info
      EXTENDED_LOGGING: "true"
      # Security
      DISABLE_ADMIN_TOKEN: "false"
      PASSWORD_ITERATIONS: 600000
      # WebSocket for live sync
      WEBSOCKET_ENABLED: "true"
    volumes:
      - {{ customer }}-vaultwarden-data:/data
    ports:
      # 3071 = web vault / API, 3072 = websocket notifications.
      - "127.0.0.1:3071:80"
      - "127.0.0.1:3072:3012"
    networks:
      {{ customer }}-vaultwarden:
        ipv4_address: 172.20.34.2
    labels:
      - "diun.enable=true"
networks:
  {{ customer }}-vaultwarden:
    name: {{ customer }}-vaultwarden
    driver: bridge
    ipam:
      driver: default
      config:
        - subnet: 172.20.34.0/28
          gateway: 172.20.34.1
volumes:
  {{ customer }}-vaultwarden-data:

View File

@@ -0,0 +1,6 @@
# Windmill reverse proxy: LSP websocket traffic goes to the lsp container,
# everything else to the windmill server. BASE_URL/ADDRESS come from the env.
{$BASE_URL} {
	bind {$ADDRESS}
	reverse_proxy /ws/* http://lsp:3001
	# reverse_proxy /ws_mp/* http://multiplayer:3002
	reverse_proxy /* http://windmill_server:8000
}

View File

@@ -0,0 +1,166 @@
# Windmill stack (database + server + workers + LSP + Caddy front proxy),
# one isolated /28 network per customer, rendered by the templating layer.
version: "3.7"
services:
  windmill-db:
    container_name: {{ customer }}-windmill-db
    # deploy:
    #   replicas: 1
    image: postgres:15
    restart: always
    volumes:
      - {{ customer }}-windmill-postgres:/var/lib/postgresql/data
      - {{ customer }}-windmill-backups:/tmp/backups
    ports:
      # Loopback only, for host-side admin/backup access.
      - "127.0.0.1:3038:5432"
    environment:
      # Single-quoted so a rendered secret cannot break or re-type the YAML.
      POSTGRES_PASSWORD: '{{ windmill_database_password }}'
      POSTGRES_DB: windmill
    healthcheck:
      # Server/worker services wait on this via `condition: service_healthy`.
      test: ["CMD-SHELL", "pg_isready -U postgres"]
      interval: 10s
      timeout: 5s
      retries: 5
    networks:
      {{ customer }}-windmill:
        ipv4_address: 172.20.15.2
windmill_server:
container_name: {{ customer }}-windmill-server
image: ghcr.io/windmill-labs/windmill:main
pull_policy: always
# deploy:
# replicas: 1
restart: always
labels:
- "diun.enable=true"
ports:
- "127.0.0.1:3039:8000"
environment:
- DATABASE_URL=postgres://postgres:{{ windmill_database_password }}@windmill-db:5432/windmill?sslmode=disable
- BASE_URL='https://{{ domain_windmill }}'
- RUST_LOG=info
- NUM_WORKERS=0
- DISABLE_SERVER=false
- METRICS_ADDR=false
- REQUEST_SIZE_LIMIT=50097152
#- LICENSE_KEY=${WM_LICENSE_KEY}
depends_on:
windmill-db:
condition: service_healthy
volumes:
- ./oauth.json:/usr/src/app/oauth.json
- {{ customer }}-windmill-backups:/tmp/backups
networks:
{{ customer }}-windmill:
ipv4_address: 172.20.15.3
windmill_worker:
container_name: {{ customer }}-windmill-worker
image: ghcr.io/windmill-labs/windmill:main
pull_policy: always
# deploy:
# replicas: 1
restart: always
environment:
- DATABASE_URL=postgres://postgres:{{ windmill_database_password }}@windmill-db:5432/windmill?sslmode=disable
- BASE_URL='https://{{ domain_windmill }}'
- RUST_LOG=info
- DISABLE_SERVER=true
- KEEP_JOB_DIR=false
- METRICS_ADDR=false
- WORKER_TAGS=deno,python3,go,bash,powershell,dependency,flow,hub,other,bun
#- LICENSE_KEY=${WM_LICENSE_KEY}
depends_on:
windmill-db:
condition: service_healthy
volumes:
- /var/run/docker.sock:/var/run/docker.sock:ro
- ./oauth.json:/usr/src/app/oauth.json
- {{ customer }}-windmill-worker-cache:/tmp/windmill/cache
networks:
{{ customer }}-windmill:
ipv4_address: 172.20.15.4
windmill_worker_native:
container_name: {{ customer }}-windmill-worker-native
image: ghcr.io/windmill-labs/windmill:main
pull_policy: always
# deploy:
# replicas: 1
# resources:
# limits:
# cpus: "0.25"
# memory: 512M
restart: always
environment:
- DATABASE_URL=postgres://postgres:{{ windmill_database_password }}@windmill-db:5432/windmill?sslmode=disable
- BASE_URL='https://{{ domain_windmill }}'
- RUST_LOG=info
- DISABLE_SERVER=true
- KEEP_JOB_DIR=false
- METRICS_ADDR=false
- NUM_WORKERS=4
- WORKER_TAGS=nativets,postgresql,mysql,graphql,snowflake
depends_on:
windmill-db:
condition: service_healthy
volumes:
# See Oauth (https://docs.windmill.dev/docs/misc/setup_oauth)
- ./oauth.json:/usr/src/app/oauth.json
networks:
{{ customer }}-windmill:
ipv4_address: 172.20.15.5
  # Language-server backend for the Windmill code editor (websocket on 3001).
  lsp:
    container_name: {{ customer }}-windmill-lsp
    image: ghcr.io/windmill-labs/windmill-lsp:latest
    restart: always
    ports:
      # Loopback only; the Caddyfile routes /ws/* to this service.
      - "127.0.0.1:3041:3001"
    volumes:
      # Persist LSP package/analysis cache across restarts.
      - {{ customer }}-windmill-lsp-cache:/root/.cache
    networks:
      {{ customer }}-windmill:
        ipv4_address: 172.20.15.6
  # Collaborative-editing backend; functional only on Enterprise Edition.
  multiplayer:
    container_name: {{ customer }}-windmill-multiplayer
    image: ghcr.io/windmill-labs/windmill-multiplayer:latest
    # deploy:
    #   replicas: 0 # Set to 1 to enable multiplayer, only available on Enterprise Edition
    restart: always
    ports:
      # Loopback only; wire up /ws_mp/* in the Caddyfile when enabling.
      - "127.0.0.1:3047:3002"
    networks:
      {{ customer }}-windmill:
        ipv4_address: 172.20.15.7
  # Front proxy that multiplexes UI/API and LSP websocket traffic (see ./Caddyfile).
  caddy:
    container_name: {{ customer }}-windmill-caddy
    image: caddy:2.5.2-alpine
    restart: always
    volumes:
      - ./Caddyfile:/etc/caddy/Caddyfile
      - {{ customer }}-windmill-backups:/tmp/backups
    ports:
      # Loopback only; the host reverse proxy terminates TLS and forwards here.
      - "127.0.0.1:3014:80"
    environment:
      # Caddy listens on plain HTTP inside the compose network.
      BASE_URL: ":80"
    networks:
      {{ customer }}-windmill:
        ipv4_address: 172.20.15.8
networks:
  {{ customer }}-windmill:
    ipam:
      driver: default
      config:
        - subnet: 172.20.15.0/28
          gateway: 172.20.15.1
volumes:
  # Named volumes. A bare key and an explicit `null` are equivalent; the file
  # mixed both styles, normalized here to bare keys throughout.
  {{ customer }}-windmill-worker-cache:
  {{ customer }}-windmill-lsp-cache:
  {{ customer }}-windmill-postgres:
  #{{ customer }}-windmill-data:
  {{ customer }}-windmill-backups:

View File

@@ -0,0 +1,55 @@
# WordPress + MariaDB stack, one isolated /28 network per customer.
# Templated secrets are quoted so a rendered value containing YAML specials
# (': ', ' #', quotes) can never break or re-type the document.
version: '3.9'
services:
  wordpress-mysql:
    container_name: {{ customer }}-wordpress-mysql
    # MariaDB is MySQL-compatible; the service keeps its historical "mysql" name.
    image: mariadb:10.7.8
    restart: always
    ports:
      # Loopback only, for host-side admin/backup access.
      - "127.0.0.1:3053:3306"
    environment:
      # NOTE(review): variable name has a triple 's' ("wordpresss"); kept as-is
      # because the vars file presumably uses the same spelling — confirm before renaming.
      MYSQL_ROOT_PASSWORD: "{{ wordpresss_mariadb_root_password }}"
      MYSQL_DATABASE: wordpress
      MYSQL_USER: "{{ wordpress_db_user }}"
      MYSQL_PASSWORD: "{{ wordpress_db_password }}"
    volumes:
      - {{ customer }}-wordpress-mariadb:/var/lib/mysql
      - {{ customer }}-wordpress-backups:/tmp/backups
    networks:
      {{ customer }}-wordpress:
        ipv4_address: 172.20.16.2
  wordpress:
    container_name: {{ customer }}-wordpress
    image: wordpress:php8.2-apache
    restart: always
    labels:
      - "diun.enable=true"
    volumes:
      - {{ customer }}-wordpress-data:/var/www/html
      - {{ customer }}-wordpress-backups:/tmp/backups
    ports:
      # Loopback only; the host reverse proxy fronts the site.
      - "127.0.0.1:3001:80"
    environment:
      # Must match the db service's container_name above.
      WORDPRESS_DB_HOST: "{{ customer }}-wordpress-mysql"
      WORDPRESS_DB_USER: "{{ wordpress_db_user }}"
      WORDPRESS_DB_PASSWORD: "{{ wordpress_db_password }}"
      WORDPRESS_DB_NAME: wordpress
    depends_on:
      # Start ordering only (no healthcheck); WordPress retries the DB connection itself.
      - wordpress-mysql
    networks:
      {{ customer }}-wordpress:
        ipv4_address: 172.20.16.3
networks:
  {{ customer }}-wordpress:
    ipam:
      driver: default
      config:
        - subnet: 172.20.16.0/28
          gateway: 172.20.16.1
volumes:
  {{ customer }}-wordpress-mariadb:
  {{ customer }}-wordpress-data:
  {{ customer }}-wordpress-backups: