openreplay/scripts/docker-compose/docker-compose.yaml
Rajesh Rajendran b3594136ce or 1940 upstream docker release with the existing installation (#3316)
* chore(docker): Adding dynamic env generator
* ci(make): Create deployment yamls
* ci(make): Generating docker envs
* change env name structure
* proper env names
* chore(docker): clickhouse
* chore(docker-compose): generate env file format
* chore(docker-compose): Adding docker-compose
* chore(docker-compose): format make
* chore(docker-compose): Update version
* chore(docker-compose): adding new secrets
* ci(make): default target
* ci(Makefile): Update common protocol
* chore(docker-compose): refactor folder structure
* ci(make): rename to docker-envs
* feat(docker): add clickhouse volume definition
Add clickhouse persistent volume to the docker-compose configuration
to ensure data is preserved between container restarts.
* refactor: move env files to docker-envs directory
Updates all environment file references in docker-compose.yaml to use a
consistent directory structure, placing them under the docker-envs/
directory for better organization.
* fix(docker): rename imagestorage to images
 The `imagestorage` service and related environment file
 have been renamed to `images` for clarity and consistency.
 This change reflects the service's purpose of handling
 images.
* feat(docker): introduce docker-compose template
 A new docker-compose template
 to generate docker-compose files from a list of services.
 The template uses helm syntax.
* fix: Properly set FILES variable in Makefile
 The FILES variable was not being set correctly in the
 Makefile due to subshell issues. This commit fixes the
 variable assignment and ensures that the variable is
 accessible in subsequent commands.
* feat: Refactor docker-compose template for local development
 This commit introduces a complete overhaul of the
 docker-compose template, switching from a helm-based
 template to a native docker-compose.yml file. This
 change simplifies local development and makes it easier
 to manage the OpenReplay stack.
 The new template includes services for:
 - PostgreSQL
 - ClickHouse
 - Redis
 - MinIO
 - Nginx
 - Caddy
 It also includes migration jobs for setting up the
 database and MinIO.
* fix(docker-compose): Add fallback empty environment
 Add an empty environment to the docker-compose template to prevent
 errors when the env_file is missing. This ensures that the
 container can start even if the environment file is not present.
* feat(docker): Add domainname and aliases to services
 This change adds the `domainname` and `aliases` attributes to each
 service in the docker-compose.yaml file. This is to ensure that
 the services can communicate with each other using their fully
 qualified domain names. Also adds shared volume and empty
 environment variables.
* update version
* chore(docker): don't pull parallel
* chore(docker-compose): proper pull
* chore(docker-compose): Update db service urls
* fix(docker-compose): clickhouse url
* chore(clickhouse): Adding clickhouse db migration
* chore(docker-compose): Adding clickhouse
* fix(tpl): variable injection
* chore(fix): compose tpl variable rendering
* chore(docker-compose): Allow override pg variable
* chore(helm): remove assist-server
* chore(helm): pg integrations
* chore(nginx): removed services
* chore(docker-compose): Multiple aliases
* chore(docker-compose): Adding more env vars
* feat(install): Dynamically generate passwords
 dynamic password generation by
 identifying `change_me_*` entries in `common.env` and
 replacing them with random passwords. This enhances
 security and simplifies initial setup.
 The changes include:
 - Replacing hardcoded password replacements with a loop
   that iterates through all `change_me_*` entries.
 - Using `grep` to find all `change_me_*` tokens.
 - Generating a random password for each token.
 - Updating the `common.env` file with the generated
   passwords.
* chore(docker-compose): disable clickhouse password
* fix(docker-compose): clickhouse-migration
* compose: chalice env
* chore(docker-compose): overlay vars
* chore(docker): Adding ch port
* chore(docker-compose): disable clickhouse password
* fix(docker-compose): migration name
* feat(docker): skip specific values
* chore(docker-compose): define namespace
---------

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
2025-04-23 17:52:50 +02:00

493 lines
13 KiB
YAML

# vim: ft=yaml
version: '3'
services:
  # Primary application database, persisted in the `pgdata` named volume.
  postgresql:
    image: bitnami/postgresql:${POSTGRES_VERSION}
    container_name: postgres
    volumes:
      # NOTE(review): the bitnami image documents /bitnami/postgresql as its
      # data dir — confirm this mount path actually persists the cluster.
      - pgdata:/var/lib/postgresql/data
    networks:
      openreplay-net:
        aliases:
          # Kubernetes-style FQDN so services use the same URL as under helm.
          - postgresql.db.svc.cluster.local
    environment:
      POSTGRESQL_PASSWORD: "${COMMON_PG_PASSWORD}"
clickhouse:
image: clickhouse/clickhouse-server:${CLICKHOUSE_VERSION}
container_name: clickhouse
volumes:
- clickhouse:/var/lib/clickhouse
networks:
openreplay-net:
aliases:
- clickhouse-openreplay-clickhouse.db.svc.cluster.local
environment:
CLICKHOUSE_USER: "default"
CLICKHOUSE_PASSWORD: ""
CLICKHOUSE_DEFAULT_ACCESS_MANAGEMENT: "1"
redis:
image: bitnami/redis:${REDIS_VERSION}
container_name: redis
volumes:
- redisdata:/bitnami/redis/data
networks:
openreplay-net:
aliases:
- redis-master.db.svc.cluster.local
environment:
ALLOW_EMPTY_PASSWORD: "yes"
minio:
image: bitnami/minio:${MINIO_VERSION}
container_name: minio
volumes:
- miniodata:/bitnami/minio/data
networks:
openreplay-net:
aliases:
- minio.db.svc.cluster.local
ports:
- 9001:9001
environment:
MINIO_ROOT_USER: ${COMMON_S3_KEY}
MINIO_ROOT_PASSWORD: ${COMMON_S3_SECRET}
fs-permission:
image: debian:stable-slim
container_name: fs-permission
profiles:
- "migration"
volumes:
- shared-volume:/mnt/efs
- miniodata:/mnt/minio
- pgdata:/mnt/postgres
entrypoint:
- /bin/bash
- -c
- |
chown -R 1001:1001 /mnt/{efs,minio,postgres}
restart: on-failure
minio-migration:
image: bitnami/minio:2020.10.9-debian-10-r6
container_name: minio-migration
profiles:
- "migration"
depends_on:
- minio
- fs-permission
networks:
- openreplay-net
volumes:
- ../helmcharts/openreplay/files/minio.sh:/tmp/minio.sh
environment:
MINIO_HOST: http://minio.db.svc.cluster.local:9000
MINIO_ACCESS_KEY: ${COMMON_S3_KEY}
MINIO_SECRET_KEY: ${COMMON_S3_SECRET}
user: root
entrypoint:
- /bin/bash
- -c
- |
apt update && apt install netcat -y
# Wait for Minio to be ready
until nc -z -v -w30 minio 9000; do
echo "Waiting for Minio server to be ready..."
sleep 1
done
bash /tmp/minio.sh init || exit 100
db-migration:
image: bitnami/postgresql:14.5.0
container_name: db-migration
profiles:
- "migration"
depends_on:
- postgresql
- minio-migration
networks:
- openreplay-net
volumes:
- ../schema/db/init_dbs/postgresql/init_schema.sql:/tmp/init_schema.sql
environment:
PGHOST: postgresql
PGPORT: 5432
PGDATABASE: postgres
PGUSER: postgres
PGPASSWORD: ${COMMON_PG_PASSWORD}
entrypoint:
- /bin/bash
- -c
- |
until psql -c '\q'; do
echo "PostgreSQL is unavailable - sleeping"
sleep 1
done
echo "PostgreSQL is up - executing command"
psql -v ON_ERROR_STOP=1 -f /tmp/init_schema.sql
clickhouse-migration:
image: clickhouse/clickhouse-server:${CLICKHOUSE_VERSION}
container_name: clickhouse-migration
profiles:
- "migration"
depends_on:
- clickhouse
- minio-migration
networks:
- openreplay-net
volumes:
- ../schema/db/init_dbs/clickhouse/init_schema.sql:/tmp/init_schema.sql
environment:
CH_HOST: "clickhouse-openreplay-clickhouse.db.svc.cluster.local"
CH_PORT: "9000"
CH_PORT_HTTP: "8123"
CH_USERNAME: "default"
CH_PASSWORD: ""
entrypoint:
- /bin/bash
- -c
- |
# Checking variable is empty. Shell independant method.
# Wait for Minio to be ready
until nc -z -v -w30 clickhouse-openreplay-clickhouse.db.svc.cluster.local 9000; do
echo "Waiting for Minio server to be ready..."
sleep 1
done
echo "clickhouse is up - executing command"
clickhouse-client -h ${CH_HOST} --user ${CH_USERNAME} ${CH_PASSWORD} --port ${CH_PORT} --multiquery < /tmp/init_schema.sql || true
alerts-openreplay:
image: public.ecr.aws/p1t3u8a3/alerts:${COMMON_VERSION}
domainname: app.svc.cluster.local
container_name: alerts
networks:
openreplay-net:
aliases:
- alerts-openreplay
- alerts-openreplay.app.svc.cluster.local
volumes:
- shared-volume:/mnt/efs
env_file:
- docker-envs/alerts.env
environment: {} # Fallback empty environment if env_file is missing
restart: unless-stopped
analytics-openreplay:
image: public.ecr.aws/p1t3u8a3/analytics:${COMMON_VERSION}
domainname: app.svc.cluster.local
container_name: analytics
networks:
openreplay-net:
aliases:
- analytics-openreplay
- analytics-openreplay.app.svc.cluster.local
volumes:
- shared-volume:/mnt/efs
env_file:
- docker-envs/analytics.env
environment: {} # Fallback empty environment if env_file is missing
restart: unless-stopped
http-openreplay:
image: public.ecr.aws/p1t3u8a3/http:${COMMON_VERSION}
domainname: app.svc.cluster.local
container_name: http
networks:
openreplay-net:
aliases:
- http-openreplay
- http-openreplay.app.svc.cluster.local
volumes:
- shared-volume:/mnt/efs
env_file:
- docker-envs/http.env
environment: {} # Fallback empty environment if env_file is missing
restart: unless-stopped
images-openreplay:
image: public.ecr.aws/p1t3u8a3/images:${COMMON_VERSION}
domainname: app.svc.cluster.local
container_name: images
networks:
openreplay-net:
aliases:
- images-openreplay
- images-openreplay.app.svc.cluster.local
volumes:
- shared-volume:/mnt/efs
env_file:
- docker-envs/images.env
environment: {} # Fallback empty environment if env_file is missing
restart: unless-stopped
integrations-openreplay:
image: public.ecr.aws/p1t3u8a3/integrations:${COMMON_VERSION}
domainname: app.svc.cluster.local
container_name: integrations
networks:
openreplay-net:
aliases:
- integrations-openreplay
- integrations-openreplay.app.svc.cluster.local
volumes:
- shared-volume:/mnt/efs
env_file:
- docker-envs/integrations.env
environment: {} # Fallback empty environment if env_file is missing
restart: unless-stopped
sink-openreplay:
image: public.ecr.aws/p1t3u8a3/sink:${COMMON_VERSION}
domainname: app.svc.cluster.local
container_name: sink
networks:
openreplay-net:
aliases:
- sink-openreplay
- sink-openreplay.app.svc.cluster.local
volumes:
- shared-volume:/mnt/efs
env_file:
- docker-envs/sink.env
environment: {} # Fallback empty environment if env_file is missing
restart: unless-stopped
sourcemapreader-openreplay:
image: public.ecr.aws/p1t3u8a3/sourcemapreader:${COMMON_VERSION}
domainname: app.svc.cluster.local
container_name: sourcemapreader
networks:
openreplay-net:
aliases:
- sourcemapreader-openreplay
- sourcemapreader-openreplay.app.svc.cluster.local
volumes:
- shared-volume:/mnt/efs
env_file:
- docker-envs/sourcemapreader.env
environment: {} # Fallback empty environment if env_file is missing
restart: unless-stopped
spot-openreplay:
image: public.ecr.aws/p1t3u8a3/spot:${COMMON_VERSION}
domainname: app.svc.cluster.local
container_name: spot
networks:
openreplay-net:
aliases:
- spot-openreplay
- spot-openreplay.app.svc.cluster.local
volumes:
- shared-volume:/mnt/efs
env_file:
- docker-envs/spot.env
environment: {} # Fallback empty environment if env_file is missing
restart: unless-stopped
storage-openreplay:
image: public.ecr.aws/p1t3u8a3/storage:${COMMON_VERSION}
domainname: app.svc.cluster.local
container_name: storage
networks:
openreplay-net:
aliases:
- storage-openreplay
- storage-openreplay.app.svc.cluster.local
volumes:
- shared-volume:/mnt/efs
env_file:
- docker-envs/storage.env
environment: {} # Fallback empty environment if env_file is missing
restart: unless-stopped
assets-openreplay:
image: public.ecr.aws/p1t3u8a3/assets:${COMMON_VERSION}
domainname: app.svc.cluster.local
container_name: assets
networks:
openreplay-net:
aliases:
- assets-openreplay
- assets-openreplay.app.svc.cluster.local
volumes:
- shared-volume:/mnt/efs
env_file:
- docker-envs/assets.env
environment: {} # Fallback empty environment if env_file is missing
restart: unless-stopped
assist-openreplay:
image: public.ecr.aws/p1t3u8a3/assist:${COMMON_VERSION}
domainname: app.svc.cluster.local
container_name: assist
networks:
openreplay-net:
aliases:
- assist-openreplay
- assist-openreplay.app.svc.cluster.local
volumes:
- shared-volume:/mnt/efs
env_file:
- docker-envs/assist.env
environment: {} # Fallback empty environment if env_file is missing
restart: unless-stopped
canvases-openreplay:
image: public.ecr.aws/p1t3u8a3/canvases:${COMMON_VERSION}
domainname: app.svc.cluster.local
container_name: canvases
networks:
openreplay-net:
aliases:
- canvases-openreplay
- canvases-openreplay.app.svc.cluster.local
volumes:
- shared-volume:/mnt/efs
env_file:
- docker-envs/canvases.env
environment: {} # Fallback empty environment if env_file is missing
restart: unless-stopped
chalice-openreplay:
image: public.ecr.aws/p1t3u8a3/chalice:${COMMON_VERSION}
domainname: app.svc.cluster.local
container_name: chalice
networks:
openreplay-net:
aliases:
- chalice-openreplay
- chalice-openreplay.app.svc.cluster.local
volumes:
- shared-volume:/mnt/efs
env_file:
- docker-envs/chalice.env
environment: {} # Fallback empty environment if env_file is missing
restart: unless-stopped
db-openreplay:
image: public.ecr.aws/p1t3u8a3/db:${COMMON_VERSION}
domainname: app.svc.cluster.local
container_name: db
networks:
openreplay-net:
aliases:
- db-openreplay
- db-openreplay.app.svc.cluster.local
volumes:
- shared-volume:/mnt/efs
env_file:
- docker-envs/db.env
environment: {} # Fallback empty environment if env_file is missing
restart: unless-stopped
ender-openreplay:
image: public.ecr.aws/p1t3u8a3/ender:${COMMON_VERSION}
domainname: app.svc.cluster.local
container_name: ender
networks:
openreplay-net:
aliases:
- ender-openreplay
- ender-openreplay.app.svc.cluster.local
volumes:
- shared-volume:/mnt/efs
env_file:
- docker-envs/ender.env
environment: {} # Fallback empty environment if env_file is missing
restart: unless-stopped
frontend-openreplay:
image: public.ecr.aws/p1t3u8a3/frontend:${COMMON_VERSION}
domainname: app.svc.cluster.local
container_name: frontend
networks:
openreplay-net:
aliases:
- frontend-openreplay
- frontend-openreplay.app.svc.cluster.local
volumes:
- shared-volume:/mnt/efs
env_file:
- docker-envs/frontend.env
environment: {} # Fallback empty environment if env_file is missing
restart: unless-stopped
heuristics-openreplay:
image: public.ecr.aws/p1t3u8a3/heuristics:${COMMON_VERSION}
domainname: app.svc.cluster.local
container_name: heuristics
networks:
openreplay-net:
aliases:
- heuristics-openreplay
- heuristics-openreplay.app.svc.cluster.local
volumes:
- shared-volume:/mnt/efs
env_file:
- docker-envs/heuristics.env
environment: {} # Fallback empty environment if env_file is missing
restart: unless-stopped
nginx-openreplay:
image: nginx:latest
container_name: nginx
networks:
- openreplay-net
volumes:
- ./nginx.conf:/etc/nginx/conf.d/default.conf
restart: unless-stopped
caddy:
image: caddy:latest
container_name: caddy
ports:
- "80:80"
- "443:443"
volumes:
- ./Caddyfile:/etc/caddy/Caddyfile
- caddy_data:/data
- caddy_config:/config
networks:
- openreplay-net
environment:
- ACME_AGREE=true # Agree to Let's Encrypt Subscriber Agreement
- CADDY_DOMAIN=${CADDY_DOMAIN}
restart: unless-stopped
# Named volumes preserve database/object-store state across restarts.
volumes:
  pgdata:
  clickhouse:
  redisdata:
  miniodata:
  shared-volume:
  caddy_data:
  caddy_config:

# Single bridge network; services attach with cluster-style DNS aliases.
networks:
  openreplay-net: