TSYSDevStack-SupportStack-L…/demo/docker-compose.yml.template
reachableceo 25f7a6cd75 feat(demo): migrate 5 SelfStack services to demo stack (16→24 services)
Add Reactive Resume, Metrics, Kiwix, Resume Matcher, and Apple Health
from the earlier SelfStack project. Rewrite Apple Health collector to
use InfluxDB v2 with proper error handling. Update all tests, scripts,
Homepage config, env template, and documentation for the expanded stack.

New services:
- Reactive Resume (4016) + Postgres/Minio/Chrome companions
- Metrics (4021) - GitHub metrics visualization
- Kiwix (4022) - offline wiki reader
- Resume Matcher (4023) - AI resume screening
- Apple Health (4024) - health data collector → InfluxDB v2

Also adds git policy to AGENTS.md: always commit and push automatically.
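The rewritten collector itself lives under demo/config/applehealth and is not reproduced here. As a rough, hypothetical sketch of an InfluxDB v2 write path with the kind of error handling this commit describes (the write_samples() helper, the apple_health measurement, and the sample layout are illustrative assumptions; only the INFLUXDB_* variables match the compose template below):

    # Illustrative sketch only -- not the actual collector code from this commit.
    import os
    import logging

    from influxdb_client import InfluxDBClient, Point
    from influxdb_client.client.write_api import SYNCHRONOUS
    from influxdb_client.rest import ApiException

    log = logging.getLogger("applehealth")

    # Same variables the compose template injects into the applehealth service.
    INFLUX_URL = os.environ["INFLUXDB_URL"]      # e.g. http://influxdb:8086
    INFLUX_TOKEN = os.environ["INFLUXDB_TOKEN"]
    INFLUX_ORG = os.environ["INFLUXDB_ORG"]
    INFLUX_BUCKET = os.environ["INFLUXDB_BUCKET"]

    def write_samples(samples):
        """Write health samples to InfluxDB v2; return False on failure instead of raising."""
        points = [
            Point("apple_health")                # hypothetical measurement name
            .tag("metric", s["name"])
            .field("value", float(s["value"]))
            .time(s["timestamp"])
            for s in samples
        ]
        try:
            with InfluxDBClient(url=INFLUX_URL, token=INFLUX_TOKEN, org=INFLUX_ORG) as client:
                client.write_api(write_options=SYNCHRONOUS).write(
                    bucket=INFLUX_BUCKET, record=points
                )
            return True
        except ApiException as exc:              # auth/org/bucket errors reported by InfluxDB
            log.error("InfluxDB rejected write: %s", exc)
        except Exception as exc:                 # network failures, malformed payloads, etc.
            log.error("InfluxDB write failed: %s", exc)
        return False

A sketch like this returns False on a failed write so the HTTP handler can answer with an error instead of crashing the collector.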

💘 Generated with Crush

Assisted-by: GLM-5.1 via Crush <crush@charm.land>
2026-05-08 12:28:56 -05:00

---
# TSYS Developer Support Stack - Docker Compose Template
# Version: 2.0
# Purpose: Demo deployment with dynamic configuration
# DEMO CONFIGURATION ONLY - NOT FOR PRODUCTION
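# NOTE: every ${VAR} placeholder below (project/network names, ports, credentials,
# health-check timings) is expected to be filled in from the demo env template when
# this file is rendered; none of the values here are real defaults.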
networks:
  ${COMPOSE_NETWORK_NAME}:
    driver: bridge
    ipam:
      config:
        - subnet: ${NETWORK_SUBNET}
          gateway: ${NETWORK_GATEWAY}

volumes:
  ${COMPOSE_PROJECT_NAME}_homepage_data:
    driver: local
  ${COMPOSE_PROJECT_NAME}_pihole_data:
    driver: local
  ${COMPOSE_PROJECT_NAME}_dockhand_data:
    driver: local
  ${COMPOSE_PROJECT_NAME}_influxdb_data:
    driver: local
  ${COMPOSE_PROJECT_NAME}_grafana_data:
    driver: local
  ${COMPOSE_PROJECT_NAME}_drawio_data:
    driver: local
  ${COMPOSE_PROJECT_NAME}_kroki_data:
    driver: local
  ${COMPOSE_PROJECT_NAME}_atomictracker_data:
    driver: local
  ${COMPOSE_PROJECT_NAME}_archivebox_data:
    driver: local
  ${COMPOSE_PROJECT_NAME}_tubearchivist_data:
    driver: local
  ${COMPOSE_PROJECT_NAME}_ta_redis_data:
    driver: local
  ${COMPOSE_PROJECT_NAME}_ta_es_data:
    driver: local
  ${COMPOSE_PROJECT_NAME}_wakapi_data:
    driver: local
  ${COMPOSE_PROJECT_NAME}_mailhog_data:
    driver: local
  ${COMPOSE_PROJECT_NAME}_atuin_data:
    driver: local
  ${COMPOSE_PROJECT_NAME}_reactiveresume_postgres_data:
    driver: local
  ${COMPOSE_PROJECT_NAME}_reactiveresume_minio_data:
    driver: local
  ${COMPOSE_PROJECT_NAME}_kiwix_data:
    driver: local
  ${COMPOSE_PROJECT_NAME}_resumematcher_data:
    driver: local

services:
  # Docker Socket Proxy - Security Layer
  docker-socket-proxy:
    image: tecnativa/docker-socket-proxy:latest
    container_name: "${COMPOSE_PROJECT_NAME}-docker-socket-proxy"
    restart: unless-stopped
    networks:
      - ${COMPOSE_NETWORK_NAME}
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock:ro
    environment:
      - CONTAINERS=${DOCKER_SOCKET_PROXY_CONTAINERS}
      - IMAGES=${DOCKER_SOCKET_PROXY_IMAGES}
      - NETWORKS=${DOCKER_SOCKET_PROXY_NETWORKS}
      - VOLUMES=${DOCKER_SOCKET_PROXY_VOLUMES}
      - EXEC=${DOCKER_SOCKET_PROXY_EXEC}
      - PRIVILEGED=${DOCKER_SOCKET_PROXY_PRIVILEGED}
      - SERVICES=${DOCKER_SOCKET_PROXY_SERVICES}
      - TASKS=${DOCKER_SOCKET_PROXY_TASKS}
      - SECRETS=${DOCKER_SOCKET_PROXY_SECRETS}
      - CONFIGS=${DOCKER_SOCKET_PROXY_CONFIGS}
      - PLUGINS=${DOCKER_SOCKET_PROXY_PLUGINS}
      - POST=1
      - DELETE=1
      - ALLOW_START=1
      - ALLOW_STOP=1
      - ALLOW_RESTARTS=1
    deploy:
      resources:
        limits:
          memory: 128M
    labels:
      homepage.group: "Infrastructure"
      homepage.name: "Docker Socket Proxy"
      homepage.icon: "docker"
      homepage.description: >-
        Secure proxy for Docker socket access (internal only)

  # Homepage - Central Dashboard
  homepage:
    image: ghcr.io/gethomepage/homepage:latest
    container_name: "${COMPOSE_PROJECT_NAME}-homepage"
    restart: unless-stopped
    networks:
      - ${COMPOSE_NETWORK_NAME}
    ports:
      - "${HOMEPAGE_PORT}:3000"
    volumes:
      - ./config/homepage:/app/config
    environment:
      - HOMEPAGE_ALLOWED_HOSTS=${HOMEPAGE_ALLOWED_HOSTS}
      - PUID=${DEMO_UID}
      - PGID=${DEMO_GID}
    labels:
      homepage.group: "Developer Tools"
      homepage.name: "Homepage"
      homepage.icon: "homepage"
      homepage.href: "http://localhost:${HOMEPAGE_PORT}"
      homepage.description: "Central dashboard for service discovery"
    deploy:
      resources:
        limits:
          memory: 256M
    healthcheck:
      test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:3000"]
      interval: ${HEALTH_CHECK_INTERVAL}
      timeout: ${HEALTH_CHECK_TIMEOUT}
      retries: ${HEALTH_CHECK_RETRIES}

  # Pi-hole - DNS Management
  pihole:
    image: pihole/pihole:latest
    container_name: "${COMPOSE_PROJECT_NAME}-pihole"
    restart: unless-stopped
    networks:
      - ${COMPOSE_NETWORK_NAME}
    ports:
      - "${PIHOLE_PORT}:80"
    volumes:
      - ${COMPOSE_PROJECT_NAME}_pihole_data:/etc/pihole
    environment:
      - TZ=UTC
      - WEBPASSWORD=${PIHOLE_WEBPASSWORD}
      - WEBTHEME=${WEBTHEME}
      - PUID=${DEMO_UID}
      - PGID=${DEMO_GID}
    labels:
      homepage.group: "Infrastructure"
      homepage.name: "Pi-hole"
      homepage.icon: "pihole"
      homepage.href: "http://localhost:${PIHOLE_PORT}"
      homepage.description: "DNS management with ad blocking"
    deploy:
      resources:
        limits:
          memory: 256M
    healthcheck:
      test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost/admin"]
      interval: ${HEALTH_CHECK_INTERVAL}
      timeout: ${HEALTH_CHECK_TIMEOUT}
      retries: ${HEALTH_CHECK_RETRIES}

  # Dockhand - Docker Management
  dockhand:
    image: fnsys/dockhand:latest
    container_name: "${COMPOSE_PROJECT_NAME}-dockhand"
    restart: unless-stopped
    networks:
      - ${COMPOSE_NETWORK_NAME}
    ports:
      - "${DOCKHAND_PORT}:3000"
    volumes:
      - ${COMPOSE_PROJECT_NAME}_dockhand_data:/app/data
    environment:
      - DOCKER_HOST=tcp://docker-socket-proxy:2375
      - PUID=${DEMO_UID}
      - PGID=${DEMO_GID}
    depends_on:
      docker-socket-proxy:
        condition: service_started
    labels:
      homepage.group: "Infrastructure"
      homepage.name: "Dockhand"
      homepage.icon: "dockhand"
      homepage.href: "http://localhost:${DOCKHAND_PORT}"
      homepage.description: "Modern Docker management UI"
    deploy:
      resources:
        limits:
          memory: 256M
    healthcheck:
      test: ["CMD", "curl", "-f", "--silent", "http://localhost:3000"]
      interval: ${HEALTH_CHECK_INTERVAL}
      timeout: ${HEALTH_CHECK_TIMEOUT}
      retries: ${HEALTH_CHECK_RETRIES}

  # InfluxDB - Time Series Database
  influxdb:
    image: influxdb:2.7-alpine
    container_name: "${COMPOSE_PROJECT_NAME}-influxdb"
    restart: unless-stopped
    networks:
      - ${COMPOSE_NETWORK_NAME}
    ports:
      - "${INFLUXDB_PORT}:8086"
    volumes:
      - ${COMPOSE_PROJECT_NAME}_influxdb_data:/var/lib/influxdb2
    environment:
      - DOCKER_INFLUXDB_INIT_MODE=setup
      - DOCKER_INFLUXDB_INIT_USERNAME=${INFLUXDB_ADMIN_USER}
      - DOCKER_INFLUXDB_INIT_PASSWORD=${INFLUXDB_ADMIN_PASSWORD}
      - DOCKER_INFLUXDB_INIT_ORG=${INFLUXDB_ORG}
      - DOCKER_INFLUXDB_INIT_BUCKET=${INFLUXDB_BUCKET}
      - DOCKER_INFLUXDB_INIT_ADMIN_TOKEN=${INFLUXDB_AUTH_TOKEN}
      - PUID=${DEMO_UID}
      - PGID=${DEMO_GID}
    labels:
      homepage.group: "Monitoring"
      homepage.name: "InfluxDB"
      homepage.icon: "influxdb"
      homepage.href: "http://localhost:${INFLUXDB_PORT}"
      homepage.description: "Time series database for metrics"
    deploy:
      resources:
        limits:
          memory: 512M
    healthcheck:
      test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:8086/ping"]
      interval: ${HEALTH_CHECK_INTERVAL}
      timeout: ${HEALTH_CHECK_TIMEOUT}
      retries: ${HEALTH_CHECK_RETRIES}

  # Grafana - Visualization Platform
  grafana:
    image: grafana/grafana:latest
    container_name: "${COMPOSE_PROJECT_NAME}-grafana"
    restart: unless-stopped
    networks:
      - ${COMPOSE_NETWORK_NAME}
    ports:
      - "${GRAFANA_PORT}:3000"
    volumes:
      - ${COMPOSE_PROJECT_NAME}_grafana_data:/var/lib/grafana
      - ./config/grafana:/etc/grafana/provisioning:ro
    environment:
      - GF_SECURITY_ADMIN_USER=${GF_SECURITY_ADMIN_USER}
      - GF_SECURITY_ADMIN_PASSWORD=${GF_SECURITY_ADMIN_PASSWORD}
      - GF_INSTALL_PLUGINS=${GF_INSTALL_PLUGINS}
      - GF_SERVER_HTTP_PORT=3000
      - PUID=${DEMO_UID}
      - PGID=${DEMO_GID}
    labels:
      homepage.group: "Monitoring"
      homepage.name: "Grafana"
      homepage.icon: "grafana"
      homepage.href: "http://localhost:${GRAFANA_PORT}"
      homepage.description: "Analytics and visualization platform"
    deploy:
      resources:
        limits:
          memory: 256M
    healthcheck:
      test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:3000/api/health"]
      interval: ${HEALTH_CHECK_INTERVAL}
      timeout: ${HEALTH_CHECK_TIMEOUT}
      retries: ${HEALTH_CHECK_RETRIES}

  # Draw.io - Diagramming Server
  drawio:
    image: fjudith/draw.io:latest
    container_name: "${COMPOSE_PROJECT_NAME}-drawio"
    restart: unless-stopped
    networks:
      - ${COMPOSE_NETWORK_NAME}
    ports:
      - "${DRAWIO_PORT}:8080"
    volumes:
      - ${COMPOSE_PROJECT_NAME}_drawio_data:/root
    environment:
      - PUID=${DEMO_UID}
      - PGID=${DEMO_GID}
    labels:
      homepage.group: "Documentation"
      homepage.name: "Draw.io"
      homepage.icon: "drawio"
      homepage.href: "http://localhost:${DRAWIO_PORT}"
      homepage.description: "Web-based diagramming application"
    deploy:
      resources:
        limits:
          memory: 256M
    healthcheck:
      test: ["CMD", "curl", "-f", "--silent", "http://localhost:8080"]
      interval: ${HEALTH_CHECK_INTERVAL}
      timeout: ${HEALTH_CHECK_TIMEOUT}
      retries: ${HEALTH_CHECK_RETRIES}

  # Kroki - Diagrams as a Service
  kroki:
    image: yuzutech/kroki:latest
    container_name: "${COMPOSE_PROJECT_NAME}-kroki"
    restart: unless-stopped
    networks:
      - ${COMPOSE_NETWORK_NAME}
    ports:
      - "${KROKI_PORT}:8000"
    volumes:
      - ${COMPOSE_PROJECT_NAME}_kroki_data:/data
    environment:
      - KROKI_SAFE_MODE=secure
      - PUID=${DEMO_UID}
      - PGID=${DEMO_GID}
    labels:
      homepage.group: "Documentation"
      homepage.name: "Kroki"
      homepage.icon: "kroki"
      homepage.href: "http://localhost:${KROKI_PORT}"
      homepage.description: "Diagrams as a service"
    deploy:
      resources:
        limits:
          memory: 256M
    healthcheck:
      test: ["CMD", "curl", "-f", "--silent", "http://localhost:8000/health"]
      interval: ${HEALTH_CHECK_INTERVAL}
      timeout: ${HEALTH_CHECK_TIMEOUT}
      retries: ${HEALTH_CHECK_RETRIES}

  # Atomic Tracker - Habit Tracking
  atomictracker:
    image: ghcr.io/majorpeter/atomic-tracker:v1.3.1
    container_name: "${COMPOSE_PROJECT_NAME}-atomictracker"
    restart: unless-stopped
    networks:
      - ${COMPOSE_NETWORK_NAME}
    ports:
      - "${ATOMIC_TRACKER_PORT}:8080"
    volumes:
      - ${COMPOSE_PROJECT_NAME}_atomictracker_data:/app/data
    environment:
      - NODE_ENV=production
      - PUID=${DEMO_UID}
      - PGID=${DEMO_GID}
    labels:
      homepage.group: "Developer Tools"
      homepage.name: "Atomic Tracker"
      homepage.icon: "atomic-tracker"
      homepage.href: "http://localhost:${ATOMIC_TRACKER_PORT}"
      homepage.description: "Habit tracking and personal dashboard"
    deploy:
      resources:
        limits:
          memory: 256M
    healthcheck:
      test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:8080"]
      interval: ${HEALTH_CHECK_INTERVAL}
      timeout: ${HEALTH_CHECK_TIMEOUT}
      retries: ${HEALTH_CHECK_RETRIES}

  # ArchiveBox - Web Archiving
  archivebox:
    image: archivebox/archivebox:latest
    container_name: "${COMPOSE_PROJECT_NAME}-archivebox"
    restart: unless-stopped
    networks:
      - ${COMPOSE_NETWORK_NAME}
    ports:
      - "${ARCHIVEBOX_PORT}:8000"
    volumes:
      - ${COMPOSE_PROJECT_NAME}_archivebox_data:/data
    environment:
      - ADMIN_USERNAME=${ARCHIVEBOX_ADMIN_USER}
      - ADMIN_PASSWORD=${ARCHIVEBOX_ADMIN_PASSWORD}
      - ALLOWED_HOSTS=*
      - CSRF_TRUSTED_ORIGINS=http://localhost:${ARCHIVEBOX_PORT}
      - PUBLIC_INDEX=True
      - PUBLIC_SNAPSHOTS=True
      - PUBLIC_ADD_VIEW=False
      - PUID=${DEMO_UID}
      - PGID=${DEMO_GID}
    labels:
      homepage.group: "Developer Tools"
      homepage.name: "ArchiveBox"
      homepage.icon: "archivebox"
      homepage.href: "http://localhost:${ARCHIVEBOX_PORT}"
      homepage.description: "Web archiving solution"
    deploy:
      resources:
        limits:
          memory: 512M
    healthcheck:
      test: ["CMD", "curl", "-fsS", "http://localhost:8000/health/"]
      interval: ${HEALTH_CHECK_INTERVAL}
      timeout: ${HEALTH_CHECK_TIMEOUT}
      retries: 5
      start_period: 60s

  # Tube Archivist - Redis
  ta-redis:
    image: redis:7-alpine
    container_name: "${COMPOSE_PROJECT_NAME}-ta-redis"
    restart: unless-stopped
    networks:
      - ${COMPOSE_NETWORK_NAME}
    volumes:
      - ${COMPOSE_PROJECT_NAME}_ta_redis_data:/data
    deploy:
      resources:
        limits:
          memory: 256M
    healthcheck:
      test: ["CMD", "redis-cli", "ping"]
      interval: ${HEALTH_CHECK_INTERVAL}
      timeout: ${HEALTH_CHECK_TIMEOUT}
      retries: ${HEALTH_CHECK_RETRIES}

  # Tube Archivist - Elasticsearch
  ta-elasticsearch:
    image: elasticsearch:8.12.0
    container_name: "${COMPOSE_PROJECT_NAME}-ta-elasticsearch"
    restart: unless-stopped
    networks:
      - ${COMPOSE_NETWORK_NAME}
    volumes:
      - ${COMPOSE_PROJECT_NAME}_ta_es_data:/usr/share/elasticsearch/data
    environment:
      - discovery.type=single-node
      - ES_JAVA_OPTS=${ES_JAVA_OPTS}
      - xpack.security.enabled=false
      - xpack.security.http.ssl.enabled=false
      - bootstrap.memory_lock=true
      - path.repo=/usr/share/elasticsearch/data/snapshot
    ulimits:
      memlock:
        soft: -1
        hard: -1
    deploy:
      resources:
        limits:
          memory: 1024M
    healthcheck:
      test: ["CMD-SHELL", "curl -sf http://localhost:9200/_cluster/health || exit 1"]
      interval: ${HEALTH_CHECK_INTERVAL}
      timeout: ${HEALTH_CHECK_TIMEOUT}
      retries: 10
      start_period: 60s

  # Tube Archivist - YouTube Archiving
  tubearchivist:
    image: bbilly1/tubearchivist:latest
    container_name: "${COMPOSE_PROJECT_NAME}-tubearchivist"
    restart: unless-stopped
    networks:
      - ${COMPOSE_NETWORK_NAME}
    ports:
      - "${TUBE_ARCHIVIST_PORT}:8000"
    volumes:
      - ${COMPOSE_PROJECT_NAME}_tubearchivist_data:/cache
    environment:
      - ES_URL=http://ta-elasticsearch:9200
      - REDIS_CON=redis://ta-redis:6379
      - ELASTIC_PASSWORD=${ELASTIC_PASSWORD}
      - HOST_UID=${DEMO_UID}
      - HOST_GID=${DEMO_GID}
      - TA_HOST=${TA_HOST}
      - TA_USERNAME=${TA_USERNAME}
      - TA_PASSWORD=${TA_PASSWORD}
      - TZ=UTC
    depends_on:
      ta-redis:
        condition: service_healthy
      ta-elasticsearch:
        condition: service_healthy
    labels:
      homepage.group: "Developer Tools"
      homepage.name: "Tube Archivist"
      homepage.icon: "tube-archivist"
      homepage.href: "http://localhost:${TUBE_ARCHIVIST_PORT}"
      homepage.description: "YouTube video archiving"
    deploy:
      resources:
        limits:
          memory: 512M
    healthcheck:
      test: ["CMD", "curl", "-f", "--silent", "http://localhost:8000/api/health/"]
      interval: ${HEALTH_CHECK_INTERVAL}
      timeout: ${HEALTH_CHECK_TIMEOUT}
      retries: 5
      start_period: 120s

  # Wakapi - Time Tracking
  wakapi:
    image: ghcr.io/muety/wakapi:latest
    container_name: "${COMPOSE_PROJECT_NAME}-wakapi"
    restart: unless-stopped
    networks:
      - ${COMPOSE_NETWORK_NAME}
    ports:
      - "${WAKAPI_PORT}:3000"
    volumes:
      - ${COMPOSE_PROJECT_NAME}_wakapi_data:/data
    environment:
      - WAKAPI_PASSWORD_SALT=${WAKAPI_PASSWORD_SALT}
      - PUID=${DEMO_UID}
      - PGID=${DEMO_GID}
    labels:
      homepage.group: "Developer Tools"
      homepage.name: "Wakapi"
      homepage.icon: "wakapi"
      homepage.href: "http://localhost:${WAKAPI_PORT}"
      homepage.description: "Open-source WakaTime alternative"
    deploy:
      resources:
        limits:
          memory: 256M
    healthcheck:
      test: ["CMD", "/app/healthcheck"]
      interval: ${HEALTH_CHECK_INTERVAL}
      timeout: ${HEALTH_CHECK_TIMEOUT}
      retries: ${HEALTH_CHECK_RETRIES}

  # MailHog - Email Testing
  mailhog:
    image: mailhog/mailhog:latest
    container_name: "${COMPOSE_PROJECT_NAME}-mailhog"
    restart: unless-stopped
    networks:
      - ${COMPOSE_NETWORK_NAME}
    ports:
      - "${MAILHOG_PORT}:8025"
      - "${MAILHOG_SMTP_PORT}:1025"
    volumes:
      - ${COMPOSE_PROJECT_NAME}_mailhog_data:/maildir
    environment:
      - PUID=${DEMO_UID}
      - PGID=${DEMO_GID}
    labels:
      homepage.group: "Developer Tools"
      homepage.name: "MailHog"
      homepage.icon: "mailhog"
      homepage.href: "http://localhost:${MAILHOG_PORT}"
      homepage.description: "Web and API based SMTP testing"
    deploy:
      resources:
        limits:
          memory: 128M
    healthcheck:
      test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:8025"]
      interval: ${HEALTH_CHECK_INTERVAL}
      timeout: ${HEALTH_CHECK_TIMEOUT}
      retries: ${HEALTH_CHECK_RETRIES}

  # Atuin - Shell History Synchronization
  atuin:
    image: ghcr.io/atuinsh/atuin:v18.10.0
    container_name: "${COMPOSE_PROJECT_NAME}-atuin"
    restart: unless-stopped
    command:
      - server
      - start
    networks:
      - ${COMPOSE_NETWORK_NAME}
    ports:
      - "${ATUIN_PORT}:8888"
    volumes:
      - ${COMPOSE_PROJECT_NAME}_atuin_data:/config
    environment:
      - ATUIN_HOST=${ATUIN_HOST}
      - ATUIN_PORT=8888
      - ATUIN_OPEN_REGISTRATION=${ATUIN_OPEN_REGISTRATION}
      - ATUIN_DB_URI=sqlite:///config/atuin.db
      - RUST_LOG=info,atuin_server=info
    labels:
      homepage.group: "Developer Tools"
      homepage.name: "Atuin"
      homepage.icon: "atuin"
      homepage.href: "http://localhost:${ATUIN_PORT}"
      homepage.description: "Magical shell history synchronization"
    deploy:
      resources:
        limits:
          memory: 256M
    healthcheck:
      test: ["CMD", "bash", "-c", "echo > /dev/tcp/localhost/8888"]
      interval: ${HEALTH_CHECK_INTERVAL}
      timeout: ${HEALTH_CHECK_TIMEOUT}
      retries: 5
      start_period: 30s

  # Reactive Resume - Postgres Database
  reactiveresume-postgres:
    image: postgres:16-alpine
    container_name: "${COMPOSE_PROJECT_NAME}-reactiveresume-postgres"
    restart: unless-stopped
    networks:
      - ${COMPOSE_NETWORK_NAME}
    volumes:
      - ${COMPOSE_PROJECT_NAME}_reactiveresume_postgres_data:/var/lib/postgresql/data
    environment:
      POSTGRES_DB: ${RESUME_POSTGRES_DB}
      POSTGRES_USER: ${RESUME_POSTGRES_USER}
      POSTGRES_PASSWORD: ${RESUME_POSTGRES_PASSWORD}
    deploy:
      resources:
        limits:
          memory: 256M
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U ${RESUME_POSTGRES_USER} -d ${RESUME_POSTGRES_DB}"]
      interval: ${HEALTH_CHECK_INTERVAL}
      timeout: ${HEALTH_CHECK_TIMEOUT}
      retries: 5

  # Reactive Resume - Minio Storage
  reactiveresume-minio:
    image: minio/minio
    container_name: "${COMPOSE_PROJECT_NAME}-reactiveresume-minio"
    restart: unless-stopped
    command: server /data
    networks:
      - ${COMPOSE_NETWORK_NAME}
    ports:
      - "${RESUME_MINIO_PORT}:9000"
    volumes:
      - ${COMPOSE_PROJECT_NAME}_reactiveresume_minio_data:/data
    environment:
      MINIO_ROOT_USER: ${RESUME_MINIO_USER}
      MINIO_ROOT_PASSWORD: ${RESUME_MINIO_PASSWORD}
    deploy:
      resources:
        limits:
          memory: 256M
    healthcheck:
      test: ["CMD", "curl", "-f", "--silent", "http://localhost:9000/minio/health/live"]
      interval: ${HEALTH_CHECK_INTERVAL}
      timeout: ${HEALTH_CHECK_TIMEOUT}
      retries: ${HEALTH_CHECK_RETRIES}

  # Reactive Resume - Chrome (PDF Generation)
  reactiveresume-chrome:
    image: ghcr.io/browserless/chromium:latest
    container_name: "${COMPOSE_PROJECT_NAME}-reactiveresume-chrome"
    restart: unless-stopped
    networks:
      - ${COMPOSE_NETWORK_NAME}
    environment:
      TIMEOUT: 10000
      CONCURRENT: 10
      TOKEN: ${RESUME_CHROME_TOKEN}
      # Booleans in an environment map must be quoted so Compose treats them as strings.
      EXIT_ON_HEALTH_FAILURE: "true"
      PRE_REQUEST_HEALTH_CHECK: "true"
    deploy:
      resources:
        limits:
          memory: 512M
    healthcheck:
      test: ["CMD", "curl", "-f", "--silent", "http://localhost:3000/health"]
      interval: ${HEALTH_CHECK_INTERVAL}
      timeout: ${HEALTH_CHECK_TIMEOUT}
      retries: ${HEALTH_CHECK_RETRIES}
      start_period: 30s

  # Reactive Resume - Resume Builder
  reactiveresume-app:
    image: amruthpillai/reactive-resume:latest
    container_name: "${COMPOSE_PROJECT_NAME}-reactiveresume-app"
    restart: unless-stopped
    networks:
      - ${COMPOSE_NETWORK_NAME}
    ports:
      - "${REACTIVE_RESUME_PORT}:3000"
    depends_on:
      reactiveresume-postgres:
        condition: service_healthy
      reactiveresume-minio:
        condition: service_started
      reactiveresume-chrome:
        condition: service_started
    environment:
      PORT: 3000
      NODE_ENV: production
      PUBLIC_URL: http://localhost:${REACTIVE_RESUME_PORT}
      STORAGE_URL: http://localhost:${RESUME_MINIO_PORT}/default
      CHROME_TOKEN: ${RESUME_CHROME_TOKEN}
      CHROME_URL: ws://reactiveresume-chrome:3000
      DATABASE_URL: postgresql://${RESUME_POSTGRES_USER}:${RESUME_POSTGRES_PASSWORD}@reactiveresume-postgres:5432/${RESUME_POSTGRES_DB}
      ACCESS_TOKEN_SECRET: ${RESUME_ACCESS_TOKEN_SECRET}
      REFRESH_TOKEN_SECRET: ${RESUME_REFRESH_TOKEN_SECRET}
      MAIL_FROM: noreply@localhost
      STORAGE_ENDPOINT: reactiveresume-minio
      STORAGE_PORT: 9000
      STORAGE_REGION: us-east-1
      STORAGE_BUCKET: default
      STORAGE_ACCESS_KEY: ${RESUME_MINIO_USER}
      STORAGE_SECRET_KEY: ${RESUME_MINIO_PASSWORD}
      STORAGE_USE_SSL: "false"
      STORAGE_SKIP_BUCKET_CHECK: "false"
    labels:
      homepage.group: "Productivity"
      homepage.name: "Reactive Resume"
      homepage.icon: "reactive-resume"
      homepage.href: "http://localhost:${REACTIVE_RESUME_PORT}"
      homepage.description: "Open-source resume builder"
    deploy:
      resources:
        limits:
          memory: 512M
    healthcheck:
      test: ["CMD", "curl", "-f", "--silent", "http://localhost:3000/api/health"]
      interval: ${HEALTH_CHECK_INTERVAL}
      timeout: ${HEALTH_CHECK_TIMEOUT}
      retries: 5
      start_period: 30s

  # Metrics - GitHub Metrics Visualization
  metrics:
    image: ghcr.io/lowlighter/metrics:latest
    container_name: "${COMPOSE_PROJECT_NAME}-metrics"
    restart: unless-stopped
    entrypoint: [""]
    command: ["npm", "start"]
    networks:
      - ${COMPOSE_NETWORK_NAME}
    ports:
      - "${METRICS_PORT}:3000"
    volumes:
      - ./config/metrics/settings.json:/metrics/settings.json:ro
    environment:
      - PUID=${DEMO_UID}
      - PGID=${DEMO_GID}
    labels:
      homepage.group: "Monitoring"
      homepage.name: "Metrics"
      homepage.icon: "github"
      homepage.href: "http://localhost:${METRICS_PORT}"
      homepage.description: "GitHub metrics visualization"
    deploy:
      resources:
        limits:
          memory: 256M
    healthcheck:
      test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:3000"]
      interval: ${HEALTH_CHECK_INTERVAL}
      timeout: ${HEALTH_CHECK_TIMEOUT}
      retries: ${HEALTH_CHECK_RETRIES}
      start_period: 30s

  # Kiwix - Offline Wiki
  kiwix:
    image: ghcr.io/kiwix/kiwix-serve:latest
    container_name: "${COMPOSE_PROJECT_NAME}-kiwix"
    restart: unless-stopped
    networks:
      - ${COMPOSE_NETWORK_NAME}
    ports:
      - "${KIWIX_PORT}:8080"
    volumes:
      - ${COMPOSE_PROJECT_NAME}_kiwix_data:/data
    environment:
      - PUID=${DEMO_UID}
      - PGID=${DEMO_GID}
    labels:
      homepage.group: "Documentation"
      homepage.name: "Kiwix"
      homepage.icon: "kiwix"
      homepage.href: "http://localhost:${KIWIX_PORT}"
      homepage.description: "Offline wiki reader"
    deploy:
      resources:
        limits:
          memory: 256M
    healthcheck:
      test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:8080"]
      interval: ${HEALTH_CHECK_INTERVAL}
      timeout: ${HEALTH_CHECK_TIMEOUT}
      retries: ${HEALTH_CHECK_RETRIES}

  # Resume Matcher - AI Resume Screening
  resumematcher:
    image: ghcr.io/srbhr/resume-matcher:latest
    container_name: "${COMPOSE_PROJECT_NAME}-resumematcher"
    restart: unless-stopped
    networks:
      - ${COMPOSE_NETWORK_NAME}
    ports:
      - "${RESUME_MATCHER_PORT}:3000"
    volumes:
      - ${COMPOSE_PROJECT_NAME}_resumematcher_data:/app/backend/data
    environment:
      - PUID=${DEMO_UID}
      - PGID=${DEMO_GID}
    labels:
      homepage.group: "Productivity"
      homepage.name: "Resume Matcher"
      homepage.icon: "resume"
      homepage.href: "http://localhost:${RESUME_MATCHER_PORT}"
      homepage.description: "AI-powered resume screening"
    deploy:
      resources:
        limits:
          memory: 512M
    healthcheck:
      test: ["CMD", "curl", "-f", "--silent", "http://localhost:3000/api/v1/health"]
      interval: ${HEALTH_CHECK_INTERVAL}
      timeout: ${HEALTH_CHECK_TIMEOUT}
      retries: 5
      start_period: 60s

  # Apple Health - Health Data Collector
  applehealth:
    build:
      context: ./config/applehealth
      dockerfile: Dockerfile
    image: tsys-applehealth:latest
    container_name: "${COMPOSE_PROJECT_NAME}-applehealth"
    restart: unless-stopped
    networks:
      - ${COMPOSE_NETWORK_NAME}
    ports:
      - "${APPLEHEALTH_PORT}:5353"
    environment:
      - INFLUXDB_URL=http://influxdb:8086
      - INFLUXDB_TOKEN=${INFLUXDB_AUTH_TOKEN}
      - INFLUXDB_ORG=${INFLUXDB_ORG}
      - INFLUXDB_BUCKET=${INFLUXDB_BUCKET}
      - PUID=${DEMO_UID}
      - PGID=${DEMO_GID}
    depends_on:
      influxdb:
        condition: service_healthy
    labels:
      homepage.group: "Monitoring"
      homepage.name: "Apple Health"
      homepage.icon: "apple-health"
      homepage.href: "http://localhost:${APPLEHEALTH_PORT}"
      homepage.description: "Health data collection and visualization"
    deploy:
      resources:
        limits:
          memory: 256M
    healthcheck:
      test: ["CMD", "python", "-c", "import urllib.request; urllib.request.urlopen('http://localhost:5353/health')"]
      interval: ${HEALTH_CHECK_INTERVAL}
      timeout: ${HEALTH_CHECK_TIMEOUT}
      retries: ${HEALTH_CHECK_RETRIES}
      start_period: 15s