- Route Dockhand Docker access through docker-socket-proxy via DOCKER_HOST=tcp://docker-socket-proxy:2375 instead of direct socket mount, enforcing the security model documented in AGENTS.md - Add POST, DELETE, ALLOW_START, ALLOW_STOP, ALLOW_RESTARTS permissions to socket proxy for Dockhand container management - Add deploy.resources.limits.memory to all 16 services (128M-1024M depending on service needs) - Add MailHog SMTP port 4019 mapping (1025 internal) so applications can actually send test emails to MailHog - Remove stale config/portainer/ directory 💘 Generated with Crush Assisted-by: GLM-5.1 via Crush <crush@charm.land>
589 lines
17 KiB
YAML
---
# TSYS Developer Support Stack - Docker Compose Template
# Version: 2.0
# Purpose: Demo deployment with dynamic configuration
# DEMO CONFIGURATION ONLY - NOT FOR PRODUCTION

# Single bridge network; name and addressing are injected at deploy time.
networks:
  ${COMPOSE_NETWORK_NAME}:
    driver: bridge
    ipam:
      config:
        - subnet: ${NETWORK_SUBNET}
          gateway: ${NETWORK_GATEWAY}

# Named local volumes, one per stateful service, prefixed with the project name
# so parallel deployments do not collide.
volumes:
  ${COMPOSE_PROJECT_NAME}_homepage_data:
    driver: local
  ${COMPOSE_PROJECT_NAME}_pihole_data:
    driver: local
  ${COMPOSE_PROJECT_NAME}_dockhand_data:
    driver: local
  ${COMPOSE_PROJECT_NAME}_influxdb_data:
    driver: local
  ${COMPOSE_PROJECT_NAME}_grafana_data:
    driver: local
  ${COMPOSE_PROJECT_NAME}_drawio_data:
    driver: local
  ${COMPOSE_PROJECT_NAME}_kroki_data:
    driver: local
  ${COMPOSE_PROJECT_NAME}_atomictracker_data:
    driver: local
  ${COMPOSE_PROJECT_NAME}_archivebox_data:
    driver: local
  ${COMPOSE_PROJECT_NAME}_tubearchivist_data:
    driver: local
  ${COMPOSE_PROJECT_NAME}_ta_redis_data:
    driver: local
  ${COMPOSE_PROJECT_NAME}_ta_es_data:
    driver: local
  ${COMPOSE_PROJECT_NAME}_wakapi_data:
    driver: local
  ${COMPOSE_PROJECT_NAME}_mailhog_data:
    driver: local
  ${COMPOSE_PROJECT_NAME}_atuin_data:
    driver: local

services:
  # Docker Socket Proxy - Security Layer
  # The only container that mounts the host Docker socket; all other services
  # reach the Docker API via tcp://docker-socket-proxy:2375.
  docker-socket-proxy:
    image: tecnativa/docker-socket-proxy:latest
    container_name: "${COMPOSE_PROJECT_NAME}-docker-socket-proxy"
    restart: unless-stopped
    networks:
      - ${COMPOSE_NETWORK_NAME}
    volumes:
      # Read-only bind mount of the host socket.
      - /var/run/docker.sock:/var/run/docker.sock:ro
    environment:
      - CONTAINERS=${DOCKER_SOCKET_PROXY_CONTAINERS}
      - IMAGES=${DOCKER_SOCKET_PROXY_IMAGES}
      - NETWORKS=${DOCKER_SOCKET_PROXY_NETWORKS}
      - VOLUMES=${DOCKER_SOCKET_PROXY_VOLUMES}
      - EXEC=${DOCKER_SOCKET_PROXY_EXEC}
      - PRIVILEGED=${DOCKER_SOCKET_PROXY_PRIVILEGED}
      - SERVICES=${DOCKER_SOCKET_PROXY_SERVICES}
      - TASKS=${DOCKER_SOCKET_PROXY_TASKS}
      - SECRETS=${DOCKER_SOCKET_PROXY_SECRETS}
      - CONFIGS=${DOCKER_SOCKET_PROXY_CONFIGS}
      - PLUGINS=${DOCKER_SOCKET_PROXY_PLUGINS}
      # Write permissions required by Dockhand for container lifecycle management.
      - POST=1
      - DELETE=1
      - ALLOW_START=1
      - ALLOW_STOP=1
      - ALLOW_RESTARTS=1
    deploy:
      resources:
        limits:
          memory: 128M
    labels:
      homepage.group: "Infrastructure"
      homepage.name: "Docker Socket Proxy"
      homepage.icon: "docker"
      homepage.description: "Secure proxy for Docker socket access (internal only)"

# Homepage - Central Dashboard
|
|
homepage:
|
|
image: ghcr.io/gethomepage/homepage:latest
|
|
container_name: "${COMPOSE_PROJECT_NAME}-homepage"
|
|
restart: unless-stopped
|
|
networks:
|
|
- ${COMPOSE_NETWORK_NAME}
|
|
ports:
|
|
- "${HOMEPAGE_PORT}:3000"
|
|
volumes:
|
|
- ${COMPOSE_PROJECT_NAME}_homepage_data:/app/config
|
|
- ./config/homepage:/app/config/default:ro
|
|
environment:
|
|
- PUID=${DEMO_UID}
|
|
- PGID=${DEMO_GID}
|
|
labels:
|
|
homepage.group: "Developer Tools"
|
|
homepage.name: "Homepage"
|
|
homepage.icon: "homepage"
|
|
homepage.href: "http://localhost:${HOMEPAGE_PORT}"
|
|
homepage.description: "Central dashboard for service discovery"
|
|
deploy:
|
|
resources:
|
|
limits:
|
|
memory: 256M
|
|
healthcheck:
|
|
test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider",
|
|
"http://localhost:3000"]
|
|
interval: ${HEALTH_CHECK_INTERVAL}
|
|
timeout: ${HEALTH_CHECK_TIMEOUT}
|
|
retries: ${HEALTH_CHECK_RETRIES}
|
|
|
|
# Pi-hole - DNS Management
|
|
pihole:
|
|
image: pihole/pihole:latest
|
|
container_name: "${COMPOSE_PROJECT_NAME}-pihole"
|
|
restart: unless-stopped
|
|
networks:
|
|
- ${COMPOSE_NETWORK_NAME}
|
|
ports:
|
|
- "${PIHOLE_PORT}:80"
|
|
volumes:
|
|
- ${COMPOSE_PROJECT_NAME}_pihole_data:/etc/pihole
|
|
environment:
|
|
- TZ=UTC
|
|
- WEBPASSWORD=${PIHOLE_WEBPASSWORD}
|
|
- WEBTHEME=${WEBTHEME}
|
|
- PUID=${DEMO_UID}
|
|
- PGID=${DEMO_GID}
|
|
labels:
|
|
homepage.group: "Infrastructure"
|
|
homepage.name: "Pi-hole"
|
|
homepage.icon: "pihole"
|
|
homepage.href: "http://localhost:${PIHOLE_PORT}"
|
|
homepage.description: "DNS management with ad blocking"
|
|
deploy:
|
|
resources:
|
|
limits:
|
|
memory: 256M
|
|
healthcheck:
|
|
test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider",
|
|
"http://localhost/admin"]
|
|
interval: ${HEALTH_CHECK_INTERVAL}
|
|
timeout: ${HEALTH_CHECK_TIMEOUT}
|
|
retries: ${HEALTH_CHECK_RETRIES}
|
|
|
|
# Dockhand - Docker Management
|
|
dockhand:
|
|
image: fnsys/dockhand:latest
|
|
container_name: "${COMPOSE_PROJECT_NAME}-dockhand"
|
|
restart: unless-stopped
|
|
networks:
|
|
- ${COMPOSE_NETWORK_NAME}
|
|
ports:
|
|
- "${DOCKHAND_PORT}:3000"
|
|
volumes:
|
|
- ${COMPOSE_PROJECT_NAME}_dockhand_data:/app/data
|
|
environment:
|
|
- DOCKER_HOST=tcp://docker-socket-proxy:2375
|
|
- PUID=${DEMO_UID}
|
|
- PGID=${DEMO_GID}
|
|
depends_on:
|
|
docker-socket-proxy:
|
|
condition: service_started
|
|
labels:
|
|
homepage.group: "Infrastructure"
|
|
homepage.name: "Dockhand"
|
|
homepage.icon: "dockhand"
|
|
homepage.href: "http://localhost:${DOCKHAND_PORT}"
|
|
homepage.description: "Modern Docker management UI"
|
|
deploy:
|
|
resources:
|
|
limits:
|
|
memory: 256M
|
|
healthcheck:
|
|
test: ["CMD", "curl", "-f", "--silent",
|
|
"http://localhost:3000"]
|
|
interval: ${HEALTH_CHECK_INTERVAL}
|
|
timeout: ${HEALTH_CHECK_TIMEOUT}
|
|
retries: ${HEALTH_CHECK_RETRIES}
|
|
|
|
# InfluxDB - Time Series Database
|
|
influxdb:
|
|
image: influxdb:2.7-alpine
|
|
container_name: "${COMPOSE_PROJECT_NAME}-influxdb"
|
|
restart: unless-stopped
|
|
networks:
|
|
- ${COMPOSE_NETWORK_NAME}
|
|
ports:
|
|
- "${INFLUXDB_PORT}:8086"
|
|
volumes:
|
|
- ${COMPOSE_PROJECT_NAME}_influxdb_data:/var/lib/influxdb2
|
|
environment:
|
|
- DOCKER_INFLUXDB_INIT_MODE=setup
|
|
- DOCKER_INFLUXDB_INIT_USERNAME=${INFLUXDB_ADMIN_USER}
|
|
- DOCKER_INFLUXDB_INIT_PASSWORD=${INFLUXDB_ADMIN_PASSWORD}
|
|
- DOCKER_INFLUXDB_INIT_ORG=${INFLUXDB_ORG}
|
|
- DOCKER_INFLUXDB_INIT_BUCKET=${INFLUXDB_BUCKET}
|
|
- DOCKER_INFLUXDB_INIT_ADMIN_TOKEN=${INFLUXDB_AUTH_TOKEN}
|
|
- PUID=${DEMO_UID}
|
|
- PGID=${DEMO_GID}
|
|
labels:
|
|
homepage.group: "Monitoring"
|
|
homepage.name: "InfluxDB"
|
|
homepage.icon: "influxdb"
|
|
homepage.href: "http://localhost:${INFLUXDB_PORT}"
|
|
homepage.description: "Time series database for metrics"
|
|
deploy:
|
|
resources:
|
|
limits:
|
|
memory: 512M
|
|
healthcheck:
|
|
test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider",
|
|
"http://localhost:8086/ping"]
|
|
interval: ${HEALTH_CHECK_INTERVAL}
|
|
timeout: ${HEALTH_CHECK_TIMEOUT}
|
|
retries: ${HEALTH_CHECK_RETRIES}
|
|
|
|
# Grafana - Visualization Platform
|
|
grafana:
|
|
image: grafana/grafana:latest
|
|
container_name: "${COMPOSE_PROJECT_NAME}-grafana"
|
|
restart: unless-stopped
|
|
networks:
|
|
- ${COMPOSE_NETWORK_NAME}
|
|
ports:
|
|
- "${GRAFANA_PORT}:3000"
|
|
volumes:
|
|
- ${COMPOSE_PROJECT_NAME}_grafana_data:/var/lib/grafana
|
|
- ./config/grafana:/etc/grafana/provisioning:ro
|
|
environment:
|
|
- GF_SECURITY_ADMIN_USER=${GF_SECURITY_ADMIN_USER}
|
|
- GF_SECURITY_ADMIN_PASSWORD=${GF_SECURITY_ADMIN_PASSWORD}
|
|
- GF_INSTALL_PLUGINS=${GF_INSTALL_PLUGINS}
|
|
- GF_SERVER_HTTP_PORT=3000
|
|
- PUID=${DEMO_UID}
|
|
- PGID=${DEMO_GID}
|
|
labels:
|
|
homepage.group: "Monitoring"
|
|
homepage.name: "Grafana"
|
|
homepage.icon: "grafana"
|
|
homepage.href: "http://localhost:${GRAFANA_PORT}"
|
|
homepage.description: "Analytics and visualization platform"
|
|
deploy:
|
|
resources:
|
|
limits:
|
|
memory: 256M
|
|
healthcheck:
|
|
test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider",
|
|
"http://localhost:3000/api/health"]
|
|
interval: ${HEALTH_CHECK_INTERVAL}
|
|
timeout: ${HEALTH_CHECK_TIMEOUT}
|
|
retries: ${HEALTH_CHECK_RETRIES}
|
|
|
|
# Draw.io - Diagramming Server
|
|
drawio:
|
|
image: fjudith/draw.io:latest
|
|
container_name: "${COMPOSE_PROJECT_NAME}-drawio"
|
|
restart: unless-stopped
|
|
networks:
|
|
- ${COMPOSE_NETWORK_NAME}
|
|
ports:
|
|
- "${DRAWIO_PORT}:8080"
|
|
volumes:
|
|
- ${COMPOSE_PROJECT_NAME}_drawio_data:/root
|
|
environment:
|
|
- PUID=${DEMO_UID}
|
|
- PGID=${DEMO_GID}
|
|
labels:
|
|
homepage.group: "Documentation"
|
|
homepage.name: "Draw.io"
|
|
homepage.icon: "drawio"
|
|
homepage.href: "http://localhost:${DRAWIO_PORT}"
|
|
homepage.description: "Web-based diagramming application"
|
|
deploy:
|
|
resources:
|
|
limits:
|
|
memory: 256M
|
|
healthcheck:
|
|
test: ["CMD", "curl", "-f", "--silent",
|
|
"http://localhost:8080"]
|
|
interval: ${HEALTH_CHECK_INTERVAL}
|
|
timeout: ${HEALTH_CHECK_TIMEOUT}
|
|
retries: ${HEALTH_CHECK_RETRIES}
|
|
|
|
# Kroki - Diagrams as a Service
|
|
kroki:
|
|
image: yuzutech/kroki:latest
|
|
container_name: "${COMPOSE_PROJECT_NAME}-kroki"
|
|
restart: unless-stopped
|
|
networks:
|
|
- ${COMPOSE_NETWORK_NAME}
|
|
ports:
|
|
- "${KROKI_PORT}:8000"
|
|
volumes:
|
|
- ${COMPOSE_PROJECT_NAME}_kroki_data:/data
|
|
environment:
|
|
- KROKI_SAFE_MODE=secure
|
|
- PUID=${DEMO_UID}
|
|
- PGID=${DEMO_GID}
|
|
labels:
|
|
homepage.group: "Documentation"
|
|
homepage.name: "Kroki"
|
|
homepage.icon: "kroki"
|
|
homepage.href: "http://localhost:${KROKI_PORT}"
|
|
homepage.description: "Diagrams as a service"
|
|
deploy:
|
|
resources:
|
|
limits:
|
|
memory: 256M
|
|
healthcheck:
|
|
test: ["CMD", "curl", "-f", "--silent",
|
|
"http://localhost:8000/health"]
|
|
interval: ${HEALTH_CHECK_INTERVAL}
|
|
timeout: ${HEALTH_CHECK_TIMEOUT}
|
|
retries: ${HEALTH_CHECK_RETRIES}
|
|
|
|
# Atomic Tracker - Habit Tracking
|
|
atomictracker:
|
|
image: ghcr.io/majorpeter/atomic-tracker:v1.3.1
|
|
container_name: "${COMPOSE_PROJECT_NAME}-atomictracker"
|
|
restart: unless-stopped
|
|
networks:
|
|
- ${COMPOSE_NETWORK_NAME}
|
|
ports:
|
|
- "${ATOMIC_TRACKER_PORT}:8080"
|
|
volumes:
|
|
- ${COMPOSE_PROJECT_NAME}_atomictracker_data:/app/data
|
|
environment:
|
|
- NODE_ENV=production
|
|
- PUID=${DEMO_UID}
|
|
- PGID=${DEMO_GID}
|
|
labels:
|
|
homepage.group: "Developer Tools"
|
|
homepage.name: "Atomic Tracker"
|
|
homepage.icon: "atomic-tracker"
|
|
homepage.href: "http://localhost:${ATOMIC_TRACKER_PORT}"
|
|
homepage.description: "Habit tracking and personal dashboard"
|
|
deploy:
|
|
resources:
|
|
limits:
|
|
memory: 256M
|
|
deploy:
|
|
resources:
|
|
limits:
|
|
memory: 256M
|
|
healthcheck:
|
|
test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider",
|
|
"http://localhost:8080"]
|
|
interval: ${HEALTH_CHECK_INTERVAL}
|
|
timeout: ${HEALTH_CHECK_TIMEOUT}
|
|
retries: ${HEALTH_CHECK_RETRIES}
|
|
|
|
# ArchiveBox - Web Archiving
|
|
archivebox:
|
|
image: archivebox/archivebox:latest
|
|
container_name: "${COMPOSE_PROJECT_NAME}-archivebox"
|
|
restart: unless-stopped
|
|
networks:
|
|
- ${COMPOSE_NETWORK_NAME}
|
|
ports:
|
|
- "${ARCHIVEBOX_PORT}:8000"
|
|
volumes:
|
|
- ${COMPOSE_PROJECT_NAME}_archivebox_data:/data
|
|
environment:
|
|
- ADMIN_USERNAME=${ARCHIVEBOX_ADMIN_USER}
|
|
- ADMIN_PASSWORD=${ARCHIVEBOX_ADMIN_PASSWORD}
|
|
- ALLOWED_HOSTS=*
|
|
- CSRF_TRUSTED_ORIGINS=http://localhost:${ARCHIVEBOX_PORT}
|
|
- PUBLIC_INDEX=True
|
|
- PUBLIC_SNAPSHOTS=True
|
|
- PUBLIC_ADD_VIEW=False
|
|
- PUID=${DEMO_UID}
|
|
- PGID=${DEMO_GID}
|
|
labels:
|
|
homepage.group: "Developer Tools"
|
|
homepage.name: "ArchiveBox"
|
|
homepage.icon: "archivebox"
|
|
homepage.href: "http://localhost:${ARCHIVEBOX_PORT}"
|
|
homepage.description: "Web archiving solution"
|
|
deploy:
|
|
resources:
|
|
limits:
|
|
memory: 512M
|
|
healthcheck:
|
|
test: ["CMD", "curl", "-fsS",
|
|
"http://localhost:8000/health/"]
|
|
interval: ${HEALTH_CHECK_INTERVAL}
|
|
timeout: ${HEALTH_CHECK_TIMEOUT}
|
|
retries: 5
|
|
start_period: 60s
|
|
|
|
# Tube Archivist - Redis
|
|
ta-redis:
|
|
image: redis:7-alpine
|
|
container_name: "${COMPOSE_PROJECT_NAME}-ta-redis"
|
|
restart: unless-stopped
|
|
networks:
|
|
- ${COMPOSE_NETWORK_NAME}
|
|
volumes:
|
|
- ${COMPOSE_PROJECT_NAME}_ta_redis_data:/data
|
|
deploy:
|
|
resources:
|
|
limits:
|
|
memory: 256M
|
|
healthcheck:
|
|
test: ["CMD", "redis-cli", "ping"]
|
|
interval: ${HEALTH_CHECK_INTERVAL}
|
|
timeout: ${HEALTH_CHECK_TIMEOUT}
|
|
retries: ${HEALTH_CHECK_RETRIES}
|
|
|
|
# Tube Archivist - Elasticsearch
|
|
ta-elasticsearch:
|
|
image: elasticsearch:8.12.0
|
|
container_name: "${COMPOSE_PROJECT_NAME}-ta-elasticsearch"
|
|
restart: unless-stopped
|
|
networks:
|
|
- ${COMPOSE_NETWORK_NAME}
|
|
volumes:
|
|
- ${COMPOSE_PROJECT_NAME}_ta_es_data:/usr/share/elasticsearch/data
|
|
environment:
|
|
- discovery.type=single-node
|
|
- ES_JAVA_OPTS=${ES_JAVA_OPTS}
|
|
- xpack.security.enabled=false
|
|
- xpack.security.http.ssl.enabled=false
|
|
- bootstrap.memory_lock=true
|
|
- path.repo=/usr/share/elasticsearch/data/snapshot
|
|
ulimits:
|
|
memlock:
|
|
soft: -1
|
|
hard: -1
|
|
deploy:
|
|
resources:
|
|
limits:
|
|
memory: 1024M
|
|
healthcheck:
|
|
test: ["CMD-SHELL", "curl -sf http://localhost:9200/_cluster/health || exit 1"]
|
|
interval: ${HEALTH_CHECK_INTERVAL}
|
|
timeout: ${HEALTH_CHECK_TIMEOUT}
|
|
retries: 10
|
|
start_period: 60s
|
|
|
|
# Tube Archivist - YouTube Archiving
|
|
tubearchivist:
|
|
image: bbilly1/tubearchivist:latest
|
|
container_name: "${COMPOSE_PROJECT_NAME}-tubearchivist"
|
|
restart: unless-stopped
|
|
networks:
|
|
- ${COMPOSE_NETWORK_NAME}
|
|
ports:
|
|
- "${TUBE_ARCHIVIST_PORT}:8000"
|
|
volumes:
|
|
- ${COMPOSE_PROJECT_NAME}_tubearchivist_data:/cache
|
|
environment:
|
|
- ES_URL=http://ta-elasticsearch:9200
|
|
- REDIS_CON=redis://ta-redis:6379
|
|
- ELASTIC_PASSWORD=${ELASTIC_PASSWORD}
|
|
- HOST_UID=${DEMO_UID}
|
|
- HOST_GID=${DEMO_GID}
|
|
- TA_HOST=${TA_HOST}
|
|
- TA_USERNAME=${TA_USERNAME}
|
|
- TA_PASSWORD=${TA_PASSWORD}
|
|
- TZ=UTC
|
|
depends_on:
|
|
ta-redis:
|
|
condition: service_healthy
|
|
ta-elasticsearch:
|
|
condition: service_healthy
|
|
labels:
|
|
homepage.group: "Developer Tools"
|
|
homepage.name: "Tube Archivist"
|
|
homepage.icon: "tube-archivist"
|
|
homepage.href: "http://localhost:${TUBE_ARCHIVIST_PORT}"
|
|
homepage.description: "YouTube video archiving"
|
|
deploy:
|
|
resources:
|
|
limits:
|
|
memory: 512M
|
|
healthcheck:
|
|
test: ["CMD", "curl", "-f", "--silent",
|
|
"http://localhost:8000/api/health/"]
|
|
interval: ${HEALTH_CHECK_INTERVAL}
|
|
timeout: ${HEALTH_CHECK_TIMEOUT}
|
|
retries: 5
|
|
start_period: 120s
|
|
|
|
# Wakapi - Time Tracking
|
|
wakapi:
|
|
image: ghcr.io/muety/wakapi:latest
|
|
container_name: "${COMPOSE_PROJECT_NAME}-wakapi"
|
|
restart: unless-stopped
|
|
networks:
|
|
- ${COMPOSE_NETWORK_NAME}
|
|
ports:
|
|
- "${WAKAPI_PORT}:3000"
|
|
volumes:
|
|
- ${COMPOSE_PROJECT_NAME}_wakapi_data:/data
|
|
environment:
|
|
- WAKAPI_PASSWORD_SALT=${WAKAPI_PASSWORD_SALT}
|
|
- PUID=${DEMO_UID}
|
|
- PGID=${DEMO_GID}
|
|
labels:
|
|
homepage.group: "Developer Tools"
|
|
homepage.name: "Wakapi"
|
|
homepage.icon: "wakapi"
|
|
homepage.href: "http://localhost:${WAKAPI_PORT}"
|
|
homepage.description: "Open-source WakaTime alternative"
|
|
deploy:
|
|
resources:
|
|
limits:
|
|
memory: 256M
|
|
healthcheck:
|
|
test: ["CMD", "/app/healthcheck"]
|
|
interval: ${HEALTH_CHECK_INTERVAL}
|
|
timeout: ${HEALTH_CHECK_TIMEOUT}
|
|
retries: ${HEALTH_CHECK_RETRIES}
|
|
|
|
# MailHog - Email Testing
|
|
mailhog:
|
|
image: mailhog/mailhog:latest
|
|
container_name: "${COMPOSE_PROJECT_NAME}-mailhog"
|
|
restart: unless-stopped
|
|
networks:
|
|
- ${COMPOSE_NETWORK_NAME}
|
|
ports:
|
|
- "${MAILHOG_PORT}:8025"
|
|
- "${MAILHOG_SMTP_PORT}:1025"
|
|
volumes:
|
|
- ${COMPOSE_PROJECT_NAME}_mailhog_data:/maildir
|
|
environment:
|
|
- PUID=${DEMO_UID}
|
|
- PGID=${DEMO_GID}
|
|
labels:
|
|
homepage.group: "Developer Tools"
|
|
homepage.name: "MailHog"
|
|
homepage.icon: "mailhog"
|
|
homepage.href: "http://localhost:${MAILHOG_PORT}"
|
|
homepage.description: "Web and API based SMTP testing"
|
|
deploy:
|
|
resources:
|
|
limits:
|
|
memory: 128M
|
|
healthcheck:
|
|
test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider",
|
|
"http://localhost:8025"]
|
|
interval: ${HEALTH_CHECK_INTERVAL}
|
|
timeout: ${HEALTH_CHECK_TIMEOUT}
|
|
retries: ${HEALTH_CHECK_RETRIES}
|
|
|
|
# Atuin - Shell History Synchronization
|
|
atuin:
|
|
image: ghcr.io/atuinsh/atuin:v18.10.0
|
|
container_name: "${COMPOSE_PROJECT_NAME}-atuin"
|
|
restart: unless-stopped
|
|
command:
|
|
- server
|
|
- start
|
|
networks:
|
|
- ${COMPOSE_NETWORK_NAME}
|
|
ports:
|
|
- "${ATUIN_PORT}:8888"
|
|
volumes:
|
|
- ${COMPOSE_PROJECT_NAME}_atuin_data:/config
|
|
environment:
|
|
- ATUIN_HOST=${ATUIN_HOST}
|
|
- ATUIN_PORT=8888
|
|
- ATUIN_OPEN_REGISTRATION=${ATUIN_OPEN_REGISTRATION}
|
|
- ATUIN_DB_URI=sqlite:///config/atuin.db
|
|
- RUST_LOG=info,atuin_server=info
|
|
labels:
|
|
homepage.group: "Developer Tools"
|
|
homepage.name: "Atuin"
|
|
homepage.icon: "atuin"
|
|
homepage.href: "http://localhost:${ATUIN_PORT}"
|
|
homepage.description: "Magical shell history synchronization"
|
|
deploy:
|
|
resources:
|
|
limits:
|
|
memory: 256M
|
|
healthcheck:
|
|
test: ["CMD", "bash", "-c", "echo > /dev/tcp/localhost/8888"]
|
|
interval: ${HEALTH_CHECK_INTERVAL}
|
|
timeout: ${HEALTH_CHECK_TIMEOUT}
|
|
retries: 5
|
|
start_period: 30s
|