feat(demo): restore ArchiveBox, TubeArchivist, Atuin and fix all service configs

Restore 3 services that were previously removed due to health issues,
bringing the stack to 16 services. Add companion services (Elasticsearch,
Redis) required by TubeArchivist.

Key changes:
- Add ArchiveBox with proper health check and admin credentials
- Add TubeArchivist with ta-redis and ta-elasticsearch companions
- Add Atuin server with correct `server start` command and TCP health check
- Fix Wakapi health check to use /app/healthcheck binary
- Add Grafana provisioning bind mount for datasources/dashboards
- Add Homepage config bind mount for docker.yaml
- Fix Docker Socket Proxy label (remove unreachable localhost:4005 href)
- Fix credentials: INFLUXDB_ADMIN_USER and TA_USERNAME → admin
- Fix Grafana datasources.yml user to match
- Fix homepage/docker.yaml to contain Docker provider config
- Add all missing env vars (TA_PASSWORD, ELASTIC_PASSWORD, ES_JAVA_OPTS, etc.); see the example .env snippet below
- Remove Pi-hole port 53 bindings (DNS not needed for demo)
- Bump template version to 2.0
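
A sketch of the .env additions these services now read; every value below is a placeholder, and the Elasticsearch heap sizing is only an example for a small demo host:

# .env additions (illustrative placeholder values)
ARCHIVEBOX_ADMIN_USER=admin
ARCHIVEBOX_ADMIN_PASSWORD=changeme
ARCHIVEBOX_SECRET_KEY=<random-50-char-string>
TA_USERNAME=admin
TA_PASSWORD=changeme
ELASTIC_PASSWORD=changeme
ES_JAVA_OPTS=-Xms512m -Xmx512m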

💘 Generated with Crush

Assisted-by: GLM-5.1 via Crush <crush@charm.land>
Author: reachableceo
Date: 2026-04-27 13:06:31 -05:00
Parent: ed2dbea6c0
Commit: 077f483faf
5 changed files with 269 additions and 140 deletions


@@ -1,8 +1,8 @@
---
# TSYS Developer Support Stack - Docker Compose Template
# Version: 1.0
# Version: 2.0
# Purpose: Demo deployment with dynamic configuration
# ⚠️ DEMO CONFIGURATION ONLY - NOT FOR PRODUCTION
# DEMO CONFIGURATION ONLY - NOT FOR PRODUCTION
networks:
${COMPOSE_NETWORK_NAME}:
@@ -19,7 +19,6 @@ volumes:
driver: local
${COMPOSE_PROJECT_NAME}_dockhand_data:
driver: local
${COMPOSE_PROJECT_NAME}_influxdb_data:
driver: local
${COMPOSE_PROJECT_NAME}_grafana_data:
@@ -34,6 +33,10 @@ volumes:
driver: local
${COMPOSE_PROJECT_NAME}_tubearchivist_data:
driver: local
${COMPOSE_PROJECT_NAME}_ta_redis_data:
driver: local
${COMPOSE_PROJECT_NAME}_ta_es_data:
driver: local
${COMPOSE_PROJECT_NAME}_wakapi_data:
driver: local
${COMPOSE_PROJECT_NAME}_mailhog_data:
@@ -67,8 +70,7 @@ services:
homepage.group: "Infrastructure"
homepage.name: "Docker Socket Proxy"
homepage.icon: "docker"
homepage.href: "http://localhost:${DOCKER_SOCKET_PROXY_PORT}"
homepage.description: "Secure proxy for Docker socket access"
homepage.description: "Secure proxy for Docker socket access (internal only)"
# Homepage - Central Dashboard
homepage:
@@ -81,6 +83,7 @@ services:
- "${HOMEPAGE_PORT}:3000"
volumes:
- ${COMPOSE_PROJECT_NAME}_homepage_data:/app/config
- ./config/homepage:/app/config/default:ro
environment:
- PUID=${DEMO_UID}
- PGID=${DEMO_GID}
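
The ./config/homepage bind mount added above expects a docker.yaml in that directory; a minimal sketch of a Docker provider entry pointing Homepage at the socket proxy, assuming the proxy resolves as docker-socket-proxy on the compose network and listens on the usual 2375:

# config/homepage/docker.yaml (sketch; provider key, hostname, and port are assumptions)
demo-docker:
  host: docker-socket-proxy
  port: 2375
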
@@ -106,8 +109,6 @@ services:
- ${COMPOSE_NETWORK_NAME}
ports:
- "${PIHOLE_PORT}:80"
- "53:53/tcp"
- "53:53/udp"
volumes:
- ${COMPOSE_PROJECT_NAME}_pihole_data:/etc/pihole
environment:
@@ -201,10 +202,12 @@ services:
- "${GRAFANA_PORT}:3000"
volumes:
- ${COMPOSE_PROJECT_NAME}_grafana_data:/var/lib/grafana
- ./config/grafana:/etc/grafana/provisioning:ro
environment:
- GF_SECURITY_ADMIN_USER=${GF_SECURITY_ADMIN_USER}
- GF_SECURITY_ADMIN_PASSWORD=${GF_SECURITY_ADMIN_PASSWORD}
- GF_INSTALL_PLUGINS=${GF_INSTALL_PLUGINS}
- GF_SERVER_HTTP_PORT=3000
- PUID=${DEMO_UID}
- PGID=${DEMO_GID}
labels:
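
The new ./config/grafana provisioning mount is where datasources and dashboards land; a minimal datasources.yml sketch, assuming an InfluxDB datasource (the name and URL are illustrative; only the admin user reflects the credential change in this commit):

# config/grafana/datasources.yml (sketch)
apiVersion: 1
datasources:
  - name: InfluxDB
    type: influxdb
    access: proxy
    url: http://influxdb:8086
    user: admin
    secureJsonData:
      password: $INFLUXDB_ADMIN_PASSWORD
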
@@ -315,7 +318,13 @@ services:
volumes:
- ${COMPOSE_PROJECT_NAME}_archivebox_data:/data
environment:
- SECRET_KEY=${ARCHIVEBOX_SECRET_KEY}
- ADMIN_USERNAME=${ARCHIVEBOX_ADMIN_USER}
- ADMIN_PASSWORD=${ARCHIVEBOX_ADMIN_PASSWORD}
- ALLOWED_HOSTS=*
- CSRF_TRUSTED_ORIGINS=http://localhost:${ARCHIVEBOX_PORT}
- PUBLIC_INDEX=True
- PUBLIC_SNAPSHOTS=True
- PUBLIC_ADD_VIEW=False
- PUID=${DEMO_UID}
- PGID=${DEMO_GID}
labels:
@@ -325,12 +334,55 @@ services:
homepage.href: "http://localhost:${ARCHIVEBOX_PORT}"
homepage.description: "Web archiving solution"
healthcheck:
test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider",
"http://localhost:8000"]
test: ["CMD", "curl", "-fsS",
"http://localhost:8000/health/"]
interval: ${HEALTH_CHECK_INTERVAL}
timeout: ${HEALTH_CHECK_TIMEOUT}
retries: 5
start_period: 60s
# Tube Archivist - Redis
ta-redis:
image: redis:7-alpine
container_name: "${COMPOSE_PROJECT_NAME}-ta-redis"
restart: unless-stopped
networks:
- ${COMPOSE_NETWORK_NAME}
volumes:
- ${COMPOSE_PROJECT_NAME}_ta_redis_data:/data
healthcheck:
test: ["CMD", "redis-cli", "ping"]
interval: ${HEALTH_CHECK_INTERVAL}
timeout: ${HEALTH_CHECK_TIMEOUT}
retries: ${HEALTH_CHECK_RETRIES}
# Tube Archivist - Elasticsearch
ta-elasticsearch:
image: elasticsearch:8.12.0
container_name: "${COMPOSE_PROJECT_NAME}-ta-elasticsearch"
restart: unless-stopped
networks:
- ${COMPOSE_NETWORK_NAME}
volumes:
- ${COMPOSE_PROJECT_NAME}_ta_es_data:/usr/share/elasticsearch/data
environment:
- discovery.type=single-node
- ES_JAVA_OPTS=${ES_JAVA_OPTS}
- xpack.security.enabled=false
- xpack.security.http.ssl.enabled=false
- bootstrap.memory_lock=true
- path.repo=/usr/share/elasticsearch/data/snapshot
ulimits:
memlock:
soft: -1
hard: -1
healthcheck:
test: ["CMD-SHELL", "curl -sf http://localhost:9200/_cluster/health || exit 1"]
interval: ${HEALTH_CHECK_INTERVAL}
timeout: ${HEALTH_CHECK_TIMEOUT}
retries: 10
start_period: 60s
# Tube Archivist - YouTube Archiving
tubearchivist:
image: bbilly1/tubearchivist:latest
@@ -343,18 +395,33 @@ services:
volumes:
- ${COMPOSE_PROJECT_NAME}_tubearchivist_data:/cache
environment:
- ES_URL=http://ta-elasticsearch:9200
- REDIS_CON=redis://ta-redis:6379
- ELASTIC_PASSWORD=${ELASTIC_PASSWORD}
- HOST_UID=${DEMO_UID}
- HOST_GID=${DEMO_GID}
- TA_HOST=${TA_HOST}
- TA_PORT=${TA_PORT}
- TA_DEBUG=${TA_DEBUG}
- TA_USERNAME=demo
- PUID=${DEMO_UID}
- PGID=${DEMO_GID}
- TA_USERNAME=${TA_USERNAME}
- TA_PASSWORD=${TA_PASSWORD}
- TZ=UTC
depends_on:
ta-redis:
condition: service_healthy
ta-elasticsearch:
condition: service_healthy
labels:
homepage.group: "Developer Tools"
homepage.name: "Tube Archivist"
homepage.icon: "tube-archivist"
homepage.href: "http://localhost:${TUBE_ARCHIVIST_PORT}"
homepage.description: "YouTube video archiving"
healthcheck:
test: ["CMD", "curl", "-f", "--silent",
"http://localhost:8000/api/health/"]
interval: ${HEALTH_CHECK_INTERVAL}
timeout: ${HEALTH_CHECK_TIMEOUT}
retries: 5
start_period: 120s
# Wakapi - Time Tracking
wakapi:
@@ -378,8 +445,7 @@ services:
homepage.href: "http://localhost:${WAKAPI_PORT}"
homepage.description: "Open-source WakaTime alternative"
healthcheck:
test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider",
"http://localhost:3000"]
test: ["CMD", "/app/healthcheck"]
interval: ${HEALTH_CHECK_INTERVAL}
timeout: ${HEALTH_CHECK_TIMEOUT}
retries: ${HEALTH_CHECK_RETRIES}
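
If a given Wakapi image tag does not ship the /app/healthcheck helper, an HTTP probe is a fallback sketch (the wget binary inside the image and the /api/health route on port 3000 are assumptions to verify against the tag in use):

healthcheck:
  test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:3000/api/health"]
  interval: ${HEALTH_CHECK_INTERVAL}
  timeout: ${HEALTH_CHECK_TIMEOUT}
  retries: ${HEALTH_CHECK_RETRIES}
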
@@ -411,12 +477,14 @@ services:
timeout: ${HEALTH_CHECK_TIMEOUT}
retries: ${HEALTH_CHECK_RETRIES}
# Atuin - Shell History
# Atuin - Shell History Synchronization
atuin:
image: ghcr.io/atuinsh/atuin:v18.10.0
container_name: "${COMPOSE_PROJECT_NAME}-atuin"
restart: unless-stopped
command: server start
command:
- server
- start
networks:
- ${COMPOSE_NETWORK_NAME}
ports:
@@ -424,12 +492,20 @@ services:
volumes:
- ${COMPOSE_PROJECT_NAME}_atuin_data:/config
environment:
- ATUIN_HOST=${ATUIN_HOST}
- ATUIN_PORT=8888
- ATUIN_OPEN_REGISTRATION=${ATUIN_OPEN_REGISTRATION}
- ATUIN_DB_URI=sqlite:///config/atuin.db
- PUID=${DEMO_UID}
- PGID=${DEMO_GID}
- RUST_LOG=info,atuin_server=info
labels:
homepage.group: "Developer Tools"
homepage.name: "Atuin"
homepage.icon: "atuin"
homepage.href: "http://localhost:${ATUIN_PORT}"
homepage.description: "Magical shell history synchronization"
healthcheck:
test: ["CMD", "bash", "-c", "echo > /dev/tcp/localhost/8888"]
interval: ${HEALTH_CHECK_INTERVAL}
timeout: ${HEALTH_CHECK_TIMEOUT}
retries: 5
start_period: 30s