version: '3.8'

services:
  lollms-webui:
    build:
      context: .
      dockerfile: Dockerfile
    ports:
      - "${PORT:-9621}:9621"
    volumes:
      - ./personal_data:/app/personal_data  # For configurations and user data
      - ./models:/app/models  # For model storage
      - ./custom_personalities:/app/custom_personalities  # For custom personalities
    restart: unless-stopped
    # GPU support (requires the NVIDIA Container Toolkit on the host);
    # remove or comment out the deploy block below if no NVIDIA GPU is available
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: all
              capabilities: [gpu]
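
# Example usage (a sketch, assuming Docker Compose v2 is installed and, for GPU use,
# the NVIDIA Container Toolkit is set up on the host):
#   docker compose up -d --build      # build the image and start the service in the background
#   PORT=8080 docker compose up -d    # expose the web UI on host port 8080 instead of the default 9621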