Compare commits: 2252587e9c ... 5efe5f4819

3 commits:

| SHA1 |
|---|
| 5efe5f4819 |
| 4590041bdf |
| f6971c20ec |
@@ -33,7 +33,9 @@ log_message() {
     local level=$1
     local message=$2
     local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
-    echo "[$timestamp] [$level] $message" >> "$LOG_FILE"
+    # Sanitize message to prevent injection in logs
+    local clean_message=$(printf '%s\n' "$message" | sed 's/[\`\$|&;<>]//g')
+    echo "[$timestamp] [$level] $clean_message" >> "$LOG_FILE"
 }

 # Function to perform audit of the packaging process
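Note: the character class in the new `clean_message` line strips the usual shell metacharacters before the message reaches the log. A minimal standalone sketch, with a hypothetical message, shows the effect:

```bash
# Sketch: same sed class as in log_message above (GNU sed assumed).
message='build failed; see `cat /tmp/log` for $HOME | details & <more>'
clean_message=$(printf '%s\n' "$message" | sed 's/[\`\$|&;<>]//g')
printf '%s\n' "$clean_message"
# Backticks, $, |, &, ;, < and > are all removed from the output.
```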
@@ -100,8 +102,12 @@ add_git_url() {

     # Check if the application is already in STATUS.md
     if ! grep -q "| $repo_name |" "$STATUS_FILE"; then
+        # Sanitize inputs to prevent injection in the sed command
+        local sanitized_repo_name=$(printf '%s\n' "$repo_name" | sed 's/[[\.*^$()+?{|]/\\&/g; s/[&/]/\\&/g')
+        local sanitized_url=$(printf '%s\n' "$new_url" | sed 's/[[\.*^$()+?{|]/\\&/g; s/[&/]/\\&/g')
+
         # Append the new application to the table in STATUS.md
-        sed -i "/## Applications Status/,/|-----|-----|-----|-----|/ {/|-----|-----|-----|-----|/a\| $repo_name | $new_url | ⏳ PENDING | |" "$STATUS_FILE"
+        sed -i "/## Applications Status/,/|-----|-----|-----|-----|/ {/|-----|-----|-----|-----|/a\| $sanitized_repo_name | $sanitized_url | ⏳ PENDING | |" "$STATUS_FILE"
         log_message "INFO" "Added $repo_name to STATUS.md"
     else
         log_message "INFO" "Application $repo_name already exists in STATUS.md"
@@ -130,6 +136,52 @@ add_git_urls_from_file() {
     log_message "INFO" "Finished processing URLs from $input_file"
 }

+# Function to clean up Docker resources periodically
+cleanup_docker_resources() {
+    log_message "INFO" "Starting Docker resource cleanup"
+
+    # Remove unused Docker images that are related to our builds
+    # Use a broader pattern match since we now include timestamps in image names
+    docker images --format "table {{.Repository}}\t{{.Tag}}\t{{.ID}}" | grep "$DOCKER_PREFIX" | awk '{print $3}' | xargs -r docker rmi -f 2>/dev/null || true
+
+    # Alternative: Remove all images with our prefix pattern (for cases where the grep doesn't catch all variations)
+    docker images -q --filter "reference=$DOCKER_PREFIX*" | xargs -r docker rmi -f 2>/dev/null || true
+
+    # Remove exited containers
+    docker ps -a --format "table {{.Names}}\t{{.Status}}\t{{.ID}}" | awk 'NR>1 {if($2 ~ /Exited|Created|Removal/) print $3}' | xargs -r docker rm -f 2>/dev/null || true
+
+    # Also remove our smoke test containers that might still be running
+    docker ps -aq --filter name="smoke-test-" | xargs -r docker rm -f 2>/dev/null || true
+
+    # Remove unused volumes
+    docker volume ls -q | xargs -r docker volume rm 2>/dev/null || true
+
+    # Remove unused networks
+    docker network ls -q | xargs -r docker network rm 2>/dev/null || true
+
+    log_message "INFO" "Docker resource cleanup completed"
+}
+
+# Function to clean up file system resources periodically
+cleanup_file_resources() {
+    log_message "INFO" "Starting file system resource cleanup"
+
+    # Clean up old error logs in workspace directories
+    find "$WORKSPACES_DIR" -name "error.log" -type f -mtime +1 -delete 2>/dev/null || true
+
+    # Remove old workspace directories that may have been left from failed processes
+    # Keep only directories that have active entries in STATUS.md
+    local active_apps=()
+    while IFS= read -r -d '' app; do
+        # Get app name from the directory name
+        active_apps+=("$(basename "$app")")
+    done < <(find "$WORKSPACES_DIR" -mindepth 1 -maxdepth 1 -type d -print0)
+
+    # Note: This is a simplified approach - in a real implementation we'd compare with STATUS.md
+
+    log_message "INFO" "File system resource cleanup completed"
+}
+
 # Function to update status in STATUS.md
 update_status() {
     local app_name=$1
@@ -148,10 +200,18 @@ update_status() {
     local clean_status=$(printf '%s\n' "$new_status" | sed 's/|//g; s/[[\.*^$()+?{|]/\\&/g')
     local clean_notes=$(printf '%s\n' "$notes" | sed 's/|//g; s/[[\.*^$()+?{|]/\\&/g' | sed 's/&/\&amp;/g; s/</\&lt;/g; s/>/\&gt;/g')

+    # Use file locking to prevent race conditions when multiple processes update the file
+    local lock_file="$STATUS_FILE.lock"
+    exec 200>"$lock_file"
+    flock -x 200  # Exclusive lock
+
     # Update status in the file - find the line with the app name and update its status
     # Use a more targeted sed pattern to reduce chance of unintended matches
     sed -i "s/^| $clean_app_name | \([^|]*\) | \([^|]*\) | \([^|]*\) |$/| $clean_app_name | \1 | $clean_status | $clean_notes |/" "$STATUS_FILE"

+    # Release the lock by closing the file descriptor
+    exec 200>&-
+
     log_message "INFO" "Updated status for $app_name to $new_status"
 }

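Note: the `exec 200>` / `flock -x 200` idiom introduced here is easiest to read in isolation. A minimal sketch, with a hypothetical lock file and critical section:

```bash
#!/usr/bin/env bash
lock_file="/tmp/demo.lock"

exec 200>"$lock_file"   # Open fd 200 on the lock file (the number is arbitrary)
flock -x 200            # Block until this process holds the exclusive lock

echo "critical section: only one holder of the lock runs this at a time"

exec 200>&-             # Closing the fd releases the lock
```

The kernel also releases the lock when the process exits, so a crash inside the critical section cannot leave other writers blocked forever.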
@@ -256,7 +316,7 @@ run_packaging_script() {
     mkdir -p "$workspace_dir" "$artifact_dir"

     # Clone repository
-    if [ ! -d "$workspace_dir/repo" ] || [ -z "$(ls -A "$workspace_dir/repo")" ]; then
+    if [ ! -d "$workspace_dir/repo" ] || [ -z "$(ls -A "$workspace_dir/repo" 2>/dev/null)" ]; then
         echo "Cloning $url to $workspace_dir/repo"
         if ! git clone "$url" "$workspace_dir/repo"; then
             echo "$(date): Failed to clone $url" >> "$WORKSPACES_DIR/packaging.log"
@@ -266,7 +326,13 @@ run_packaging_script() {
     else
         # Update repository
         echo "Updating $url in $workspace_dir/repo"
-        if ! (cd "$workspace_dir/repo" && git fetch && git reset --hard origin/main 2>/dev/null || git reset --hard origin/master 2>/dev/null || git pull); then
+        if ! (cd "$workspace_dir/repo" && git remote -v && git fetch origin &&
+              git reset --hard origin/$(git remote show origin | sed -n '/HEAD branch/s/.*: //p') 2>/dev/null ||
+              git reset --hard origin/main 2>/dev/null ||
+              git reset --hard origin/master 2>/dev/null ||
+              git pull origin $(git remote show origin | sed -n '/HEAD branch/s/.*: //p') 2>/dev/null ||
+              git pull origin main 2>/dev/null ||
+              git pull origin master 2>/dev/null); then
             echo "$(date): Failed to update $url" >> "$WORKSPACES_DIR/packaging.log"
             update_status "$repo_name" "🔄 IN PROGRESS" "Repo update failed, will retry with fresh clone"
             # Remove the repo and try to clone again
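Note: the `git remote show origin | sed -n '/HEAD branch/s/.*: //p'` pipeline queries the remote for its advertised default branch, which is why `main`/`master` remain as fallbacks when detection fails. A sketch of the same detection with an explicit fallback:

```bash
# Sketch: detect origin's default branch; fall back to main if detection fails.
default_branch=$(git remote show origin 2>/dev/null | sed -n '/HEAD branch/s/.*: //p')
default_branch=${default_branch:-main}
git reset --hard "origin/${default_branch}"
```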
@@ -300,18 +366,31 @@ run_packaging_script() {

     if [ $success -eq 0 ]; then
         # Mark as failed and create human help request with more detailed information
-        local error_details=$(cat "$workspace_dir/error.log" | head -20 | sed 's/"/\\"/g' | tr '\n' ' ')
+        local error_details=""
+        if [ -f "$workspace_dir/error.log" ]; then
+            error_details=$(cat "$workspace_dir/error.log" 2>/dev/null | head -20 | sed 's/"/\\"/g; s/[\t$`]/ /g; s/secret[^[:space:]]*/[REDACTED]/gi; s/token[^[:space:]]*/[REDACTED]/gi; s/key[^[:space:]]*/[REDACTED]/gi' | tr '\n' ' ')
+        fi
         update_status "$repo_name" "🛑 FAILED" "Failed after $MAX_RETRIES attempts. Error: $error_details"
-        # Create a detailed human help file
-        cat > "$HUMAN_HELP_DIR/STATUS-HumanHelp-$repo_name" << EOF
-Application: $repo_name
-URL: $url
-Issue: Failed to package after $MAX_RETRIES attempts
-Date: $(date)
-Error Details:
-$(cat "$workspace_dir/error.log")
-EOF
+        # Create a detailed human help file with proper sanitization
+        {
+            echo "Application: $repo_name"
+            echo "URL: $url"
+            echo "Issue: Failed to package after $MAX_RETRIES attempts"
+            echo "Date: $(date)"
+            echo "Error Details:"
+            if [ -f "$workspace_dir/error.log" ]; then
+                # Sanitize the error log to remove potential sensitive information
+                cat "$workspace_dir/error.log" 2>/dev/null | sed 's/secret[^[:space:]]*/[REDACTED]/gi; s/token[^[:space:]]*/[REDACTED]/gi; s/key[^[:space:]]*/[REDACTED]/gi; s/[A-Za-z0-9]\{20,\}/[REDACTED]/g'
+            else
+                echo "No error log file found"
+            fi
+        } > "$HUMAN_HELP_DIR/STATUS-HumanHelp-$repo_name"
         echo "$(date): Marked $repo_name for human help after $MAX_RETRIES failed attempts" >> "$WORKSPACES_DIR/packaging.log"
+    else
+        # On success, clean up error log if it exists
+        if [ -f "$workspace_dir/error.log" ]; then
+            rm -f "$workspace_dir/error.log"
+        fi
     fi
 }

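Note: the unquoted heredoc (which blindly expanded `$(cat ...)` into the help file) is replaced by a brace group redirected once, which allows branching and redaction mid-stream. A compressed sketch of the pattern, with hypothetical paths and GNU sed assumed for the case-insensitive `gi` flags:

```bash
# Sketch: group commands and redirect their combined stdout to one file.
{
    echo "Application: demo-app"
    echo "Date: $(date)"
    # Redact anything that looks like a credential before it is written out
    sed 's/secret[^[:space:]]*/[REDACTED]/gi; s/token[^[:space:]]*/[REDACTED]/gi' /tmp/error.log
} > /tmp/help-request.txt
```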
@@ -460,6 +539,11 @@ main() {

     # Process applications in batches of 3 for parallel execution
     local i=0
+    local batch_count=0
+
+    # Add heartbeat file to track process is alive
+    local heartbeat_file="$WORKSPACES_DIR/process-heartbeat-$(date +%s).tmp"
+    touch "$heartbeat_file"

     while [ $i -lt $total ]; do
         # Process up to 3 applications in parallel
@@ -476,9 +560,31 @@ main() {
         # Wait for all background processes to complete
         wait

+        # Update heartbeat to show process is active
+        touch "$heartbeat_file"
+
         # Perform audit after each batch
         perform_audit

+        # Perform resource cleanup every 10 batches to prevent resource exhaustion during long runs
+        ((batch_count++))
+        if [ $((batch_count % 10)) -eq 0 ]; then
+            log_message "INFO" "Performing periodic resource cleanup after batch $batch_count"
+            cleanup_docker_resources
+            cleanup_file_resources
+        fi
+
+        # Check for critical errors that might require stopping
+        local failed_count_current=$(grep -o "🛑 FAILED" "$STATUS_FILE" | wc -l)
+        local total_failed_since_start=$((failed_count_current))
+
+        # Optional: Add logic for stopping if too many failures occur in a row
+        # This is commented out but can be enabled if needed
+        # if [ $total_failed_since_start -gt 50 ]; then
+        #     log_message "ERROR" "Too many failures (${total_failed_since_start}), stopping process"
+        #     break
+        # fi
+
         # Update i for the next batch
         i=$end

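Note: the every-tenth-batch trigger is plain modular arithmetic; one subtlety is that `((batch_count++))` returns a non-zero status when the pre-increment value is 0, which matters under `set -e`. A sketch:

```bash
# Sketch: run a periodic task every 10th iteration.
batch_count=0
for batch in batch-{1..25}; do
    batch_count=$((batch_count + 1))   # status-safe increment
    if [ $((batch_count % 10)) -eq 0 ]; then
        echo "cleanup after $batch_count batches"   # fires at 10 and 20
    fi
done
```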
@@ -509,6 +615,9 @@ $(date)
 EOF
     done

+    # Final cleanup
+    rm -f "$heartbeat_file" 2>/dev/null || true
+
     # Final audit
     perform_audit
     log_message "INFO" "Completed Cloudron packaging process"
@@ -26,6 +26,38 @@ package_nodejs_app() {
         repo_path="unknown-user/$app_name"
     fi

+    # Create .dockerignore to exclude sensitive files
+    cat > .dockerignore << 'DOCKERIGNORE_EOF'
+.git
+.gitignore
+*.env
+*.key
+*.pem
+*.crt
+*.cert
+Dockerfile
+.dockerignore
+*.log
+node_modules
+__pycache__
+.pytest_cache
+.coverage
+.vscode
+.idea
+*.swp
+*.swo
+.DS_Store
+Thumbs.db
+README.md
+CHANGELOG.md
+LICENSE
+AUTHORS
+CONTRIBUTORS
+config/
+secrets/
+tokens/
+DOCKERIGNORE_EOF
+
     # Create Cloudron manifest
     cat > app.manifest << EOF
 {
@@ -87,8 +119,8 @@ EXPOSE $port
 CMD $start_cmd
 EOF

-    # Build Docker image
-    local docker_image="tsysdevstack-cloudron-buildtest-${app_name//[^a-zA-Z0-9]/-}:latest"
+    # Build Docker image with a more unique name to avoid conflicts in parallel execution
+    local docker_image="tsysdevstack-cloudron-buildtest-${app_name//[^a-zA-Z0-9]/-}-$(date +%s%N | cut -c1-10):latest"
     if ! docker build -t "$docker_image" .; then
         echo "Failed to build Docker image for $app_name"
         return 1
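Note: the new suffix `$(date +%s%N | cut -c1-10)` takes the first ten characters of the seconds-plus-nanoseconds timestamp, which are currently just the epoch seconds, so the tag changes only once per second; two builds of the same app started within the same second could still collide. A sketch:

```bash
# Sketch: time-based tag suffix as used above (app name hypothetical).
suffix=$(date +%s%N | cut -c1-10)   # first 10 of 19 digits = epoch seconds
echo "tsysdevstack-cloudron-buildtest-myapp-${suffix}:latest"
```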
@@ -101,7 +133,7 @@ EOF
     fi

     # Save the Docker image as an artifact
-    docker save "$docker_image" | gzip > "$artifact_dir/${app_name//[^a-zA-Z0-9]/-}.tar.gz"
+    docker save "$docker_image" | gzip > "$artifact_dir/${app_name//[^a-zA-Z0-9]/-}-$(date +%s).tar.gz"
     return 0
 }

@@ -126,6 +158,38 @@ package_python_app() {
         repo_path="unknown-user/$app_name"
     fi

+    # Create .dockerignore to exclude sensitive files
+    cat > .dockerignore << 'DOCKERIGNORE_EOF'
+.git
+.gitignore
+*.env
+*.key
+*.pem
+*.crt
+*.cert
+Dockerfile
+.dockerignore
+*.log
+node_modules
+__pycache__
+.pytest_cache
+.coverage
+.vscode
+.idea
+*.swp
+*.swo
+.DS_Store
+Thumbs.db
+README.md
+CHANGELOG.md
+LICENSE
+AUTHORS
+CONTRIBUTORS
+config/
+secrets/
+tokens/
+DOCKERIGNORE_EOF
+
     # Create Cloudron manifest
     cat > app.manifest << EOF
 {
@@ -187,8 +251,8 @@ EXPOSE $port
 CMD $start_cmd
 EOF

-    # Build Docker image
-    local docker_image="tsysdevstack-cloudron-buildtest-${app_name//[^a-zA-Z0-9]/-}:latest"
+    # Build Docker image with a more unique name to avoid conflicts in parallel execution
+    local docker_image="tsysdevstack-cloudron-buildtest-${app_name//[^a-zA-Z0-9]/-}-$(date +%s%N | cut -c1-10):latest"
     if ! docker build -t "$docker_image" .; then
         echo "Failed to build Docker image for $app_name"
         return 1
@@ -201,7 +265,7 @@ EOF
     fi

     # Save the Docker image as an artifact
-    docker save "$docker_image" | gzip > "$artifact_dir/${app_name//[^a-zA-Z0-9]/-}.tar.gz"
+    docker save "$docker_image" | gzip > "$artifact_dir/${app_name//[^a-zA-Z0-9]/-}-$(date +%s).tar.gz"
     return 0
 }

@@ -226,6 +290,38 @@ package_php_app() {
         repo_path="unknown-user/$app_name"
     fi

+    # Create .dockerignore to exclude sensitive files
+    cat > .dockerignore << 'DOCKERIGNORE_EOF'
+.git
+.gitignore
+*.env
+*.key
+*.pem
+*.crt
+*.cert
+Dockerfile
+.dockerignore
+*.log
+node_modules
+__pycache__
+.pytest_cache
+.coverage
+.vscode
+.idea
+*.swp
+*.swo
+.DS_Store
+Thumbs.db
+README.md
+CHANGELOG.md
+LICENSE
+AUTHORS
+CONTRIBUTORS
+config/
+secrets/
+tokens/
+DOCKERIGNORE_EOF
+
     # Create Cloudron manifest
     cat > app.manifest << EOF
 {
@@ -267,8 +363,8 @@ EXPOSE 80
 CMD ["apache2-foreground"]
 EOF

-    # Build Docker image
-    local docker_image="tsysdevstack-cloudron-buildtest-${app_name//[^a-zA-Z0-9]/-}:latest"
+    # Build Docker image with a more unique name to avoid conflicts in parallel execution
+    local docker_image="tsysdevstack-cloudron-buildtest-${app_name//[^a-zA-Z0-9]/-}-$(date +%s%N | cut -c1-10):latest"
     if ! docker build -t "$docker_image" .; then
         echo "Failed to build Docker image for $app_name"
         return 1
@@ -281,7 +377,7 @@ EOF
     fi

     # Save the Docker image as an artifact
-    docker save "$docker_image" | gzip > "$artifact_dir/${app_name//[^a-zA-Z0-9]/-}.tar.gz"
+    docker save "$docker_image" | gzip > "$artifact_dir/${app_name//[^a-zA-Z0-9]/-}-$(date +%s).tar.gz"
     return 0
 }

@@ -306,6 +402,38 @@ package_go_app() {
         repo_path="unknown-user/$app_name"
     fi

+    # Create .dockerignore to exclude sensitive files
+    cat > .dockerignore << 'DOCKERIGNORE_EOF'
+.git
+.gitignore
+*.env
+*.key
+*.pem
+*.crt
+*.cert
+Dockerfile
+.dockerignore
+*.log
+node_modules
+__pycache__
+.pytest_cache
+.coverage
+.vscode
+.idea
+*.swp
+*.swo
+.DS_Store
+Thumbs.db
+README.md
+CHANGELOG.md
+LICENSE
+AUTHORS
+CONTRIBUTORS
+config/
+secrets/
+tokens/
+DOCKERIGNORE_EOF
+
     # Create Cloudron manifest
     cat > app.manifest << EOF
 {
@@ -358,8 +486,8 @@ EXPOSE 8080
 CMD ["./$binary_name"]
 EOF

-    # Build Docker image
-    local docker_image="tsysdevstack-cloudron-buildtest-${app_name//[^a-zA-Z0-9]/-}:latest"
+    # Build Docker image with a more unique name to avoid conflicts in parallel execution
+    local docker_image="tsysdevstack-cloudron-buildtest-${app_name//[^a-zA-Z0-9]/-}-$(date +%s%N | cut -c1-10):latest"
     if ! docker build -t "$docker_image" .; then
         echo "Failed to build Docker image for $app_name"
         return 1
@@ -372,7 +500,7 @@ EOF
     fi

     # Save the Docker image as an artifact
-    docker save "$docker_image" | gzip > "$artifact_dir/${app_name//[^a-zA-Z0-9]/-}.tar.gz"
+    docker save "$docker_image" | gzip > "$artifact_dir/${app_name//[^a-zA-Z0-9]/-}-$(date +%s).tar.gz"
     return 0
 }

@@ -389,22 +517,39 @@ smoke_test_docker_image() {
         return 1
     fi

-    # Run the container briefly to test if it starts correctly
-    local container_name="smoke-test-${app_name//[^a-zA-Z0-9]/-}-$(date +%s)"
+    # Sanitize the app name for container name
+    local clean_app_name=$(printf '%s\n' "$app_name" | sed 's/[^a-zA-Z0-9]/-/g' | tr -cd '[:alnum:]-')
+    local container_name="smoke-test-${clean_app_name:0:50}-$(date +%s)"
+
+    # Validate container name doesn't exceed Docker limits
+    if [ ${#container_name} -gt 63 ]; then
+        container_name="${container_name:0:63}"
+    fi

     # Run without specific health check initially, just see if container starts and stays running
     if ! docker run -d --name "$container_name" "$docker_image" >/dev/null 2>&1; then
         echo "Failed to start container for $app_name during smoke test"
-        docker rm "$container_name" >/dev/null 2>&1 || true
+        # Remove container in case it was partially created
+        docker rm -f "$container_name" >/dev/null 2>&1 || true
         return 1
     fi

-    # Wait a few seconds to see if the container stays running
-    sleep 15
+    # Give the container time to start - wait with periodic checks
+    local max_wait=30  # Maximum wait time in seconds
+    local waited=0
+    local container_status="not_started"

-    # Check if the container is still running
-    local container_status
-    container_status=$(docker inspect -f '{{.State.Status}}' "$container_name" 2>/dev/null || echo "not_found")
+    while [ $waited -lt $max_wait ]; do
+        container_status=$(docker inspect -f '{{.State.Status}}' "$container_name" 2>/dev/null || echo "not_found")
+        if [ "$container_status" = "running" ]; then
+            break
+        elif [ "$container_status" = "exited" ] || [ "$container_status" = "dead" ]; then
+            # Container exited early, no need to wait longer
+            break
+        fi
+        sleep 2
+        waited=$((waited + 2))
+    done

     if [ "$container_status" = "running" ]; then
         echo "Smoke test passed for $app_name - container is running"
@@ -414,11 +559,11 @@ smoke_test_docker_image() {
         return 0
     else
         # Container stopped or crashed, get logs for debugging
-        echo "Container for $app_name did not stay running during smoke test (status: $container_status)"
+        echo "Container for $app_name did not stay running during smoke test (status: $container_status after ${waited}s)"
         echo "Container logs:"
-        docker logs "$container_name" 2>&1 | head -30
-        docker stop "$container_name" >/dev/null 2>&1 || true
-        docker rm "$container_name" >/dev/null 2>&1 || true
+        docker logs "$container_name" 2>/dev/null | head -30 || echo "Could not retrieve container logs"
+        # Force remove the container
+        docker rm -f "$container_name" >/dev/null 2>&1 || true
         return 1
     fi
 }
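Note: the start-up poll above generalizes into a small helper; a sketch with hypothetical names:

```bash
# Sketch: wait until a container reaches running or a terminal state.
wait_for_container() {
    local name=$1 max_wait=${2:-30} waited=0 status="not_started"
    while [ "$waited" -lt "$max_wait" ]; do
        status=$(docker inspect -f '{{.State.Status}}' "$name" 2>/dev/null || echo "not_found")
        case "$status" in
            running|exited|dead) break ;;
        esac
        sleep 2
        waited=$((waited + 2))
    done
    echo "$status"
}

# Usage: status=$(wait_for_container "smoke-test-myapp-1714070423" 30)
```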
@@ -461,6 +606,38 @@ package_generic_app() {

     cd "$app_dir"

+    # Create .dockerignore to exclude sensitive files
+    cat > .dockerignore << 'DOCKERIGNORE_EOF'
+.git
+.gitignore
+*.env
+*.key
+*.pem
+*.crt
+*.cert
+Dockerfile
+.dockerignore
+*.log
+node_modules
+__pycache__
+.pytest_cache
+.coverage
+.vscode
+.idea
+*.swp
+*.swo
+.DS_Store
+Thumbs.db
+README.md
+CHANGELOG.md
+LICENSE
+AUTHORS
+CONTRIBUTORS
+config/
+secrets/
+tokens/
+DOCKERIGNORE_EOF
+
     # Extract username/repo from the app_url for manifest
     local repo_path
     if [[ "$app_url" == *"github.com"* ]]; then
@@ -565,8 +742,8 @@ EXPOSE 8080
 CMD ["/run-app.sh"]
 DOCKERFILE_EOF

-    # Build Docker image
-    local docker_image="tsysdevstack-cloudron-buildtest-${app_name//[^a-zA-Z0-9]/-}:latest"
+    # Build Docker image with a more unique name to avoid conflicts in parallel execution
+    local docker_image="tsysdevstack-cloudron-buildtest-${app_name//[^a-zA-Z0-9]/-}-$(date +%s%N | cut -c1-10):latest"
     if ! docker build -t "$docker_image" .; then
         echo "Failed to build Docker image for $app_name"
         return 1
@@ -579,6 +756,6 @@ DOCKERFILE_EOF
     fi

     # Save the Docker image as an artifact
-    docker save "$docker_image" | gzip > "$artifact_dir/${app_name//[^a-zA-Z0-9]/-}.tar.gz"
+    docker save "$docker_image" | gzip > "$artifact_dir/${app_name//[^a-zA-Z0-9]/-}-$(date +%s).tar.gz"
     return 0
 }
CloudronStack/test_add_url.sh (new executable file, 24 lines)
@@ -0,0 +1,24 @@
+#!/bin/bash
+
+# Test script to verify the add_git_url functionality
+
+# Source the master script to get access to its functions
+source /home/localuser/TSYSDevStack/CloudronStack/output/master-control-script.sh
+
+# Test adding a new URL
+echo "Testing add_git_url function..."
+add_git_url "https://github.com/testuser/testrepo"
+
+# Check the git URL list file to see if the URL was added
+echo "Contents of GitUrlList.txt after adding:"
+cat /home/localuser/TSYSDevStack/CloudronStack/collab/GitUrlList.txt
+
+# Test adding the same URL again (should not duplicate)
+echo "Testing adding the same URL again (should not duplicate)..."
+add_git_url "https://github.com/testuser/testrepo"
+
+# Add another URL for good measure
+echo "Testing adding a second URL..."
+add_git_url "https://github.com/anotheruser/anotherrepo"
+
+echo "Test completed successfully!"
@@ -69,7 +69,7 @@ RUN curl -sSfL https://raw.githubusercontent.com/aquaproj/aqua-installer/v2.3.1/
 RUN curl -sSfL https://mise.jdx.dev/install.sh | env MISE_INSTALL_PATH=/usr/local/bin/mise MISE_INSTALL_HELP=0 sh

 # Install Node.js via mise to enable npm package installation
-RUN mise install node@lts && mise global node@lts
+RUN mise install node@22.13.0 && mise global node@22.13.0

 # Create non-root user with matching UID/GID for host mapping
 RUN if getent passwd "${USER_ID}" >/dev/null; then \
@@ -100,20 +100,21 @@ RUN su - "${USERNAME}" -c 'git clone --depth=1 https://github.com/ohmyzsh/ohmyzs
     && su - "${USERNAME}" -c 'printf "\nset -gx AQUA_GLOBAL_CONFIG \$HOME/.config/aquaproj-aqua/aqua.yaml\n# Shell prompt and runtime manager\nstarship init fish | source\nmise activate fish | source\ndirenv hook fish | source\nzoxide init fish | source\n" >> ~/.config/fish/config.fish'

 # Install Node.js for the toolbox user and set up the environment
-RUN su - "${USERNAME}" -c 'mise install node@lts && mise use -g node@lts'
+RUN su - "${USERNAME}" -c 'mise install node@22.13.0 && mise use -g node@22.13.0'

 COPY aqua.yaml /tmp/aqua.yaml

+# Install aqua packages at both root and user level to ensure they're baked into the image
 RUN chown "${USER_ID}:${GROUP_ID}" /tmp/aqua.yaml \
     && su - "${USERNAME}" -c 'mkdir -p ~/.config/aquaproj-aqua' \
     && su - "${USERNAME}" -c 'cp /tmp/aqua.yaml ~/.config/aquaproj-aqua/aqua.yaml' \
-    && su - "${USERNAME}" -c 'AQUA_GLOBAL_CONFIG=~/.config/aquaproj-aqua/aqua.yaml aqua install'
+    && AQUA_GLOBAL_CONFIG=/tmp/aqua.yaml aqua install

 # Install AI CLI tools via npm using mise to ensure Node.js is available
-RUN mise exec -- npm install -g @just-every/code @qwen-code/qwen-code @google/gemini-cli @openai/codex opencode-ai@latest
+RUN mise exec -- npm install -g @just-every/code@0.4.6 @qwen-code/qwen-code@0.1.1 @google/gemini-cli@0.11.0 @openai/codex@0.50.0 opencode-ai@0.15.29

 # Install the same AI CLI tools for the toolbox user so they are available in the container runtime
-RUN su - "${USERNAME}" -c 'mise exec -- npm install -g @just-every/code @qwen-code/qwen-code @google/gemini-cli @openai/codex opencode-ai@latest' && \
+RUN su - "${USERNAME}" -c 'mise exec -- npm install -g @just-every/code@0.4.6 @qwen-code/qwen-code@0.1.1 @google/gemini-cli@0.11.0 @openai/codex@0.50.0 opencode-ai@0.15.29' && \
     # Ensure mise shims are properly generated for the installed tools
     su - "${USERNAME}" -c 'mise reshim'

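Note: with aqua packages and the npm CLIs now baked in at pinned versions, the build can be smoke-checked by running the tools from the image itself; a hedged sketch (the tag and user wiring are assumed to match this Dockerfile):

```bash
# Sketch: confirm pinned tools are present in the built image.
docker run --rm tsysdevstack-toolboxstack-toolbox-base:release-current \
    zsh -lc 'gh --version && mise exec -- node --version'
```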
@@ -8,9 +8,11 @@ Context snapshot (toolbox-base):

 Current state:
 - Dockerfile installs shell tooling (zsh/bash/fish with Starship & oh-my-zsh), core CLI utilities (curl, wget, git, tmux, screen, htop, btop, entr, httpie, tea, bc, etc.), build-essential + headers, aqua, and mise. Aqua is pinned to specific versions for gh, lazygit, direnv, git-delta, zoxide, just, yq, xh, curlie, chezmoi, shfmt, shellcheck, hadolint, uv, watchexec; direnv/zoxide hooks are enabled for all shells (direnv logging muted).
 - aqua-managed CLI inventory lives in README.md alongside usage notes; tea installs via direct download with checksum verification (TEA_VERSION build arg).
-- mise handles language/tool runtimes; activation wired into zsh, bash, and fish.
-- AI CLI tools (just-every/code, QwenLM/qwen-code, google-gemini/gemini-cli, openai/codex, sst/opencode) are installed via npm and available in the PATH.
+- aqua packages are baked into the image during the build process for consistency, reproducibility and performance.
+- mise handles language/tool runtimes; activation wired into zsh, bash, and fish. Node.js is pinned to version 22.13.0 for build consistency.
+- AI CLI tools (just-every/code, QwenLM/qwen-code, google-gemini/gemini-cli, openai/codex, sst/opencode) are installed via npm and baked into the image with pinned versions.
+- Host directories for AI tool configuration and cache are mounted to maintain persistent settings across container runs.
 - docker-compose.yml runs container with host UID/GID, `sleep infinity`, and docker socket mount; run via run.sh/build.sh. Host directories `~/.local/share/mise` and `~/.cache/mise` are mounted for persistent runtimes.
 - Devcontainer config ( .devcontainer/devcontainer.json ) references the compose service.
 - Documentation: README.md (tooling inventory & workflow) and this PROMPT must stay current, and both should stay aligned with the shared guidance in ../PROMPT. README also notes that build.sh now uses docker buildx with a local cache directory and documents the `dev` → `release-current` → semantic tagging workflow.
@@ -46,9 +46,10 @@ The compose service mounts the current repo to `/workspace` (read/write) and run
 | **Shells & Prompts** | 🐚 `zsh` • 🐟 `fish` • 🧑‍💻 `bash` • ⭐ `starship` • 💎 `oh-my-zsh` | Starship prompt enabled for all shells; oh-my-zsh configured with `git` + `fzf` plugins. |
 | **Runtime & CLI Managers** | 🪄 `mise` • 💧 `aqua` | `mise` handles language/tool runtimes (activation wired into zsh/bash/fish); `aqua` manages standalone CLIs with config at `~/.config/aquaproj-aqua/aqua.yaml`. |
 | **Core CLI Utilities** | 📦 `curl` • 📥 `wget` • 🔐 `ca-certificates` • 🧭 `git` • 🔧 `build-essential` + headers (`pkg-config`, `libssl-dev`, `zlib1g-dev`, `libffi-dev`, `libsqlite3-dev`, `libreadline-dev`, `make`) • 🔍 `ripgrep` • 🧭 `fzf` • 📁 `fd` • 📖 `bat` • 🔗 `openssh-client` • 🧵 `tmux` • 🖥️ `screen` • 📈 `htop` • 📉 `btop` • ♻️ `entr` • 📊 `jq` • 🌐 `httpie` • ☕ `tea` • 🧮 `bc` | Provides ergonomic defaults plus toolchain deps for compiling runtimes (no global language installs). |
-| **Aqua-Managed CLIs** | 🐙 `gh` • 🌀 `lazygit` • 🪄 `direnv` • 🎨 `git-delta` • 🧭 `zoxide` • 🧰 `just` • 🧾 `yq` • ⚡ `xh` • 🌍 `curlie` • 🏠 `chezmoi` • 🛠️ `shfmt` • ✅ `shellcheck` • 🐳 `hadolint` • 🐍 `uv` • 🔁 `watchexec` | Extend via `~/.config/aquaproj-aqua/aqua.yaml` and run `aqua install`. Direnv logging is muted and hooks for direnv/zoxide are pre-configured for zsh, bash, and fish. |
+| **Aqua-Managed CLIs** | 🐙 `gh` • 🌀 `lazygit` • 🪄 `direnv` • 🎨 `git-delta` • 🧭 `zoxide` • 🧰 `just` • 🧾 `yq` • ⚡ `xh` • 🌍 `curlie` • 🏠 `chezmoi` • 🛠️ `shfmt` • ✅ `shellcheck` • 🐳 `hadolint` • 🐍 `uv` • 🔁 `watchexec` | Extend via `~/.config/aquaproj-aqua/aqua.yaml`. These packages are baked into the image at build time for consistency and reproducibility. Direnv logging is muted and hooks for direnv/zoxide are pre-configured for zsh, bash, and fish. |
 | **AI CLI Tools** | 🧠 `@just-every/code` • 🤖 `@qwen-code/qwen-code` • 💎 `@google/gemini-cli` • 🔮 `@openai/codex` • 🌐 `opencode-ai` | AI-powered command-line tools for enhanced development workflows. Node.js is installed via mise to support npm package installation. |
 | **Container Workflow** | 🐳 Docker socket mount (`/var/run/docker.sock`) | Enables Docker CLIs inside the container; host Docker daemon required. |
+| **AI Tool Configuration** | 🧠 Host directories for AI tools | Host directories for AI tool configuration and cache are mounted to maintain persistent settings and data across container runs. |
 | **Runtime Environment** | 👤 Non-root user `toolbox` (UID/GID mapped) • 🗂️ `/workspace` mount | Maintains host permissions and isolates artifacts under `artifacts/ToolboxStack/toolbox-base`. |

 ---
@@ -2,6 +2,17 @@

 set -euo pipefail

+# Validate dependencies
+if ! command -v docker &> /dev/null; then
+    echo "Error: docker is required but not installed." >&2
+    exit 1
+fi
+
+if ! docker buildx version &> /dev/null; then
+    echo "Error: docker buildx is required but not available." >&2
+    exit 1
+fi
+
 IMAGE_NAME="tsysdevstack-toolboxstack-toolbox-base"
 SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"

@@ -20,13 +31,16 @@ echo "Building ${IMAGE_NAME} with UID=${USER_ID} GID=${GROUP_ID} USERNAME=${USER
 echo "Primary tag: ${TAG}"

 if ! docker buildx inspect "${BUILDER_NAME}" >/dev/null 2>&1; then
+    echo "Creating builder: ${BUILDER_NAME}"
     docker buildx create --driver docker-container --name "${BUILDER_NAME}" --use >/dev/null
 else
+    echo "Using existing builder: ${BUILDER_NAME}"
     docker buildx use "${BUILDER_NAME}" >/dev/null
 fi

 mkdir -p "${CACHE_DIR}"

+echo "Starting build..."
 docker buildx build \
     --builder "${BUILDER_NAME}" \
     --load \
@@ -56,3 +70,13 @@ if [[ "${PUSH}" == "true" ]]; then
         docker push "${IMAGE_NAME}:${RELEASE_TAG}"
     fi
 fi
+
+echo "Build completed successfully."
+
+# Run security scan if TRIVY is available
+if command -v trivy &> /dev/null; then
+    echo "Running security scan with Trivy..."
+    trivy image --exit-code 0 --severity HIGH,CRITICAL "${IMAGE_NAME}:${TAG}"
+else
+    echo "Trivy not found. Install Trivy to perform security scanning."
+fi
|||||||
@@ -18,3 +18,14 @@ services:
|
|||||||
- .:/workspace:rw
|
- .:/workspace:rw
|
||||||
- ${HOME}/.local/share/mise:/home/toolbox/.local/share/mise:rw
|
- ${HOME}/.local/share/mise:/home/toolbox/.local/share/mise:rw
|
||||||
- ${HOME}/.cache/mise:/home/toolbox/.cache/mise:rw
|
- ${HOME}/.cache/mise:/home/toolbox/.cache/mise:rw
|
||||||
|
# AI CLI tool configuration and cache directories
|
||||||
|
- ${HOME}/.config/openai:/home/toolbox/.config/openai:rw
|
||||||
|
- ${HOME}/.config/gemini:/home/toolbox/.config/gemini:rw
|
||||||
|
- ${HOME}/.config/qwen:/home/toolbox/.config/qwen:rw
|
||||||
|
- ${HOME}/.config/code:/home/toolbox/.config/code:rw
|
||||||
|
- ${HOME}/.config/opencode:/home/toolbox/.config/opencode:rw
|
||||||
|
- ${HOME}/.cache/openai:/home/toolbox/.cache/openai:rw
|
||||||
|
- ${HOME}/.cache/gemini:/home/toolbox/.cache/gemini:rw
|
||||||
|
- ${HOME}/.cache/qwen:/home/toolbox/.cache/qwen:rw
|
||||||
|
- ${HOME}/.cache/code:/home/toolbox/.cache/code:rw
|
||||||
|
- ${HOME}/.cache/opencode:/home/toolbox/.cache/opencode:rw
|
||||||
|
|||||||
@@ -2,6 +2,17 @@

 set -euo pipefail

+# Validate dependencies
+if ! command -v docker &> /dev/null; then
+    echo "Error: docker is required but not installed." >&2
+    exit 1
+fi
+
+if ! command -v docker compose &> /dev/null; then
+    echo "Error: docker compose is required but not installed." >&2
+    exit 1
+fi
+
 SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
 COMPOSE_FILE="${SCRIPT_DIR}/docker-compose.yml"

@@ -19,15 +30,21 @@ ACTION="${1:-up}"
 shift || true

 if [[ "${ACTION}" == "up" ]]; then
+    # Create necessary directories for the toolbox tools
     mkdir -p "${HOME}/.local/share/mise" "${HOME}/.cache/mise"
+    mkdir -p "${HOME}/.config" "${HOME}/.local/share"
+    mkdir -p "${HOME}/.cache/openai" "${HOME}/.cache/gemini" "${HOME}/.cache/qwen" "${HOME}/.cache/code" "${HOME}/.cache/opencode"
+    mkdir -p "${HOME}/.config/openai" "${HOME}/.config/gemini" "${HOME}/.config/qwen" "${HOME}/.config/code" "${HOME}/.config/opencode"
 fi

 case "${ACTION}" in
     up)
         docker compose -f "${COMPOSE_FILE}" up --build --detach "$@"
+        echo "Container started. Use 'docker exec -it tsysdevstack-toolboxstack-toolbox-base zsh' to access the shell."
         ;;
     down)
         docker compose -f "${COMPOSE_FILE}" down "$@"
+        echo "Container stopped."
         ;;
     *)
         echo "Usage: $0 [up|down] [additional docker compose args]" >&2
ToolboxStack/output/toolbox-template/Dockerfile (new file, 25 lines)
@@ -0,0 +1,25 @@
+# Extend from the toolbox-base image
+FROM tsysdevstack-toolboxstack-toolbox-base:release-current
+
+# Set build arguments (these can be overridden at build time)
+ARG USER_ID=1000
+ARG GROUP_ID=1000
+ARG USERNAME=toolbox
+
+# Ensure the non-root user exists with the correct UID/GID
+RUN if getent passwd "${USER_ID}" >/dev/null; then \
+        existing_user="$(getent passwd "${USER_ID}" | cut -d: -f1)"; \
+        userdel --remove "${existing_user}" 2>/dev/null || true; \
+    fi \
+    && if ! getent group "${GROUP_ID}" >/dev/null; then \
+        groupadd --gid "${GROUP_ID}" "${USERNAME}"; \
+    fi \
+    && useradd --uid "${USER_ID}" --gid "${GROUP_ID}" --shell /usr/bin/zsh --create-home "${USERNAME}" \
+    && usermod -aG sudo "${USERNAME}" 2>/dev/null || true
+
+# Switch to the non-root user
+USER ${USERNAME}
+WORKDIR /workspace
+
+# Default command
+CMD ["/usr/bin/zsh"]
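Note: a Dockerfile like this is typically built with the host's UID/GID passed in so the `/workspace` mount stays writable; a usage sketch with a hypothetical image name:

```bash
# Sketch: build the template image with host-mapped UID/GID.
docker build \
    --build-arg USER_ID="$(id -u)" \
    --build-arg GROUP_ID="$(id -g)" \
    --build-arg USERNAME=toolbox \
    -t tsysdevstack-toolboxstack-mytoolbox .
```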
@@ -5,21 +5,23 @@ You are Codex, collaborating with a human on the TSYSDevStack ToolboxStack proje
 - Start each session by reading it (`cat SEED`) and summarize progress or adjustments here in PROMPT.

 Context snapshot ({{toolbox_name}}):
-- Working directory: artifacts/ToolboxStack/{{toolbox_name}}
-- Image: tsysdevstack-toolboxstack-{{toolbox_name}} (Ubuntu 24.04)
+- Working directory: TSYSDevStack/ToolboxStack/{{toolbox_name}}
+- Image: extends from tsysdevstack-toolboxstack-toolbox-base (Ubuntu 24.04 base)
 - Container user: toolbox (non-root, UID/GID mapped to host)
 - Mounted workspace: current repo at /workspace (rw)

 Current state:
-- Seed items above still need to be translated into Dockerfile/tooling work.
+- Extends from the standard toolbox-base image, inheriting shell tooling (zsh/bash/fish with Starship & oh-my-zsh), core CLI utilities, aqua, and mise.
+- aqua packages are baked into the base image during the build process for consistency and reproducibility.
+- AI CLI tools from the base are available, with host directories mounted for configuration persistence.
 - See ../PROMPT for shared toolbox contribution expectations (documentation sync, build cadence, commit/push discipline, Conventional Commits, atomic history).

 Collaboration checklist:
-1. Translate SEED goals into concrete tooling decisions; mirror outcomes in README.md and this PROMPT (do not rewrite SEED unless the scope resets).
+1. Build upon the base tooling with {{toolbox_name}}-specific additions; mirror outcomes in README.md and this PROMPT.
 2. Prefer aqua-managed CLIs and mise-managed runtimes for reproducibility.
 3. After each tooling change, update README/PROMPT, run ./build.sh, commit (Conventional Commit message, focused diff), and push only once the build succeeds per ../PROMPT.
 4. Record verification steps (build/test commands) as they are performed.
 5. Maintain UID/GID mapping and non-root execution.

 Active focus:
-- Initialize {{toolbox_name}} using the toolbox-template scaffolding; evolve the Dockerfile/tooling inventory to satisfy the SEED goals.
+- Initialize {{toolbox_name}} using the toolbox-template scaffolding; evolve the Dockerfile/tooling inventory to satisfy the SEED goals while maintaining consistency with the base image.
@@ -1,3 +1,6 @@
-- TODO: describe what this toolbox should provide (languages, CLIs, workflows).
-- TODO: list required base image modifications or additional mounts.
-- TODO: note verification or testing expectations specific to this toolbox.
+- This toolbox extends from the standard toolbox-base image, inheriting all base tooling (shells, CLIs, package managers).
+- Add {{toolbox_name}}-specific tools via aqua.yaml, Dockerfile, or mise configurations.
+- Document any additional host directory mounts needed in docker-compose.yml.
+- Ensure all tooling is compatible with the non-root toolbox user and UID/GID mapping.
+- Update README.md to document {{toolbox_name}}-specific features and tooling.
+- Follow the same build and run patterns as the base image for consistency.
ToolboxStack/output/toolbox-template/aqua.yaml (new file, 8 lines)
@@ -0,0 +1,8 @@
+version: 1.0.0
+registries:
+  - type: standard
+    ref: v4.431.0
+packages:
+  # Add additional packages specific to your toolbox here
+  # Example:
+  # - name: cli/cli@v2.82.1
@@ -2,7 +2,20 @@

 set -euo pipefail

-IMAGE_NAME="tsysdevstack-toolboxstack-{{toolbox_name}}"
+# Validate dependencies
+if ! command -v docker &> /dev/null; then
+    echo "Error: docker is required but not installed." >&2
+    exit 1
+fi
+
+if ! docker buildx version &> /dev/null; then
+    echo "Error: docker buildx is required but not available." >&2
+    exit 1
+fi
+
+# Get the toolbox name from the directory name (or you can pass it as an argument)
+TOOLBOX_NAME="${TOOLBOX_NAME_OVERRIDE:-$(basename "$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)")}"
+IMAGE_NAME="tsysdevstack-toolboxstack-${TOOLBOX_NAME#toolbox-}"
 SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"

 USER_ID="${USER_ID_OVERRIDE:-$(id -u)}"
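Note: `${TOOLBOX_NAME#toolbox-}` is shortest-prefix removal, so a directory named `toolbox-rustdev` (hypothetical) yields an image name without a duplicated prefix:

```bash
# Sketch: ${var#pattern} strips the shortest matching prefix.
TOOLBOX_NAME="toolbox-rustdev"
echo "tsysdevstack-toolboxstack-${TOOLBOX_NAME#toolbox-}"
# -> tsysdevstack-toolboxstack-rustdev
```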
@@ -15,13 +28,16 @@ CACHE_DIR="${SCRIPT_DIR}/.build-cache"
 echo "Building ${IMAGE_NAME} with UID=${USER_ID} GID=${GROUP_ID} USERNAME=${USERNAME}"

 if ! docker buildx inspect "${BUILDER_NAME}" >/dev/null 2>&1; then
+    echo "Creating builder: ${BUILDER_NAME}"
     docker buildx create --driver docker-container --name "${BUILDER_NAME}" --use >/dev/null
 else
+    echo "Using existing builder: ${BUILDER_NAME}"
     docker buildx use "${BUILDER_NAME}" >/dev/null
 fi

 mkdir -p "${CACHE_DIR}"

+echo "Starting build..."
 docker buildx build \
     --builder "${BUILDER_NAME}" \
     --load \
@@ -34,3 +50,13 @@ docker buildx build \
     --cache-to "type=local,dest=${CACHE_DIR},mode=max" \
     --tag "${IMAGE_NAME}" \
     "${SCRIPT_DIR}"
+
+echo "Build completed successfully."
+
+# Run security scan if TRIVY is available
+if command -v trivy &> /dev/null; then
+    echo "Running security scan with Trivy..."
+    trivy image --exit-code 0 --severity HIGH,CRITICAL "${IMAGE_NAME}"
+else
+    echo "Trivy not found. Install Trivy to perform security scanning."
+fi
@@ -18,3 +18,14 @@ services:
       - .:/workspace:rw
       - ${HOME}/.local/share/mise:/home/toolbox/.local/share/mise:rw
      - ${HOME}/.cache/mise:/home/toolbox/.cache/mise:rw
+      # AI CLI tool configuration and cache directories
+      - ${HOME}/.config/openai:/home/toolbox/.config/openai:rw
+      - ${HOME}/.config/gemini:/home/toolbox/.config/gemini:rw
+      - ${HOME}/.config/qwen:/home/toolbox/.config/qwen:rw
+      - ${HOME}/.config/code:/home/toolbox/.config/code:rw
+      - ${HOME}/.config/opencode:/home/toolbox/.config/opencode:rw
+      - ${HOME}/.cache/openai:/home/toolbox/.cache/openai:rw
+      - ${HOME}/.cache/gemini:/home/toolbox/.cache/gemini:rw
+      - ${HOME}/.cache/qwen:/home/toolbox/.cache/qwen:rw
+      - ${HOME}/.cache/code:/home/toolbox/.cache/code:rw
+      - ${HOME}/.cache/opencode:/home/toolbox/.cache/opencode:rw
@@ -2,6 +2,17 @@

 set -euo pipefail

+# Validate dependencies
+if ! command -v docker &> /dev/null; then
+    echo "Error: docker is required but not installed." >&2
+    exit 1
+fi
+
+if ! command -v docker compose &> /dev/null; then
+    echo "Error: docker compose is required but not installed." >&2
+    exit 1
+fi
+
 SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
 COMPOSE_FILE="${SCRIPT_DIR}/docker-compose.yml"

@@ -18,15 +29,21 @@ ACTION="${1:-up}"
 shift || true

 if [[ "${ACTION}" == "up" ]]; then
+    # Create necessary directories for the toolbox tools
     mkdir -p "${HOME}/.local/share/mise" "${HOME}/.cache/mise"
+    mkdir -p "${HOME}/.config" "${HOME}/.local/share"
+    mkdir -p "${HOME}/.cache/openai" "${HOME}/.cache/gemini" "${HOME}/.cache/qwen" "${HOME}/.cache/code" "${HOME}/.cache/opencode"
+    mkdir -p "${HOME}/.config/openai" "${HOME}/.config/gemini" "${HOME}/.config/qwen" "${HOME}/.config/code" "${HOME}/.config/opencode"
 fi

 case "${ACTION}" in
     up)
         docker compose -f "${COMPOSE_FILE}" up --build --detach "$@"
+        echo "Container started. Use 'docker exec -it $(basename "$SCRIPT_DIR" | sed 's/toolbox-//') zsh' to access the shell."
         ;;
     down)
         docker compose -f "${COMPOSE_FILE}" down "$@"
+        echo "Container stopped."
         ;;
     *)
         echo "Usage: $0 [up|down] [additional docker compose args]" >&2