feat(cloudron): update automation and packaging scripts
- Update CloudronStack/output/master-control-script.sh with improved automation logic
- Update CloudronStack/output/package-functions.sh with enhanced packaging capabilities
- Add CloudronStack/test_add_url.sh for testing URL-addition functionality

These changes improve the CloudronStack automation and testing capabilities.
This commit is contained in:
@@ -33,7 +33,9 @@ log_message() {
|
||||
local level=$1
|
||||
local message=$2
|
||||
local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
|
||||
echo "[$timestamp] [$level] $message" >> "$LOG_FILE"
|
||||
# Sanitize message to prevent injection in logs
|
||||
local clean_message=$(printf '%s\n' "$message" | sed 's/[\`\$|&;<>]//g')
|
||||
echo "[$timestamp] [$level] $clean_message" >> "$LOG_FILE"
|
||||
}
|
||||
|
||||
# Function to perform audit of the packaging process
|
||||
@@ -100,8 +102,12 @@ add_git_url() {
|
||||
|
||||
# Check if the application is already in STATUS.md
|
||||
if ! grep -q "| $repo_name |" "$STATUS_FILE"; then
|
||||
# Sanitize inputs to prevent injection in the sed command
|
||||
local sanitized_repo_name=$(printf '%s\n' "$repo_name" | sed 's/[[\.*^$()+?{|]/\\&/g; s/[&/]/\\&/g')
|
||||
local sanitized_url=$(printf '%s\n' "$new_url" | sed 's/[[\.*^$()+?{|]/\\&/g; s/[&/]/\\&/g')
|
||||
|
||||
# Append the new application to the table in STATUS.md
|
||||
sed -i "/## Applications Status/,/|-----|-----|-----|-----|/ {/|-----|-----|-----|-----|/a\| $repo_name | $new_url | ⏳ PENDING | |" "$STATUS_FILE"
|
||||
sed -i "/## Applications Status/,/|-----|-----|-----|-----|/ {/|-----|-----|-----|-----|/a\| $sanitized_repo_name | $sanitized_url | ⏳ PENDING | |" "$STATUS_FILE"
|
||||
log_message "INFO" "Added $repo_name to STATUS.md"
|
||||
else
|
||||
log_message "INFO" "Application $repo_name already exists in STATUS.md"
|
||||
@@ -130,6 +136,52 @@ add_git_urls_from_file() {
|
||||
log_message "INFO" "Finished processing URLs from $input_file"
|
||||
}
|
||||
|
||||
# Function to clean up Docker resources periodically
|
||||
#######################################
# Best-effort cleanup of Docker resources created by packaging runs, so that
# long batches do not exhaust disk space.
# Globals:   DOCKER_PREFIX (read) — image name prefix used by our builds
# Outputs:   progress via log_message
# Returns:   always 0; individual failures are deliberately ignored (|| true)
#######################################
cleanup_docker_resources() {
    log_message "INFO" "Starting Docker resource cleanup"

    # Remove images whose repository name starts with our prefix. Use a plain
    # --format (no "table" keyword) so no header row is emitted, and match the
    # prefix against the repository column only instead of grepping the whole
    # line, which could also hit tags or IDs.
    docker images --format '{{.Repository}}\t{{.ID}}' \
        | awk -v p="$DOCKER_PREFIX" 'index($1, p) == 1 {print $2}' \
        | xargs -r docker rmi -f 2>/dev/null || true

    # Second pass via reference filter, for name variations (e.g. timestamped
    # tags) the text match above may miss.
    docker images -q --filter "reference=${DOCKER_PREFIX}*" \
        | xargs -r docker rmi -f 2>/dev/null || true

    # Remove containers that are not running. Use status filters instead of
    # text-parsing "docker ps" output, which is fragile against format changes.
    docker ps -aq --filter status=exited --filter status=created --filter status=removing \
        | xargs -r docker rm -f 2>/dev/null || true

    # Remove leftover smoke-test containers, running or not.
    docker ps -aq --filter name=smoke-test- | xargs -r docker rm -f 2>/dev/null || true

    # Remove only *unused* volumes and networks. The previous
    # "ls -q | xargs rm" tried to delete ALL of them — in-use ones merely
    # failed silently — whereas prune only touches dangling resources.
    docker volume prune -f >/dev/null 2>&1 || true
    docker network prune -f >/dev/null 2>&1 || true

    log_message "INFO" "Docker resource cleanup completed"
}
|
||||
|
||||
# Function to clean up file system resources periodically
|
||||
#######################################
# Best-effort cleanup of stale files under the workspaces directory.
# Globals:   WORKSPACES_DIR (read)
# Outputs:   progress via log_message
# Returns:   always 0; failures are deliberately ignored (|| true)
#######################################
cleanup_file_resources() {
    log_message "INFO" "Starting file system resource cleanup"

    # Delete error logs older than one day.
    find "$WORKSPACES_DIR" -name "error.log" -type f -mtime +1 -delete 2>/dev/null || true

    # Enumerate current workspace directories (NUL-delimited to survive any
    # characters in directory names). NOTE(review): the original intent was to
    # remove workspaces that have no matching entry in STATUS.md, but that
    # comparison was never implemented — this list is currently informational
    # only. TODO: reconcile active_apps against STATUS.md before deleting.
    local active_apps=()
    local app
    while IFS= read -r -d '' app; do
        active_apps+=("$(basename "$app")")
    done < <(find "$WORKSPACES_DIR" -mindepth 1 -maxdepth 1 -type d -print0)

    log_message "INFO" "File system resource cleanup completed"
}
|
||||
|
||||
# Function to update status in STATUS.md
|
||||
update_status() {
|
||||
local app_name=$1
|
||||
@@ -148,10 +200,18 @@ update_status() {
|
||||
local clean_status=$(printf '%s\n' "$new_status" | sed 's/|//g; s/[[\.*^$()+?{|]/\\&/g')
|
||||
local clean_notes=$(printf '%s\n' "$notes" | sed 's/|//g; s/[[\.*^$()+?{|]/\\&/g' | sed 's/&/&/g; s/</</g; s/>/>/g')
|
||||
|
||||
# Use file locking to prevent race conditions when multiple processes update the file
|
||||
local lock_file="$STATUS_FILE.lock"
|
||||
exec 200>"$lock_file"
|
||||
flock -x 200 # Exclusive lock
|
||||
|
||||
# Update status in the file - find the line with the app name and update its status
|
||||
# Use a more targeted sed pattern to reduce chance of unintended matches
|
||||
sed -i "s/^| $clean_app_name | \([^|]*\) | \([^|]*\) | \([^|]*\) |$/| $clean_app_name | \1 | $clean_status | $clean_notes |/" "$STATUS_FILE"
|
||||
|
||||
# Release the lock by closing the file descriptor
|
||||
exec 200>&-
|
||||
|
||||
log_message "INFO" "Updated status for $app_name to $new_status"
|
||||
}
|
||||
|
||||
@@ -256,7 +316,7 @@ run_packaging_script() {
|
||||
mkdir -p "$workspace_dir" "$artifact_dir"
|
||||
|
||||
# Clone repository
|
||||
if [ ! -d "$workspace_dir/repo" ] || [ -z "$(ls -A "$workspace_dir/repo")" ]; then
|
||||
if [ ! -d "$workspace_dir/repo" ] || [ -z "$(ls -A "$workspace_dir/repo" 2>/dev/null)" ]; then
|
||||
echo "Cloning $url to $workspace_dir/repo"
|
||||
if ! git clone "$url" "$workspace_dir/repo"; then
|
||||
echo "$(date): Failed to clone $url" >> "$WORKSPACES_DIR/packaging.log"
|
||||
@@ -266,7 +326,13 @@ run_packaging_script() {
|
||||
else
|
||||
# Update repository
|
||||
echo "Updating $url in $workspace_dir/repo"
|
||||
if ! (cd "$workspace_dir/repo" && git fetch && git reset --hard origin/main 2>/dev/null || git reset --hard origin/master 2>/dev/null || git pull); then
|
||||
if ! (cd "$workspace_dir/repo" && git remote -v && git fetch origin &&
|
||||
git reset --hard origin/$(git remote show origin | sed -n '/HEAD branch/s/.*: //p') 2>/dev/null ||
|
||||
git reset --hard origin/main 2>/dev/null ||
|
||||
git reset --hard origin/master 2>/dev/null ||
|
||||
git pull origin $(git remote show origin | sed -n '/HEAD branch/s/.*: //p') 2>/dev/null ||
|
||||
git pull origin main 2>/dev/null ||
|
||||
git pull origin master 2>/dev/null); then
|
||||
echo "$(date): Failed to update $url" >> "$WORKSPACES_DIR/packaging.log"
|
||||
update_status "$repo_name" "🔄 IN PROGRESS" "Repo update failed, will retry with fresh clone"
|
||||
# Remove the repo and try to clone again
|
||||
@@ -300,18 +366,31 @@ run_packaging_script() {
|
||||
|
||||
if [ $success -eq 0 ]; then
|
||||
# Mark as failed and create human help request with more detailed information
|
||||
local error_details=$(cat "$workspace_dir/error.log" | head -20 | sed 's/"/\\"/g' | tr '\n' ' ')
|
||||
local error_details=""
|
||||
if [ -f "$workspace_dir/error.log" ]; then
|
||||
error_details=$(cat "$workspace_dir/error.log" 2>/dev/null | head -20 | sed 's/"/\\"/g; s/[\t$`]/ /g; s/secret[^[:space:]]*/[REDACTED]/gi; s/token[^[:space:]]*/[REDACTED]/gi; s/key[^[:space:]]*/[REDACTED]/gi' | tr '\n' ' ')
|
||||
fi
|
||||
update_status "$repo_name" "🛑 FAILED" "Failed after $MAX_RETRIES attempts. Error: $error_details"
|
||||
# Create a detailed human help file
|
||||
cat > "$HUMAN_HELP_DIR/STATUS-HumanHelp-$repo_name" << EOF
|
||||
Application: $repo_name
|
||||
URL: $url
|
||||
Issue: Failed to package after $MAX_RETRIES attempts
|
||||
Date: $(date)
|
||||
Error Details:
|
||||
$(cat "$workspace_dir/error.log")
|
||||
EOF
|
||||
# Create a detailed human help file with proper sanitization
|
||||
{
|
||||
echo "Application: $repo_name"
|
||||
echo "URL: $url"
|
||||
echo "Issue: Failed to package after $MAX_RETRIES attempts"
|
||||
echo "Date: $(date)"
|
||||
echo "Error Details:"
|
||||
if [ -f "$workspace_dir/error.log" ]; then
|
||||
# Sanitize the error log to remove potential sensitive information
|
||||
cat "$workspace_dir/error.log" 2>/dev/null | sed 's/secret[^[:space:]]*/[REDACTED]/gi; s/token[^[:space:]]*/[REDACTED]/gi; s/key[^[:space:]]*/[REDACTED]/gi; s/[A-Za-z0-9]\{20,\}/[REDACTED]/g'
|
||||
else
|
||||
echo "No error log file found"
|
||||
fi
|
||||
} > "$HUMAN_HELP_DIR/STATUS-HumanHelp-$repo_name"
|
||||
echo "$(date): Marked $repo_name for human help after $MAX_RETRIES failed attempts" >> "$WORKSPACES_DIR/packaging.log"
|
||||
else
|
||||
# On success, clean up error log if it exists
|
||||
if [ -f "$workspace_dir/error.log" ]; then
|
||||
rm -f "$workspace_dir/error.log"
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
@@ -460,6 +539,11 @@ main() {
|
||||
|
||||
# Process applications in batches of 3 for parallel execution
|
||||
local i=0
|
||||
local batch_count=0
|
||||
|
||||
# Add heartbeat file to track process is alive
|
||||
local heartbeat_file="$WORKSPACES_DIR/process-heartbeat-$(date +%s).tmp"
|
||||
touch "$heartbeat_file"
|
||||
|
||||
while [ $i -lt $total ]; do
|
||||
# Process up to 3 applications in parallel
|
||||
@@ -476,9 +560,31 @@ main() {
|
||||
# Wait for all background processes to complete
|
||||
wait
|
||||
|
||||
# Update heartbeat to show process is active
|
||||
touch "$heartbeat_file"
|
||||
|
||||
# Perform audit after each batch
|
||||
perform_audit
|
||||
|
||||
# Perform resource cleanup every 10 batches to prevent resource exhaustion during long runs
|
||||
((batch_count++))
|
||||
if [ $((batch_count % 10)) -eq 0 ]; then
|
||||
log_message "INFO" "Performing periodic resource cleanup after batch $batch_count"
|
||||
cleanup_docker_resources
|
||||
cleanup_file_resources
|
||||
fi
|
||||
|
||||
# Check for critical errors that might require stopping
|
||||
local failed_count_current=$(grep -o "🛑 FAILED" "$STATUS_FILE" | wc -l)
|
||||
local total_failed_since_start=$((failed_count_current))
|
||||
|
||||
# Optional: Add logic for stopping if too many failures occur in a row
|
||||
# This is commented out but can be enabled if needed
|
||||
# if [ $total_failed_since_start -gt 50 ]; then
|
||||
# log_message "ERROR" "Too many failures (${total_failed_since_start}), stopping process"
|
||||
# break
|
||||
# fi
|
||||
|
||||
# Update i for the next batch
|
||||
i=$end
|
||||
|
||||
@@ -509,6 +615,9 @@ $(date)
|
||||
EOF
|
||||
done
|
||||
|
||||
# Final cleanup
|
||||
rm -f "$heartbeat_file" 2>/dev/null || true
|
||||
|
||||
# Final audit
|
||||
perform_audit
|
||||
log_message "INFO" "Completed Cloudron packaging process"
|
||||
|
||||
Reference in New Issue
Block a user