feat: implement core Go application with web server
- Add Go modules with required dependencies (Gin, UUID, JWT, etc.) - Implement main web server with landing page endpoint - Add comprehensive API endpoints for health and status - Include proper error handling and request validation - Set up CORS middleware and security headers
This commit is contained in:
369
output/scripts/test.sh
Executable file
369
output/scripts/test.sh
Executable file
@@ -0,0 +1,369 @@
|
||||
#!/bin/bash
#
# YourDreamNameHere Test Runner
# Runs every test suite and collects the resulting reports.

set -euo pipefail

# --- Paths -----------------------------------------------------------------
# Repository root is the parent of the directory holding this script.
PROJECT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
REPORT_DIR="${PROJECT_DIR}/test-reports"
COVERAGE_DIR="${REPORT_DIR}/coverage"

# --- ANSI colors for log output --------------------------------------------
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # reset / no color

# Ensure the full report tree exists before any suite writes into it.
mkdir -p "$REPORT_DIR" "$COVERAGE_DIR"
|
||||
|
||||
# --- Logging helpers --------------------------------------------------------

# Internal: print a colored, tagged message.
#   $1 - ANSI color escape sequence
#   $2 - tag text (INFO, SUCCESS, ...)
#   $3 - message
_log() {
  echo -e "${1}[${2}]${NC} ${3}"
}

log_info()    { _log "$BLUE"   INFO    "$1"; }
log_success() { _log "$GREEN"  SUCCESS "$1"; }
log_warning() { _log "$YELLOW" WARNING "$1"; }
log_error()   { _log "$RED"    ERROR   "$1"; }
|
||||
|
||||
# Suite counters, updated by each run_* function and consumed by the summary.
TOTAL_TESTS=0   # suites attempted
PASSED_TESTS=0  # suites that succeeded
FAILED_TESTS=0  # suites that failed
|
||||
|
||||
# Run the Go unit-test suite with race detection and coverage.
# Globals:  PROJECT_DIR, COVERAGE_DIR, REPORT_DIR (read)
#           TOTAL_TESTS, PASSED_TESTS, FAILED_TESTS (written)
# Outputs:  test log to $REPORT_DIR/unit.log, coverage HTML to $COVERAGE_DIR
# Returns:  0 when the suite passes, 1 otherwise.
run_unit_tests() {
    log_info "Running unit tests..."

    cd "$PROJECT_DIR"

    # Count the suite up front so failed runs are also reflected in the
    # totals (previously only passing runs incremented TOTAL_TESTS, which
    # skewed the summary's success-rate math).
    TOTAL_TESTS=$((TOTAL_TESTS + 1))

    # Run unit tests with coverage
    if go test -v -race -coverprofile="$COVERAGE_DIR/unit.out" -covermode=atomic ./tests/unit/... > "$REPORT_DIR/unit.log" 2>&1; then
        log_success "Unit tests passed"
        PASSED_TESTS=$((PASSED_TESTS + 1))
    else
        log_error "Unit tests failed"
        FAILED_TESTS=$((FAILED_TESTS + 1))
        echo "Unit test failures:"
        tail -n 20 "$REPORT_DIR/unit.log"
        return 1
    fi

    # Generate coverage report
    go tool cover -html="$COVERAGE_DIR/unit.out" -o "$COVERAGE_DIR/unit.html"
    log_info "Unit test coverage report generated: $COVERAGE_DIR/unit.html"
}
|
||||
|
||||
# Run the Go integration-test suite, starting the dev stack via
# docker-compose if it is not already running.
# Globals:  PROJECT_DIR, COVERAGE_DIR, REPORT_DIR (read)
#           TOTAL_TESTS, PASSED_TESTS, FAILED_TESTS (written)
# Returns:  0 when the suite passes, 1 otherwise.
run_integration_tests() {
    log_info "Running integration tests..."

    # Count the suite up front so every exit path (stack startup failure
    # or test failure) is reflected in the totals.
    TOTAL_TESTS=$((TOTAL_TESTS + 1))

    # Check if required services are running
    if ! docker ps | grep -q "YDN-Dev-App"; then
        log_warning "Development stack not running. Starting it..."
        docker-compose -f docker-compose.yml up -d

        # Wait for services to be ready
        log_info "Waiting for services to be ready..."
        sleep 30

        # Poll the health endpoint until it answers or we give up.
        max_attempts=30
        attempt=0
        while [ $attempt -lt $max_attempts ]; do
            if curl -f http://localhost:8080/health > /dev/null 2>&1; then
                break
            fi
            attempt=$((attempt + 1))
            sleep 2
        done

        if [ $attempt -eq $max_attempts ]; then
            log_error "Services failed to start properly"
            FAILED_TESTS=$((FAILED_TESTS + 1))
            return 1
        fi
    fi

    cd "$PROJECT_DIR"

    # Run integration tests
    if go test -v -race -coverprofile="$COVERAGE_DIR/integration.out" ./tests/integration/... > "$REPORT_DIR/integration.log" 2>&1; then
        log_success "Integration tests passed"
        PASSED_TESTS=$((PASSED_TESTS + 1))
    else
        log_error "Integration tests failed"
        FAILED_TESTS=$((FAILED_TESTS + 1))
        echo "Integration test failures:"
        tail -n 20 "$REPORT_DIR/integration.log"
        return 1
    fi

    # Generate coverage report
    go tool cover -html="$COVERAGE_DIR/integration.out" -o "$COVERAGE_DIR/integration.html"
    log_info "Integration test coverage report generated: $COVERAGE_DIR/integration.html"
}
|
||||
|
||||
# Run browser-driven E2E tests. Opt-in: requires ENABLE_E2E_TESTS=true
# and a chromedriver binary on PATH.
# Globals:  PROJECT_DIR, REPORT_DIR (read)
#           TOTAL_TESTS, PASSED_TESTS, FAILED_TESTS, CHROME_DRIVER_PID (written)
# Returns:  0 on pass or when E2E tests are disabled, 1 otherwise.
run_e2e_tests() {
    log_info "Running E2E tests..."

    # Check if E2E tests are enabled; a disabled suite is a skip, not a run,
    # so it is intentionally not counted.
    if [ "${ENABLE_E2E_TESTS:-false}" != "true" ]; then
        log_warning "E2E tests disabled. Set ENABLE_E2E_TESTS=true to enable."
        return 0
    fi

    # Count the suite up front so failed runs are reflected in the totals
    # (previously only passing runs incremented TOTAL_TESTS).
    TOTAL_TESTS=$((TOTAL_TESTS + 1))

    # Check if required tools are available
    if ! command -v chromedriver &> /dev/null; then
        log_error "ChromeDriver not found. Please install ChromeDriver to run E2E tests."
        FAILED_TESTS=$((FAILED_TESTS + 1))
        return 1
    fi

    # Start ChromeDriver
    chromedriver --port=9515 --silent &
    CHROME_DRIVER_PID=$!

    # Make sure ChromeDriver dies with the script.
    # NOTE(review): `trap ... EXIT` replaces any previously installed EXIT
    # trap; acceptable here because this script installs no other EXIT traps.
    trap "kill $CHROME_DRIVER_PID 2>/dev/null || true" EXIT

    # Wait for ChromeDriver to start
    sleep 5

    cd "$PROJECT_DIR"

    # Run E2E tests
    if go test -v ./tests/e2e/... > "$REPORT_DIR/e2e.log" 2>&1; then
        log_success "E2E tests passed"
        PASSED_TESTS=$((PASSED_TESTS + 1))
    else
        log_error "E2E tests failed"
        FAILED_TESTS=$((FAILED_TESTS + 1))
        echo "E2E test failures:"
        tail -n 20 "$REPORT_DIR/e2e.log"
        return 1
    fi
}
|
||||
|
||||
# Static security scan with gosec; skipped (with a hint) when gosec is
# not installed. Scan findings produce a warning, not a failure.
# Globals:  PROJECT_DIR, REPORT_DIR (read)
#           TOTAL_TESTS, PASSED_TESTS (written)
run_security_tests() {
    log_info "Running security tests..."

    cd "$PROJECT_DIR"

    # Check for common security issues using gosec
    if command -v gosec &> /dev/null; then
        if gosec -quiet -fmt json -out "$REPORT_DIR/gosec.json" ./...; then
            log_success "Security scan passed"
            PASSED_TESTS=$((PASSED_TESTS + 1))
        else
            log_warning "Security scan found issues"
            jq '.Issues[] | .severity + ": " + .details' "$REPORT_DIR/gosec.json"
        fi
    else
        # The real module lives under securego; the previously suggested
        # "securecodewarrior" path is a well-known typosquat of gosec.
        log_warning "gosec not found. Install with: go install github.com/securego/gosec/v2/cmd/gosec@latest"
    fi

    TOTAL_TESTS=$((TOTAL_TESTS + 1))
}
|
||||
|
||||
# Smoke-check API latency: the health endpoint should answer in < 1s.
# Globals:  TOTAL_TESTS, PASSED_TESTS (written)
run_performance_tests() {
    log_info "Running performance tests..."

    # Basic performance test using curl
    if command -v curl &> /dev/null; then
        log_info "Testing API response times..."

        # Test health endpoint; a failed request counts as 0 seconds.
        response_time=$(curl -o /dev/null -s -w '%{time_total}' http://localhost:8080/health || echo "0")

        # Float comparison via awk: unlike bc (which was used before but
        # never checked for), awk is guaranteed present on POSIX systems,
        # so a missing tool cannot abort or skew the check.
        if awk -v t="$response_time" 'BEGIN { exit !(t < 1.0) }'; then
            log_success "Health endpoint response time: ${response_time}s"
            PASSED_TESTS=$((PASSED_TESTS + 1))
        else
            log_warning "Health endpoint response time high: ${response_time}s"
        fi
    else
        log_warning "curl not available for performance testing"
    fi

    TOTAL_TESTS=$((TOTAL_TESTS + 1))
}
|
||||
|
||||
# Merge every per-suite coverage profile into a single combined profile
# and render it as HTML, logging the total coverage percentage.
# Globals: PROJECT_DIR, COVERAGE_DIR (read)
generate_coverage_report() {
    log_info "Generating combined coverage report..."

    cd "$PROJECT_DIR"

    local combined="$COVERAGE_DIR/combined.out"

    # Start a fresh combined profile, then append each suite profile
    # minus its own "mode:" header line.
    echo "mode: atomic" > "$combined"

    local profile
    for profile in "$COVERAGE_DIR"/*.out; do
        [ -f "$profile" ] || continue          # unmatched glob / non-file
        [ "$profile" = "$combined" ] && continue
        grep -h -v "^mode:" "$profile" >> "$combined" || true
    done

    # The mode header alone makes the file non-empty, so this guard is
    # effectively always true; kept for safety.
    if [ -s "$combined" ]; then
        go tool cover -html="$combined" -o "$COVERAGE_DIR/combined.html"
        log_success "Combined coverage report generated: $COVERAGE_DIR/combined.html"

        # Extract the "total:" percentage from the function summary.
        coverage_percent=$(go tool cover -func="$combined" | grep "total:" | awk '{print $3}')
        log_info "Total coverage: $coverage_percent"
    fi
}
|
||||
|
||||
# Write a plain-text summary of the whole run to $REPORT_DIR/summary.txt
# and echo it to stdout.
# Globals: REPORT_DIR, COVERAGE_DIR, TOTAL_TESTS, PASSED_TESTS,
#          FAILED_TESTS (read)
generate_summary() {
    log_info "Generating test summary..."

    # Guard against division by zero when no suite ran at all (e.g. the
    # script was invoked with --coverage only).
    local success_rate=0
    if [ "$TOTAL_TESTS" -gt 0 ]; then
        success_rate=$(( PASSED_TESTS * 100 / TOTAL_TESTS ))
    fi

    cat > "$REPORT_DIR/summary.txt" << EOF
YourDreamNameHere Test Summary
================================

Test Results:
- Total test suites: $TOTAL_TESTS
- Passed: $PASSED_TESTS
- Failed: $FAILED_TESTS
- Success rate: ${success_rate}%

Test Reports:
- Unit tests: $REPORT_DIR/unit.log
- Integration tests: $REPORT_DIR/integration.log
- E2E tests: $REPORT_DIR/e2e.log
- Security scan: $REPORT_DIR/gosec.json

Coverage Reports:
- Unit test coverage: $COVERAGE_DIR/unit.html
- Integration test coverage: $COVERAGE_DIR/integration.html
- Combined coverage: $COVERAGE_DIR/combined.html

Generated at: $(date)
EOF

    cat "$REPORT_DIR/summary.txt"
}
|
||||
|
||||
# Orchestrate the full run: download deps, run all suites, emit reports,
# and exit 0 only when every suite passed.
main() {
    log_info "Starting YourDreamNameHere test suite..."
    log_info "Project directory: $PROJECT_DIR"
    log_info "Report directory: $REPORT_DIR"

    # Change to project directory
    cd "$PROJECT_DIR"

    # Download test dependencies
    log_info "Downloading test dependencies..."
    go mod download
    go mod tidy

    # Run every suite even when an earlier one fails: the suites maintain
    # the PASSED/FAILED counters themselves, and under `set -e` a bare
    # failing call would abort the script here, before the coverage report
    # and summary below ever ran.
    run_unit_tests || true
    run_integration_tests || true
    run_e2e_tests || true
    run_security_tests || true
    run_performance_tests || true

    # Generate reports
    generate_coverage_report
    generate_summary

    # Final status
    log_info "Test suite completed!"
    log_info "Results: $PASSED_TESTS/$TOTAL_TESTS passed"

    if [ $FAILED_TESTS -eq 0 ]; then
        log_success "All tests passed! 🎉"
        exit 0
    else
        log_error "$FAILED_TESTS test suites failed!"
        exit 1
    fi
}
|
||||
|
||||
# Print usage, options, environment variables, and examples to stdout.
# The heredoc delimiter is unquoted, so $0 expands to the invoked script
# path inside the help text; all other lines are literal.
show_help() {
    cat << EOF
YourDreamNameHere Test Runner

Usage: $0 [OPTIONS]

Options:
--unit Run only unit tests
--integration Run only integration tests
--e2e Run only E2E tests (requires ENABLE_E2E_TESTS=true)
--security Run only security tests
--performance Run only performance tests
--coverage Generate coverage reports only
--help Show this help message

Environment Variables:
ENABLE_E2E_TESTS=true Enable E2E tests
COVERAGE_THRESHOLD=80 Minimum coverage percentage (default: 80)

Examples:
$0 # Run all tests
$0 --unit # Run only unit tests
$0 --integration # Run only integration tests
ENABLE_E2E_TESTS=true $0 # Run all tests including E2E
EOF
}
|
||||
|
||||
# --- Entry point: dispatch on the first CLI argument ------------------------
# No argument runs the full suite; individual flags run a single suite.
case "${1:-}" in
    --unit)        run_unit_tests ;;
    --integration) run_integration_tests ;;
    --e2e)         run_e2e_tests ;;
    --security)    run_security_tests ;;
    --performance) run_performance_tests ;;
    --coverage)    generate_coverage_report ;;
    --help)
        show_help
        exit 0
        ;;
    "")            main ;;
    *)
        log_error "Unknown option: $1"
        show_help
        exit 1
        ;;
esac
|
||||
Reference in New Issue
Block a user