Files
football/run.sh
Charles N Wyble 59c96113fd fix: use libvirt session URI consistently, remove QEMU fallback
- All virsh commands now use qemu:///session explicitly
- Removed direct QEMU fallback (libvirt only)
- Added VM XML template in vm/template.xml with variable substitution
- Template generates UUID and MAC address dynamically
- VM is now properly defined AND started in libvirt
- Fixed vm_destroy, vm_console, vm_status, vm_is_running, vm_capture_screen
- Added UUID fallback using /proc/sys/kernel/random/uuid

Fixes issue where VMs were created but not visible/running in virt-manager.

💘 Generated with Crush

Assisted-by: GLM-5 via Crush <crush@charm.land>
2026-02-19 13:05:01 -05:00

600 lines
19 KiB
Bash
Executable File

#!/bin/bash
# KNEL-Football ISO Builder - Main Entry Point
# Orchestrates Docker-based build process and VM testing
# Copyright © 2026 Known Element Enterprises LLC
# License: GNU Affero General Public License v3.0 only
# Fail fast: abort on errors (-e), unset variables (-u), and failed
# pipeline stages (pipefail).
set -euo pipefail
# Configuration variables
# Resolve the script's own directory so paths work from any CWD.
# Assignment is split from 'readonly' so a failure of the command
# substitution is not masked by the readonly builtin's exit status.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
readonly SCRIPT_DIR
# Docker image tag used by every containerized command below.
readonly DOCKER_IMAGE="knel-football-dev:latest"
# Final ISO + checksums land in OUTPUT_DIR; BUILD_DIR is scratch space
# mounted into the container; BUILD_LOG is tee'd output of 'iso'.
readonly OUTPUT_DIR="${SCRIPT_DIR}/output"
readonly BUILD_DIR="${SCRIPT_DIR}/tmp"
readonly BUILD_LOG="/tmp/knel-iso-build.log"
# VM Testing Configuration
# ISO_PATH must match the FINAL_ISO name produced by the 'iso' command.
readonly ISO_PATH="${SCRIPT_DIR}/output/knel-football-secure.iso"
readonly VM_NAME="knel-football-test"
# RAM in MiB, vCPU count, and qcow2 disk size for the test VM.
readonly VM_RAM="2048"
readonly VM_CPUS="2"
readonly VM_DISK_SIZE="10G"
readonly VM_DISK_PATH="/tmp/${VM_NAME}.qcow2"
# Colors for output (ANSI escape sequences, expanded by the loggers)
readonly RED='\033[0;31m'
readonly GREEN='\033[0;32m'
readonly YELLOW='\033[1;33m'
readonly NC='\033[0m' # No Color
# Logging functions
# Each prints a colour-tagged message to stdout. printf replaces
# 'echo -e': %b expands the ANSI escapes stored in the colour
# variables, while %s emits the message verbatim — echo -e would
# also interpret backslash sequences inside the caller's message.
log_info() { printf '%b %s\n' "${GREEN}[INFO]${NC}" "$1"; }
log_warn() { printf '%b %s\n' "${YELLOW}[WARN]${NC}" "$1"; }
log_error() { printf '%b %s\n' "${RED}[ERROR]${NC}" "$1"; }
# Create output and build directories at load time so every command
# below can assume they exist.
mkdir -p "${OUTPUT_DIR}" "${BUILD_DIR}"
# ============================================================================
# VM TESTING FUNCTIONS (merged from test-iso.sh)
# ============================================================================
# Check VM testing prerequisites
# Verifies virsh, session-libvirt connectivity, qemu-img, and the
# built ISO are all available before any VM operation.
# Globals: ISO_PATH (read)
# Returns: 0 when everything is present, 1 on the first failure.
vm_check_prerequisites() {
  log_info "Checking VM testing prerequisites..."
  # Check for virsh command
  if ! command -v virsh &> /dev/null; then
    log_error "virsh command not found"
    log_error "Install libvirt: sudo apt install libvirt-clients libvirt-daemon-system qemu-system-x86"
    return 1
  fi
  # Probe the SAME URI every other command in this script uses
  # (qemu:///session). A bare 'virsh list' hits the default URI,
  # which may be a different (system) daemon and so give a false
  # pass/fail for session-mode testing.
  if ! virsh -c qemu:///session list &> /dev/null; then
    log_error "Cannot connect to libvirt"
    log_error "Ensure libvirtd is running and you have access"
    log_error "Try: sudo usermod -aG libvirt \$USER && logout/login"
    return 1
  fi
  # Check for qemu-img command
  if ! command -v qemu-img &> /dev/null; then
    log_error "qemu-img command not found"
    log_error "Install qemu: sudo apt install qemu-utils"
    return 1
  fi
  # Check ISO exists
  if [[ ! -f "$ISO_PATH" ]]; then
    log_error "ISO not found at: $ISO_PATH"
    log_error "Build the ISO first: ./run.sh iso"
    return 1
  fi
  log_info "All prerequisites satisfied"
  return 0
}
# Provision a fresh qcow2 backing disk for the test VM, discarding
# any previous image left at the same path.
# Globals: VM_DISK_PATH, VM_DISK_SIZE (read)
vm_create_disk() {
  log_info "Creating disk image: ${VM_DISK_PATH}"
  rm -f -- "${VM_DISK_PATH}"
  qemu-img create -f qcow2 "${VM_DISK_PATH}" "${VM_DISK_SIZE}"
}
# Create and start VM
# Builds the disk, locates OVMF firmware (Secure Boot preferred),
# renders vm/template.xml, then defines AND starts the domain in the
# user's session libvirt (qemu:///session).
# Globals: VM_NAME, VM_RAM, VM_CPUS, VM_DISK_PATH, ISO_PATH,
#          SCRIPT_DIR (read)
# Returns: 0 on success, 1 on any failure.
vm_create() {
  # Define libvirt session URI (use session for user-mode, avoids sudo)
  local LIBVIRT_URI="qemu:///session"
  log_info "Creating VM: $VM_NAME (libvirt: $LIBVIRT_URI)"
  # Destroy existing VM if present (in session libvirt)
  virsh -c "$LIBVIRT_URI" destroy "$VM_NAME" 2>/dev/null || true
  virsh -c "$LIBVIRT_URI" undefine "$VM_NAME" --nvram 2>/dev/null || true
  # Create disk
  vm_create_disk
  # Find UEFI firmware with Secure Boot support (REQUIRED).
  # Require the matching VARS file too — previously only CODE was
  # checked, so the 'cp "$uefi_vars"' below could fail mid-setup on
  # a CODE-only install.
  local uefi_code=""
  local uefi_vars=""
  local fw_dir
  for fw_dir in /usr/share/OVMF /usr/share/qemu; do
    if [[ -f "$fw_dir/OVMF_CODE_4M.secboot.fd" && -f "$fw_dir/OVMF_VARS_4M.ms.fd" ]]; then
      uefi_code="$fw_dir/OVMF_CODE_4M.secboot.fd"
      uefi_vars="$fw_dir/OVMF_VARS_4M.ms.fd"
      break
    elif [[ -f "$fw_dir/OVMF_CODE_4M.fd" && -f "$fw_dir/OVMF_VARS_4M.fd" ]]; then
      uefi_code="$fw_dir/OVMF_CODE_4M.fd"
      uefi_vars="$fw_dir/OVMF_VARS_4M.fd"
      break
    elif [[ -f "$fw_dir/OVMF_CODE.fd" && -f "$fw_dir/OVMF_VARS.fd" ]]; then
      uefi_code="$fw_dir/OVMF_CODE.fd"
      uefi_vars="$fw_dir/OVMF_VARS.fd"
      break
    fi
  done
  if [[ -z "$uefi_code" ]]; then
    log_error "UEFI firmware with Secure Boot (OVMF) not found"
    log_error "Install required: sudo apt install ovmf"
    log_error "UEFI with Secure Boot is REQUIRED for KNEL-Football testing"
    return 1
  fi
  # Create copy of OVMF_VARS for this VM (Secure Boot state stored here)
  local vm_vars="/tmp/${VM_NAME}_VARS.fd"
  cp "$uefi_vars" "$vm_vars"
  # Determine secure boot setting once (the suffix test was
  # previously duplicated: once for logging, once for the template).
  local secure_boot="no"
  if [[ "$uefi_code" == *".secboot.fd" ]]; then
    secure_boot="yes"
    log_info "Using UEFI firmware with Secure Boot: $uefi_code"
  else
    log_warn "Using UEFI firmware WITHOUT Secure Boot: $uefi_code"
    log_warn "For full Secure Boot testing, install: sudo apt install ovmf"
  fi
  # Use persisted XML template
  local template="${SCRIPT_DIR}/vm/template.xml"
  if [[ ! -f "$template" ]]; then
    log_error "VM template not found: $template"
    return 1
  fi
  # Generate dynamic values: UUID (uuidgen with /proc fallback) and a
  # random MAC under the QEMU locally-administered 52:54:00 prefix.
  local vm_uuid
  if command -v uuidgen &> /dev/null; then
    vm_uuid=$(uuidgen)
  else
    vm_uuid=$(cat /proc/sys/kernel/random/uuid)
  fi
  local mac_address="52:54:00:$(od -An -N3 -tx1 /dev/urandom | tr ' ' ':' | cut -c2-13)"
  # Create VM XML from template. '|' is the sed delimiter because the
  # substituted values are paths containing '/'.
  local vm_xml="/tmp/${VM_NAME}.xml"
  sed -e "s|@VM_NAME@|${VM_NAME}|g" \
    -e "s|@VM_UUID@|${vm_uuid}|g" \
    -e "s|@VM_RAM@|${VM_RAM}|g" \
    -e "s|@VM_CPUS@|${VM_CPUS}|g" \
    -e "s|@SECURE_BOOT@|${secure_boot}|g" \
    -e "s|@UEFI_CODE@|${uefi_code}|g" \
    -e "s|@UEFI_VARS@|${vm_vars}|g" \
    -e "s|@VM_DISK@|${VM_DISK_PATH}|g" \
    -e "s|@ISO_PATH@|${ISO_PATH}|g" \
    -e "s|@MAC_ADDRESS@|${mac_address}|g" \
    "$template" > "$vm_xml"
  # Define and START the VM
  if ! virsh -c "$LIBVIRT_URI" define "$vm_xml"; then
    log_error "Failed to define VM from XML"
    return 1
  fi
  if ! virsh -c "$LIBVIRT_URI" start "$VM_NAME"; then
    log_error "Failed to start VM"
    return 1
  fi
  # Get VNC display info. '|| true' is required: vncdisplay fails for
  # non-VNC graphics, and under 'set -e' the bare assignment would
  # kill the whole script AFTER the VM has already started.
  local vnc_display
  vnc_display=$(virsh -c "$LIBVIRT_URI" vncdisplay "$VM_NAME" 2>/dev/null) || true
  log_info "VM created and STARTED successfully"
  log_info "VNC display: $vnc_display"
  log_info ""
  log_info "To connect in virt-manager:"
  log_info " 1. Open virt-manager"
  log_info " 2. File → Add Connection"
  log_info " 3. Enter URI: qemu:///session"
  log_info " 4. Connect and double-click '$VM_NAME'"
}
# Attach an interactive serial console to the test VM in the user's
# session libvirt. Blocks until the console is detached.
vm_console() {
  local uri="qemu:///session"
  log_info "Connecting to VM console..."
  virsh --connect "$uri" console "$VM_NAME"
}
# Show domain information for the test VM; reports an error when the
# domain cannot be queried. Always returns 0.
vm_status() {
  local uri="qemu:///session"
  log_info "VM Status for: $VM_NAME"
  if ! virsh --connect "$uri" dominfo "$VM_NAME" 2>/dev/null; then
    log_error "VM not running"
  fi
}
# Succeed (0) when the session-libvirt domain state for the test VM
# contains "running"; fail (1) otherwise.
vm_is_running() {
  local uri="qemu:///session"
  local state
  state=$(virsh -c "$uri" domstate "$VM_NAME" 2>/dev/null) || state=""
  [[ "$state" == *running* ]]
}
# Save a screenshot of the VM display to tmp/vm-screenshots/ under
# the script directory; warns (does not fail) if capture is impossible.
vm_capture_screen() {
  local uri="qemu:///session"
  local shot_dir="${SCRIPT_DIR}/tmp/vm-screenshots"
  mkdir -p "$shot_dir"
  log_info "Capturing boot screen..."
  if ! virsh -c "$uri" screenshot "$VM_NAME" "${shot_dir}/boot-screen.ppm" 2>/dev/null; then
    log_warn "Could not capture screenshot"
  fi
}
# Tear down the test VM (session libvirt) and remove all of its
# scratch files: disk image, per-VM OVMF vars copy, rendered XML.
vm_destroy() {
  local uri="qemu:///session"
  log_info "Destroying VM: $VM_NAME"
  virsh -c "$uri" destroy "$VM_NAME" 2>/dev/null || true
  virsh -c "$uri" undefine "$VM_NAME" --nvram 2>/dev/null || true
  rm -f -- "$VM_DISK_PATH" "/tmp/${VM_NAME}_VARS.fd" "/tmp/${VM_NAME}.xml"
  log_info "Cleanup complete"
}
# End-to-end smoke test: verify prerequisites, create the VM, wait
# 30s for boot, then confirm the domain is running and grab status
# plus a screenshot. Returns 0 on PASS, 1 on FAIL.
vm_boot_test() {
  log_info "Running automated boot test..."
  vm_check_prerequisites || return 1
  vm_create
  log_info "Waiting for VM to boot (30 seconds)..."
  sleep 30
  if ! vm_is_running; then
    log_error "VM not running - boot test FAILED"
    return 1
  fi
  log_info "VM is running - boot test PASSED"
  vm_status
  vm_capture_screen
  return 0
}
# Secure Boot check: requires a running VM, then prints the manual
# verification steps (no automated assertion is possible here).
vm_test_secure_boot() {
  log_info "Testing Secure Boot..."
  if ! vm_is_running; then
    log_error "VM not running, start it first"
    return 1
  fi
  log_info "Secure Boot verification requires manual console inspection"
  log_info "Use: ./run.sh test:iso console"
  log_info "Then check: dmesg | grep -i secure"
}
# FDE check: requires a running VM, then prints the manual steps for
# verifying the LUKS passphrase prompt during boot.
vm_test_fde() {
  log_info "Testing FDE passphrase prompt..."
  if ! vm_is_running; then
    log_error "VM not running, start it first"
    return 1
  fi
  log_info "FDE prompt verification requires manual console inspection"
  log_info "Use: ./run.sh test:iso console"
  log_info "Watch for 'Please unlock disk' prompt during boot"
}
# ============================================================================
# BUILD MONITOR FUNCTION (merged from monitor-build.sh)
# ============================================================================
# Poll the ISO build log every check_interval seconds and report
# progress, breaking out when the log announces completion or failure.
# Arguments: $1 - poll interval in seconds (default 180)
# Globals:   BUILD_LOG, OUTPUT_DIR (read)
monitor_build() {
  local check_interval="${1:-180}"
  echo "=== ISO Build Monitor ==="
  echo "Started: $(date)"
  echo "Checking every ${check_interval}s"
  echo "Log file: $BUILD_LOG"
  echo ""
  while true; do
    if [[ -f "$BUILD_LOG" ]]; then
      local lines
      lines=$(wc -l < "$BUILD_LOG")
      # '|| true' is required: with 'set -e -o pipefail' active in
      # this script, a no-match grep would otherwise abort the
      # monitor on its very first poll.
      local last_stage
      last_stage=$(grep -E "^\[.*\] lb (bootstrap|chroot|installer|binary|source)" "$BUILD_LOG" 2>/dev/null | tail -1) || true
      # grep -c already prints "0" on no match (while exiting
      # non-zero), so the fallback must ASSIGN 0 rather than echo an
      # extra "0" into the substitution (which produced "0\n0").
      local errors
      errors=$(grep -ic "error\|failed\|fatal" "$BUILD_LOG" 2>/dev/null) || errors=0
      echo "[$(date '+%H:%M:%S')] Lines: $lines | Errors: $errors"
      [[ -n "$last_stage" ]] && echo " Stage: $last_stage"
      # Check if build completed
      if grep -q "ISO build completed" "$BUILD_LOG" 2>/dev/null; then
        echo ""
        echo "=== BUILD COMPLETED ==="
        echo "Finished: $(date)"
        ls -lh "${OUTPUT_DIR}"/*.iso 2>/dev/null || echo "No ISO found in output/"
        break
      fi
      # Check if build failed
      if grep -q "ISO build failed" "$BUILD_LOG" 2>/dev/null; then
        echo ""
        echo "=== BUILD FAILED ==="
        echo "Check log: $BUILD_LOG"
        tail -20 "$BUILD_LOG"
        break
      fi
    else
      echo "[$(date '+%H:%M:%S')] Waiting for build log..."
    fi
    sleep "$check_interval"
  done
}
# ============================================================================
# USAGE AND MAIN
# ============================================================================
# Print the full command reference and exit. Also serves as the
# default action for 'help' and unknown commands in main().
usage() {
# Unquoted EOF delimiter: $0 below is expanded intentionally so the
# examples show the actual invocation name.
cat <<EOF
KNEL-Football ISO Builder - Main Entry Point
Usage: $0 <command> [args]
Build Commands:
build Build Docker image
iso Build ISO (60-90 minutes)
monitor [secs] Monitor build progress (default: check every 180s)
clean Clean build artifacts
Test Commands:
test Run all tests
test:unit Run unit tests only
test:integration Run integration tests only
test:security Run security tests only
test:system Run system tests only (requires libvirt)
lint Run linting checks (shellcheck)
VM Testing Commands (requires libvirt on host):
test:iso check Check VM testing prerequisites
test:iso create Create and start test VM (UEFI/Secure Boot)
test:iso console Connect to VM console
test:iso status Show VM status
test:iso destroy Destroy VM and cleanup
test:iso boot-test Run automated boot test
test:iso secure-boot Test Secure Boot (manual verification)
test:iso fde-test Test FDE passphrase prompt (manual verification)
Other Commands:
shell Interactive shell in build container
help Show this help message
Prerequisites for VM Testing:
- User must be in libvirt group
- libvirtd service must be running
- OVMF must be installed (sudo apt install ovmf)
- ISO must exist in output/
Examples:
$0 build # Build Docker image
$0 iso # Build ISO (60-90 min)
$0 monitor # Monitor build progress
$0 test # Run all tests
$0 test:iso boot-test # Boot test in VM
$0 test:iso console # Connect to VM console
$0 test:iso destroy # Cleanup test VM
Note: After adding user to libvirt group, logout and login again.
EOF
# usage always exits with status 1, even for an explicit 'help'.
exit 1
}
# Main entry point
main() {
local command="${1:-help}"
case "${command}" in
build)
echo "Building KNEL-Football Docker image..."
docker build -t "${DOCKER_IMAGE}" "${SCRIPT_DIR}"
;;
test)
echo "Running KNEL-Football test suite..."
docker run --rm \
-v "${SCRIPT_DIR}:/workspace:ro" \
-v "${BUILD_DIR}:/build" \
-e BATS_TMPDIR=/build/tmp \
"${DOCKER_IMAGE}" \
bash -c "cd /workspace && bats tests/simple_test.bats tests/unit/ tests/integration/ tests/security/ tests/system/"
;;
test:unit)
echo "Running unit tests..."
docker run --rm \
-v "${SCRIPT_DIR}:/workspace:ro" \
-v "${BUILD_DIR}:/build" \
-e BATS_TMPDIR=/build/tmp \
"${DOCKER_IMAGE}" \
bash -c "cd /workspace && bats tests/unit/"
;;
test:integration)
echo "Running integration tests..."
docker run --rm \
-v "${SCRIPT_DIR}:/workspace:ro" \
-v "${BUILD_DIR}:/build" \
-e BATS_TMPDIR=/build/tmp \
"${DOCKER_IMAGE}" \
bash -c "cd /workspace && bats tests/integration/"
;;
test:security)
echo "Running security tests..."
docker run --rm \
-v "${SCRIPT_DIR}:/workspace:ro" \
-v "${BUILD_DIR}:/build" \
-e BATS_TMPDIR=/build/tmp \
"${DOCKER_IMAGE}" \
bash -c "cd /workspace && bats tests/security/"
;;
test:system)
echo "Running system tests..."
docker run --rm \
-v "${SCRIPT_DIR}:/workspace:ro" \
-v "${BUILD_DIR}:/build" \
-e BATS_TMPDIR=/build/tmp \
"${DOCKER_IMAGE}" \
bash -c "cd /workspace && bats tests/system/"
;;
lint)
echo "Running linting checks..."
docker run --rm \
-v "${SCRIPT_DIR}:/workspace:ro" \
"${DOCKER_IMAGE}" \
bash -c "find /workspace -name '*.sh' -print0 | xargs -0 shellcheck"
;;
clean)
echo "Cleaning build artifacts..."
rm -rf "${OUTPUT_DIR:?}"/*
rm -rf "${BUILD_DIR:?}"/*
;;
shell)
echo "Starting interactive shell..."
docker run --rm -it \
-v "${SCRIPT_DIR}:/workspace:ro" \
-v "${OUTPUT_DIR}:/output" \
-v "${BUILD_DIR}:/build" \
-u "$(id -u):$(id -g)" \
-e TZ="America/Chicago" \
-e DEBIAN_FRONTEND="noninteractive" \
-e LC_ALL="C" \
"${DOCKER_IMAGE}" \
bash
;;
iso)
echo "Building KNEL-Football secure ISO..."
echo "ALL operations run inside Docker container"
echo "Timezone: America/Chicago"
echo "Mandatory: Full disk encryption with LUKS2"
docker run --rm \
--privileged \
--user root \
-v "${SCRIPT_DIR}:/workspace:ro" \
-v "${OUTPUT_DIR}:/output" \
-e TZ="America/Chicago" \
-e DEBIAN_FRONTEND="noninteractive" \
-e LC_ALL="C" \
-e USER_UID="$(id -u)" \
-e USER_GID="$(id -g)" \
"${DOCKER_IMAGE}" \
bash -c '
cd /tmp &&
rm -rf ./* &&
echo "Configuring live-build..." &&
lb config \
--distribution testing \
--architectures amd64 \
--archive-areas "main contrib non-free" \
--mode debian \
--chroot-filesystem squashfs \
--binary-images iso-hybrid \
--iso-application "KNEL-Football Secure OS" \
--iso-publisher "KNEL-Football Security Team" \
--iso-volume "KNEL-Football Secure" \
--debian-installer netinst \
--debian-installer-gui true \
--source false \
--apt-indices false \
--apt-source-archives false &&
if [ -d /workspace/config ]; then
echo "Applying custom configuration..."
cp -r /workspace/config/* ./
fi &&
echo "Starting ISO build..." &&
timeout 3600 lb build &&
ISO_FILE=$(find . -name "*.iso" -type f | head -1) &&
if [ -n "$ISO_FILE" ]; then
echo "ISO created: $ISO_FILE"
sha256sum "$ISO_FILE" > "${ISO_FILE}.sha256"
md5sum "$ISO_FILE" > "${ISO_FILE}.md5"
FINAL_ISO="knel-football-secure.iso"
mv "$ISO_FILE" "$FINAL_ISO"
mv "${ISO_FILE}.sha256" "${FINAL_ISO}.sha256"
mv "${ISO_FILE}.md5" "${FINAL_ISO}.md5"
USER_UID=${USER_UID:-1000}
USER_GID=${USER_GID:-1000}
chown "$USER_UID:$USER_GID" "$FINAL_ISO" "${FINAL_ISO}.sha256" "${FINAL_ISO}.md5"
cp "$FINAL_ISO" "${FINAL_ISO}.sha256" "${FINAL_ISO}.md5" /output/
chown "$USER_UID:$USER_GID" /output/"$FINAL_ISO" /output/"${FINAL_ISO}.sha256" /output/"${FINAL_ISO}.md5"
echo "ISO build completed"
ls -lh /output/
else
echo "ISO build failed"
exit 1
fi
' 2>&1 | tee "$BUILD_LOG"
;;
monitor)
monitor_build "${2:-180}"
;;
test:iso)
shift # Remove 'test:iso' from args
local subcmd="${1:-help}"
case "$subcmd" in
check)
vm_check_prerequisites
;;
create)
vm_check_prerequisites && vm_create
;;
console)
vm_console
;;
status)
vm_status
;;
destroy)
vm_destroy
;;
boot-test)
vm_boot_test
;;
secure-boot)
vm_test_secure_boot
;;
fde-test)
vm_test_fde
;;
help|*)
echo "VM Testing Commands:"
echo " check Check prerequisites"
echo " create Create and start test VM"
echo " console Connect to VM console"
echo " status Show VM status"
echo " destroy Destroy VM and cleanup"
echo " boot-test Run automated boot test"
echo " secure-boot Test Secure Boot"
echo " fde-test Test FDE passphrase prompt"
;;
esac
;;
help|*)
usage
;;
esac
}
main "$@"