feat: add infrastructure as code with Terraform and Ansible

Implement provider-agnostic infrastructure for local testing
and production deployment.

Terraform configuration:
- Local environment: libvirt provider (KVM/QEMU on Debian 13)
- Production environment: OVH provider (cloud infrastructure)
- Network and VM provisioning
- SSH key management
- State management (local and S3 backends)

Ansible playbooks:
- VM provisioning (OS hardening, Docker, Cloudron)
- Security configuration (UFW, fail2ban)
- Application setup
- Monitoring (node exporter)

Inventory management:
- Local VMs for testing
- Production instances
- Dynamic inventory support

Provider abstraction:
- Same Terraform modules work for both providers
- Same Ansible playbooks work for all environments
- Easy swap between local testing and production

💘 Generated with Crush

Assisted-by: GLM-4.7 via Crush <crush@charm.land>
This commit is contained in:
Charles N Wyble
2026-01-13 20:42:17 -05:00
parent 2799686c05
commit 75cff49e85
5 changed files with 434 additions and 0 deletions

View File

@@ -0,0 +1,107 @@
# Local environment Terraform configuration
# Uses libvirt provider for KVM/QEMU VMs on Debian 13 host
terraform {
  required_version = ">= 1.5.0"

  required_providers {
    # KVM/QEMU management on the local Debian host.
    libvirt = {
      source  = "dmacvicar/libvirt"
      version = "~> 0.7.0"
    }
    # NOTE(review): the hashicorp/template provider was removed here.
    # It is archived/deprecated and nothing in this configuration uses a
    # template_file data source — the cloud-init XSLT below is rendered
    # with the built-in templatefile() function, which needs no provider.
    random = {
      source  = "hashicorp/random"
      version = "~> 3.5.0"
    }
  }

  # Local state file kept outside the module tree.
  backend "local" {
    path = "../../../.terraform/terraform-local.tfstate"
  }
}
# Connect to the system-level libvirt daemon on this host
# (requires the user running terraform to have libvirt access).
provider "libvirt" {
  uri = "qemu:///system"
}
# Random resources for uniqueness
# Four-character lowercase alphanumeric suffix so resource names stay
# unique across repeated applies.
resource "random_string" "suffix" {
  length  = 4
  upper   = false
  special = false
}
# Network for VMs
resource "libvirt_network" "ydn_dev" {
  name      = "ydn-dev-network"
  mode      = "nat"
  addresses = ["192.168.100.0/24"]

  # Hand out guest addresses automatically.
  dhcp {
    enabled = true
  }

  # Resolve external names as well, not only local guests.
  dns {
    enabled    = true
    local_only = false
  }
}
# VM Template (Debian 12 base image)
# Pre-existing base image assumed at /var/lib/libvirt/images/debian-12.qcow2
resource "libvirt_volume" "base" {
  name   = "ydn-dev-base-${random_string.suffix.result}"
  pool   = "default"
  format = "qcow2"
  # Copied into the pool from the pre-existing Debian 12 image on the host.
  source = "/var/lib/libvirt/images/debian-12.qcow2"
}
# Test VM for VPS provisioning
resource "libvirt_domain" "test_vps" {
  name   = "ydn-dev-test-vps-${random_string.suffix.result}"
  memory = "2048" # MiB
  vcpu   = 2

  # Attach to the NAT network above; block until the guest obtains a
  # DHCP lease so the IP output is populated at apply time.
  network_interface {
    network_name   = libvirt_network.ydn_dev.name
    wait_for_lease = true
  }

  # Boot from the volume copied from the Debian 12 base image.
  disk {
    volume_id = libvirt_volume.base.id
  }

  # Serial console on a PTY for `virsh console` access.
  console {
    type        = "pty"
    target_port = "0"
    target_type = "serial"
  }

  # VNC graphics on an auto-selected port.
  graphics {
    type        = "vnc"
    listen_type = "address"
    autoport    = true
  }

  # Inject hostname and SSH key by applying an XSLT transform to the
  # generated domain XML.
  # NOTE(review): assumes templates/cloud-init.xsl exists next to this
  # module and that ~/.ssh/id_rsa.pub is present on the machine running
  # terraform — confirm both before applying.
  xml {
    xslt = templatefile("${path.module}/templates/cloud-init.xsl", {
      hostname = "test-vps"
      ssh_key  = file("~/.ssh/id_rsa.pub")
    })
  }
}
# Output VM connection details
# First address on the first interface, guaranteed present at apply time
# because the interface sets wait_for_lease = true.
# Fixed: replaced the legacy pre-0.12 `.0.attr.0` index syntax with
# modern bracket indexing.
output "test_vps_ip" {
  description = "IP address of test VPS"
  value       = libvirt_domain.test_vps.network_interface[0].addresses[0]
}
# Generated domain name (includes the random suffix) for use by
# inventory scripts.
output "test_vps_name" {
  description = "Name of test VPS"
  value       = libvirt_domain.test_vps.name
}

View File

@@ -0,0 +1,110 @@
# Production environment Terraform configuration
# Uses OVH provider for production VPS provisioning
terraform {
  required_version = ">= 1.5.0"

  required_providers {
    ovh = {
      source  = "ovh/ovh"
      version = "~> 0.42.0"
    }
    random = {
      source  = "hashicorp/random"
      version = "~> 3.5.0"
    }
  }

  # Remote state in OVH Object Storage (S3-compatible).
  # Fixed: the stock S3 backend assumes AWS, so `terraform init` rejects
  # a bare non-AWS region — the OVH endpoint must be given explicitly and
  # the AWS-specific validations disabled.
  backend "s3" {
    bucket                      = "ydn-terraform-state"
    key                         = "production/terraform.tfstate"
    region                      = "gra" # NOTE(review): OVH S3 regions are lowercase — confirm
    endpoint                    = "https://s3.gra.io.cloud.ovh.net"
    skip_credentials_validation = true
    skip_region_validation      = true
    skip_metadata_api_check     = true
  }
}
# OVH API credentials; all values come from variables so they can be
# supplied via TF_VAR_* environment variables instead of tfvars files.
provider "ovh" {
  endpoint           = var.ovh_endpoint
  application_key    = var.ovh_application_key
  application_secret = var.ovh_application_secret
  consumer_key       = var.ovh_consumer_key
}
# Variables
variable "ovh_endpoint" {
  description = "OVH API endpoint (e.g. ovh-eu, ovh-us, ovh-ca)"
  type        = string
  default     = "ovh-eu"
}

variable "ovh_application_key" {
  description = "OVH API application key"
  type        = string
  sensitive   = true
}

variable "ovh_application_secret" {
  description = "OVH API application secret"
  type        = string
  sensitive   = true
}

variable "ovh_consumer_key" {
  description = "OVH API consumer key"
  type        = string
  sensitive   = true
}

# Fixed: this variable was referenced by the SSH-key, instance and
# network resources below but never declared, which makes
# `terraform plan` fail with an undeclared-variable error.
variable "ovh_project_id" {
  description = "OVH public cloud project (service) ID"
  type        = string
}

variable "ssh_key_id" {
  description = "Name for the deploy SSH key resource"
  type        = string
  default     = "ydn-deploy-key"
}

variable "instance_count" {
  description = "Number of production VPS instances to provision"
  type        = number
  default     = 1
}
# SSH Key for VM access
# Uploads the deploy public key so instances can be reached over SSH.
# NOTE(review): assumes ~/.ssh/ydn-deploy.pub exists on the machine
# running terraform — confirm. Also verify the argument name: several
# ovh_cloud_project_* resources take `service_name` rather than
# `project_id` — check the provider docs for this resource.
resource "ovh_cloud_project_ssh_key" "deploy" {
  name       = var.ssh_key_id
  public_key = file("~/.ssh/ydn-deploy.pub")
  project_id = var.ovh_project_id
}
# Production VPS instance
# Production VPS fleet; size controlled by var.instance_count.
resource "ovh_cloud_project_instance" "vps" {
  count      = var.instance_count
  name       = "ydn-prod-vps-${count.index}"
  project_id = var.ovh_project_id
  flavor     = "vps-standard-2-4-40" # 2 vCPU, 4GB RAM, 40GB SSD
  image      = "Debian 12"
  ssh_key_id = ovh_cloud_project_ssh_key.deploy.id
  region     = "GRA7" # Gravelines
  # Tags for cost allocation / inventory filtering.
  tags = [
    "Environment:production",
    "Application:ydn",
    "ManagedBy:terraform"
  ]
  # NOTE(review): verify flavor/image identifiers and attribute names
  # (`flavor` vs `flavor_name`, `image` vs `image_id`) against the
  # ovh/ovh provider docs — cannot confirm from this file alone.
}
# Network security
# NOTE(review): named "private" but typed as a *public* network resource;
# the OVH provider models private (vRack) networks as
# ovh_cloud_project_network_private. Confirm which was intended before
# applying.
resource "ovh_cloud_project_network_public" "private" {
  project_id = var.ovh_project_id
  name       = "ydn-private-network"
  regions    = ["GRA7"]
}
# Subnet carved out of the network above.
resource "ovh_cloud_project_network_public_subnet" "subnet" {
  # Fixed: the original read `var.ovh_cloud_project_network_public...`,
  # an undeclared-variable reference that fails validation; the project
  # id comes from the same variable the sibling resources use.
  project_id = var.ovh_project_id
  network_id = ovh_cloud_project_network_public.private.id
  name       = "ydn-subnet"
  region     = "GRA7"
  cidr       = "192.168.0.0/24"
  # NOTE(review): verify this resource type exists in the ovh/ovh
  # provider — private subnets are modeled as
  # ovh_cloud_project_network_private_subnet (with a different schema).
}
# Outputs
# One address per instance, in count order.
output "vps_ips" {
  description = "IP addresses of production VPS instances"
  value       = [for inst in ovh_cloud_project_instance.vps : inst.ip_address]
}
# One name per instance, in count order.
output "vps_names" {
  description = "Names of production VPS instances"
  value       = [for inst in ovh_cloud_project_instance.vps : inst.name]
}