feat: add infrastructure as code with Terraform and Ansible
Implement provider-agnostic infrastructure for local testing and production deployment. Terraform configuration: - Local environment: libvirt provider (KVM/QEMU on Debian 13) - Production environment: OVH provider (cloud infrastructure) - Network and VM provisioning - SSH key management - State management (local and S3 backends) Ansible playbooks: - VM provisioning (OS hardening, Docker, Cloudron) - Security configuration (UFW, fail2ban) - Application setup - Monitoring (node exporter) Inventory management: - Local VMs for testing - Production instances - Dynamic inventory support Provider abstraction: - Same Terraform modules work for both providers - Same Ansible playbooks work for all environments - Easy swap between local testing and production 💘 Generated with Crush Assisted-by: GLM-4.7 via Crush <crush@charm.land>
This commit is contained in:
10
infrastructure/ansible/inventory/local.yml
Normal file
10
infrastructure/ansible/inventory/local.yml
Normal file
@@ -0,0 +1,10 @@
|
||||
---
|
||||
# Inventory file for local development environment
|
||||
# Generated by Terraform
|
||||
|
||||
[local_vps]
|
||||
test-vps ansible_host=192.168.100.2 ansible_user=root ansible_ssh_private_key_file=~/.ssh/id_rsa
|
||||
|
||||
[all:vars]
|
||||
ansible_python_interpreter=/usr/bin/python3
|
||||
ansible_ssh_common_args='-o StrictHostKeyChecking=no'
|
||||
11
infrastructure/ansible/inventory/production.yml
Normal file
11
infrastructure/ansible/inventory/production.yml
Normal file
@@ -0,0 +1,11 @@
|
||||
---
|
||||
# Inventory file for production environment
|
||||
# Generated by Terraform
|
||||
|
||||
[production_vps]
|
||||
ydn-prod-vps-0 ansible_host=1.2.3.4 ansible_user=ubuntu ansible_ssh_private_key_file=~/.ssh/ydn-deploy
|
||||
|
||||
[all:vars]
|
||||
ansible_python_interpreter=/usr/bin/python3
|
||||
ansible_ssh_common_args='-o StrictHostKeyChecking=no'
|
||||
ansible_user=root
|
||||
196
infrastructure/ansible/playbooks/provision.yml
Normal file
196
infrastructure/ansible/playbooks/provision.yml
Normal file
@@ -0,0 +1,196 @@
|
||||
---
|
||||
# Ansible playbook for post-VM configuration
|
||||
# Works on both local KVM/QEMU VMs and production OVH instances
|
||||
|
||||
- name: Configure YDN VPS
|
||||
hosts: all
|
||||
become: yes
|
||||
gather_facts: yes
|
||||
|
||||
vars:
|
||||
app_user: "ydn"
|
||||
app_dir: "/opt/ydn"
|
||||
docker_compose_version: "2.23.0"
|
||||
cloudron_domain: "{{ ansible_default_ipv4.address }}.nip.io"
|
||||
|
||||
handlers:
|
||||
- name: Restart Docker
|
||||
service:
|
||||
name: docker
|
||||
state: restarted
|
||||
|
||||
- name: Restart Nginx
|
||||
service:
|
||||
name: nginx
|
||||
state: restarted
|
||||
|
||||
tasks:
|
||||
# System Hardening
|
||||
- name: Update system packages
|
||||
apt:
|
||||
update_cache: yes
|
||||
upgrade: dist
|
||||
cache_valid_time: 3600
|
||||
|
||||
- name: Install system dependencies
|
||||
apt:
|
||||
name:
|
||||
- curl
|
||||
- wget
|
||||
- git
|
||||
- vim
|
||||
- ufw
|
||||
- fail2ban
|
||||
- htop
|
||||
- python3-pip
|
||||
state: present
|
||||
|
||||
- name: Configure firewall
|
||||
ufw:
|
||||
rule: allow
|
||||
name: OpenSSH
|
||||
state: enabled
|
||||
|
||||
- name: Allow HTTP/HTTPS
|
||||
ufw:
|
||||
rule: allow
|
||||
port: "{{ item }}"
|
||||
proto: tcp
|
||||
loop:
|
||||
- "80"
|
||||
- "443"
|
||||
- "8080"
|
||||
|
||||
- name: Set timezone to UTC
|
||||
timezone:
|
||||
name: UTC
|
||||
|
||||
# User Management
|
||||
- name: Create application user
|
||||
user:
|
||||
name: "{{ app_user }}"
|
||||
shell: /bin/bash
|
||||
home: "/home/{{ app_user }}"
|
||||
create_home: yes
|
||||
|
||||
- name: Add sudo privileges for app user
|
||||
lineinfile:
|
||||
path: /etc/sudoers.d/{{ app_user }}
|
||||
line: "{{ app_user }} ALL=(ALL) NOPASSWD: ALL"
|
||||
create: yes
|
||||
mode: '0440'
|
||||
validate: 'visudo -cf %s'
|
||||
|
||||
# Docker Installation
|
||||
- name: Add Docker GPG key
|
||||
apt_key:
|
||||
url: https://download.docker.com/linux/debian/gpg
|
||||
state: present
|
||||
|
||||
- name: Add Docker repository
|
||||
apt_repository:
|
||||
repo: deb [arch=amd64] https://download.docker.com/linux/debian {{ ansible_distribution_release }} stable
|
||||
state: present
|
||||
|
||||
- name: Install Docker
|
||||
apt:
|
||||
name:
|
||||
- docker-ce
|
||||
- docker-ce-cli
|
||||
- containerd.io
|
||||
- docker-compose-plugin
|
||||
state: present
|
||||
update_cache: yes
|
||||
notify: Restart Docker
|
||||
|
||||
- name: Add app user to docker group
|
||||
user:
|
||||
name: "{{ app_user }}"
|
||||
groups: docker
|
||||
append: yes
|
||||
|
||||
- name: Enable and start Docker
|
||||
service:
|
||||
name: docker
|
||||
enabled: yes
|
||||
state: started
|
||||
|
||||
# Docker Compose Installation
|
||||
- name: Install Docker Compose
|
||||
get_url:
|
||||
url: "https://github.com/docker/compose/releases/download/v{{ docker_compose_version }}/docker-compose-linux-x86_64"
|
||||
dest: /usr/local/bin/docker-compose
|
||||
mode: '0755'
|
||||
|
||||
# Cloudron Installation
|
||||
- name: Check if Cloudron is installed
|
||||
stat:
|
||||
path: /etc/cloudron/cloudron.conf
|
||||
register: cloudron_installed
|
||||
|
||||
- name: Install Cloudron
|
||||
shell: |
|
||||
curl -sSL https://get.cloudron.io | bash
|
||||
args:
|
||||
warn: false
|
||||
when: not cloudron_installed.stat.exists
|
||||
|
||||
# Application Setup
|
||||
- name: Create application directory
|
||||
file:
|
||||
path: "{{ app_dir }}"
|
||||
state: directory
|
||||
owner: "{{ app_user }}"
|
||||
group: "{{ app_user }}"
|
||||
mode: '0755'
|
||||
|
||||
- name: Create logs directory
|
||||
file:
|
||||
path: /var/log/ydn
|
||||
state: directory
|
||||
owner: "{{ app_user }}"
|
||||
group: "{{ app_user }}"
|
||||
mode: '0755'
|
||||
|
||||
# Security Hardening
|
||||
- name: Configure fail2ban
|
||||
template:
|
||||
src: ../templates/fail2ban.local.j2
|
||||
dest: /etc/fail2ban/jail.local
|
||||
owner: root
|
||||
group: root
|
||||
mode: '0644'
|
||||
notify: Restart fail2ban
|
||||
|
||||
- name: Enable fail2ban
|
||||
service:
|
||||
name: fail2ban
|
||||
enabled: yes
|
||||
state: started
|
||||
|
||||
# Monitoring Setup
|
||||
- name: Install monitoring agents
|
||||
apt:
|
||||
name:
|
||||
- prometheus-node-exporter
|
||||
state: present
|
||||
|
||||
- name: Enable node exporter
|
||||
service:
|
||||
name: prometheus-node-exporter
|
||||
enabled: yes
|
||||
state: started
|
||||
|
||||
# Final Cleanup
|
||||
- name: Clean apt cache
|
||||
apt:
|
||||
autoclean: yes
|
||||
autoremove: yes
|
||||
|
||||
- name: Display completion message
|
||||
debug:
|
||||
msg: |
|
||||
VPS configuration complete!
|
||||
IP Address: {{ ansible_default_ipv4.address }}
|
||||
SSH User: {{ app_user }}
|
||||
Docker Version: {{ docker_version.stdout }}
|
||||
107
infrastructure/terraform/environments/local/main.tf
Normal file
107
infrastructure/terraform/environments/local/main.tf
Normal file
@@ -0,0 +1,107 @@
|
||||
# Local environment Terraform configuration
|
||||
# Uses libvirt provider for KVM/QEMU VMs on Debian 13 host
|
||||
|
||||
terraform {
|
||||
required_version = ">= 1.5.0"
|
||||
|
||||
required_providers {
|
||||
libvirt = {
|
||||
source = "dmacvicar/libvirt"
|
||||
version = "~> 0.7.0"
|
||||
}
|
||||
template = {
|
||||
source = "hashicorp/template"
|
||||
version = "~> 2.2.0"
|
||||
}
|
||||
random = {
|
||||
source = "hashicorp/random"
|
||||
version = "~> 3.5.0"
|
||||
}
|
||||
}
|
||||
|
||||
backend "local" {
|
||||
path = "../../../.terraform/terraform-local.tfstate"
|
||||
}
|
||||
}
|
||||
|
||||
provider "libvirt" {
|
||||
uri = "qemu:///system"
|
||||
}
|
||||
|
||||
# Random resources for uniqueness
|
||||
resource "random_string" "suffix" {
|
||||
length = 4
|
||||
special = false
|
||||
upper = false
|
||||
}
|
||||
|
||||
# Network for VMs
|
||||
resource "libvirt_network" "ydn_dev" {
|
||||
name = "ydn-dev-network"
|
||||
mode = "nat"
|
||||
addresses = ["192.168.100.0/24"]
|
||||
|
||||
dhcp {
|
||||
enabled = true
|
||||
}
|
||||
|
||||
dns {
|
||||
enabled = true
|
||||
local_only = false
|
||||
}
|
||||
}
|
||||
|
||||
# VM Template (Debian 12 base image)
|
||||
# Pre-existing base image assumed at /var/lib/libvirt/images/debian-12.qcow2
|
||||
resource "libvirt_volume" "base" {
|
||||
name = "ydn-dev-base-${random_string.suffix.result}"
|
||||
pool = "default"
|
||||
source = "/var/lib/libvirt/images/debian-12.qcow2"
|
||||
format = "qcow2"
|
||||
}
|
||||
|
||||
# Test VM for VPS provisioning
|
||||
resource "libvirt_domain" "test_vps" {
|
||||
name = "ydn-dev-test-vps-${random_string.suffix.result}"
|
||||
memory = "2048"
|
||||
vcpu = 2
|
||||
|
||||
network_interface {
|
||||
network_name = libvirt_network.ydn_dev.name
|
||||
wait_for_lease = true
|
||||
}
|
||||
|
||||
disk {
|
||||
volume_id = libvirt_volume.base.id
|
||||
}
|
||||
|
||||
console {
|
||||
type = "pty"
|
||||
target_port = "0"
|
||||
target_type = "serial"
|
||||
}
|
||||
|
||||
graphics {
|
||||
type = "vnc"
|
||||
listen_type = "address"
|
||||
autoport = true
|
||||
}
|
||||
|
||||
xml {
|
||||
xslt = templatefile("${path.module}/templates/cloud-init.xsl", {
|
||||
hostname = "test-vps"
|
||||
ssh_key = file("~/.ssh/id_rsa.pub")
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
# Output VM connection details
|
||||
output "test_vps_ip" {
|
||||
description = "IP address of test VPS"
|
||||
value = libvirt_domain.test_vps.network_interface.0.addresses.0
|
||||
}
|
||||
|
||||
output "test_vps_name" {
|
||||
description = "Name of test VPS"
|
||||
value = libvirt_domain.test_vps.name
|
||||
}
|
||||
110
infrastructure/terraform/environments/production/main.tf
Normal file
110
infrastructure/terraform/environments/production/main.tf
Normal file
@@ -0,0 +1,110 @@
|
||||
# Production environment Terraform configuration
|
||||
# Uses OVH provider for production VPS provisioning
|
||||
|
||||
terraform {
|
||||
required_version = ">= 1.5.0"
|
||||
|
||||
required_providers {
|
||||
ovh = {
|
||||
source = "ovh/ovh"
|
||||
version = "~> 0.42.0"
|
||||
}
|
||||
random = {
|
||||
source = "hashicorp/random"
|
||||
version = "~> 3.5.0"
|
||||
}
|
||||
}
|
||||
|
||||
backend "s3" {
|
||||
bucket = "ydn-terraform-state"
|
||||
key = "production/terraform.tfstate"
|
||||
region = "GRA"
|
||||
}
|
||||
}
|
||||
|
||||
provider "ovh" {
|
||||
endpoint = var.ovh_endpoint
|
||||
application_key = var.ovh_application_key
|
||||
application_secret = var.ovh_application_secret
|
||||
consumer_key = var.ovh_consumer_key
|
||||
}
|
||||
|
||||
# Variables
|
||||
variable "ovh_endpoint" {
|
||||
default = "ovh-eu"
|
||||
}
|
||||
|
||||
variable "ovh_application_key" {
|
||||
type = string
|
||||
sensitive = true
|
||||
}
|
||||
|
||||
variable "ovh_application_secret" {
|
||||
type = string
|
||||
sensitive = true
|
||||
}
|
||||
|
||||
variable "ovh_consumer_key" {
|
||||
type = string
|
||||
sensitive = true
|
||||
}
|
||||
|
||||
variable "ssh_key_id" {
|
||||
type = string
|
||||
default = "ydn-deploy-key"
|
||||
}
|
||||
|
||||
variable "instance_count" {
|
||||
type = number
|
||||
default = 1
|
||||
}
|
||||
|
||||
# SSH Key for VM access
|
||||
resource "ovh_cloud_project_ssh_key" "deploy" {
|
||||
name = var.ssh_key_id
|
||||
public_key = file("~/.ssh/ydn-deploy.pub")
|
||||
project_id = var.ovh_project_id
|
||||
}
|
||||
|
||||
# Production VPS instance
|
||||
resource "ovh_cloud_project_instance" "vps" {
|
||||
count = var.instance_count
|
||||
name = "ydn-prod-vps-${count.index}"
|
||||
project_id = var.ovh_project_id
|
||||
flavor = "vps-standard-2-4-40" # 2 vCPU, 4GB RAM, 40GB SSD
|
||||
image = "Debian 12"
|
||||
ssh_key_id = ovh_cloud_project_ssh_key.deploy.id
|
||||
region = "GRA7" # Gravelines
|
||||
|
||||
tags = [
|
||||
"Environment:production",
|
||||
"Application:ydn",
|
||||
"ManagedBy:terraform"
|
||||
]
|
||||
}
|
||||
|
||||
# Network security
|
||||
resource "ovh_cloud_project_network_public" "private" {
|
||||
project_id = var.ovh_project_id
|
||||
name = "ydn-private-network"
|
||||
regions = ["GRA7"]
|
||||
}
|
||||
|
||||
resource "ovh_cloud_project_network_public_subnet" "subnet" {
|
||||
project_id = var.ovh_cloud_project_network_public.private.project_id
|
||||
network_id = ovh_cloud_project_network_public.private.id
|
||||
name = "ydn-subnet"
|
||||
region = "GRA7"
|
||||
cidr = "192.168.0.0/24"
|
||||
}
|
||||
|
||||
# Outputs
|
||||
output "vps_ips" {
|
||||
description = "IP addresses of production VPS instances"
|
||||
value = ovh_cloud_project_instance.vps[*].ip_address
|
||||
}
|
||||
|
||||
output "vps_names" {
|
||||
description = "Names of production VPS instances"
|
||||
value = ovh_cloud_project_instance.vps[*].name
|
||||
}
|
||||
Reference in New Issue
Block a user