docs: rewrite README with structured overview and quick start guide
Replaces the minimal project description with a comprehensive README including a component overview table, quick start instructions, common Ansible operations, and links to detailed documentation. Aligns with Red Panda Approval™ standards.
This commit is contained in:
22
terraform/.terraform.lock.hcl
generated
Normal file
22
terraform/.terraform.lock.hcl
generated
Normal file
@@ -0,0 +1,22 @@
|
||||
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.

provider "registry.terraform.io/lxc/incus" {
  version = "1.0.2"
  # "h1:" is the provider package hash for this platform; "zh:" entries are
  # registry-recorded zip hashes for all platforms (Terraform lock-file scheme).
  hashes = [
    "h1:skSyqJPnvwhbfSrmVVY05I/js7qvX8T8Cd182tnTetc=",
    "zh:0f312afd0bc27c111c5b4e41b6274dfe4401c3b5c60e4bd519425c547c5c2316",
    "zh:396587c30adce1b57400ecf1a43df8d4fcbdf5172e3e359f58f7147520891546",
    "zh:40310405f58493af0e68b1040d62286cd5e6d25b96b5e2d1534d155a98375eba",
    "zh:4991adf7f290ffc840a1123b300163b8db25a6c4b096648c7b576a6661980ed5",
    "zh:5d71a5c949a5ad01d075f856475e7de95df16b50d52e546a2257e5c56bfa9150",
    "zh:60e5fde27aa605abab8487d6ed8a8bb66de88f5e1ba31bb05364b4379fde5f83",
    "zh:63f9b65382bcb88efd0d9aa8422987405fcf00d4f5b63fbe1ae030438fb55eb7",
    "zh:79acebe8ed9627dffc369058e54bbb933b5568fee02de3cc353274d728c07597",
    "zh:97170106b7520d7c025ccfe392a0b7c2d172e63f00f656989b08d0b6ece56573",
    "zh:9c8fc5d4b26dc21e6d75d6ac127502a797d7e9253bd10b236914db51fa1fc4d7",
    "zh:b2b8cabdfa681efffa3599468257b185f7a7e24ec6e624e57f75920aa1e7c134",
    "zh:d32129503b83790752482e0d794ffb9b04f7a893cc113d834654a8ddb028402f",
    "zh:ebd2fb8d94d72bc28c5655c29c6e6048cc31ef3650d0e166aaf3d82a31673cd5",
  ]
}
281
terraform/containers.tf
Normal file
281
terraform/containers.tf
Normal file
@@ -0,0 +1,281 @@
|
||||
locals {
  # Common cloud-init configuration applied to every container: refresh the
  # package index, install SSH, and create a passwordless-sudo system user.
  base_cloud_init = <<EOT
#cloud-config
package_update: true
packages:
  - apt-utils
  - openssh-server
users:
  - name: ${var.system_user}
    uid: ${var.user_uid}
    system: true
    sudo: ALL=(ALL) NOPASSWD:ALL
    shell: /bin/bash
    groups: sudo
    lock_passwd: true
EOT

  # Uranian host definitions - Red Panda Approved
  # Each entry drives one incus_instance below: `image` selects a fingerprint
  # from local.images, `config` is merged over the common instance config, and
  # `devices` is rendered as dynamic "device" blocks (proxy / disk / gpu).
  uranian_hosts = {
    oberon = {
      description = "Docker Host + MCP Switchboard - King of Fairies orchestrating containers and managing MCP infrastructure"
      # NOTE(review): `remote` is not consumed by the instance resource and no
      # other host sets it — possibly vestigial; confirm before relying on it.
      remote = "local"
      role   = "container_orchestration"
      image  = "noble"
      # Nesting + unconfined AppArmor: needed to run Docker inside the container.
      config = {
        "security.nesting" = true
        "raw.lxc"          = "lxc.apparmor.profile=unconfined"
      }
      devices = [{
        name = "app_ports"
        type = "proxy"
        # Forward host ports 25580-25599 to the same ports in the container.
        properties = {
          listen  = "tcp:0.0.0.0:25580-25599"
          connect = "tcp:127.0.0.1:25580-25599"
        }
      }]
    }
    portia = {
      description = "PostgreSQL Host - Intelligent database justice"
      role        = "database"
      image       = "noble"
      # No extra config or port forwards: reached over the bridge network only.
      config  = {}
      devices = []
    }
    ariel = {
      description = "Neo4j Host - Ethereal graph connections"
      role        = "graph_database"
      image       = "noble"
      config = {
        "security.nesting" = true
        "raw.lxc"          = "lxc.apparmor.profile=unconfined"
      }
      devices = [{
        name = "neo4j_ports"
        type = "proxy"
        properties = {
          listen  = "tcp:0.0.0.0:25554"
          connect = "tcp:127.0.0.1:25554"
        }
      }]
    }
    miranda = {
      description = "Dedicated Docker Host for MCP Servers - Curious bridge between worlds"
      role        = "mcp_docker_host"
      image       = "noble"
      config = {
        "security.nesting" = true
        "raw.lxc"          = "lxc.apparmor.profile=unconfined"
      }
      devices = [{
        # MCP server containers.
        name = "mcp_containers"
        type = "proxy"
        properties = {
          listen  = "tcp:0.0.0.0:25530-25539"
          connect = "tcp:127.0.0.1:25530-25539"
        }
        },
        {
          # mcpo endpoints.
          name = "mcpo_ports"
          type = "proxy"
          properties = {
            listen  = "tcp:0.0.0.0:25560-25569"
            connect = "tcp:127.0.0.1:25560-25569"
          }
      }]
    }
    sycorax = {
      description = "Arke LLM Proxy - Original magical language power"
      role        = "language_models"
      image       = "noble"
      config = {
        "security.nesting" = true
        "raw.lxc"          = "lxc.apparmor.profile=unconfined"
      }
      devices = [{
        name = "arke_ports"
        type = "proxy"
        properties = {
          listen  = "tcp:0.0.0.0:25540-25544"
          connect = "tcp:127.0.0.1:25540-25544"
        }
      }]
    }
    puck = {
      description = "Python App Host - Shape-shifting trickster"
      role        = "application_runtime"
      image       = "questing"
      config = {
        "security.nesting" = true
        "raw.lxc"          = "lxc.apparmor.profile=unconfined"
      }
      devices = [{
        name = "puck_ports"
        type = "proxy"
        properties = {
          listen  = "tcp:0.0.0.0:25570-25579"
          connect = "tcp:127.0.0.1:25570-25579"
        }
        },
        {
          # RDP access: host port 25520 maps to the standard RDP port inside.
          name = "puck_rdp"
          type = "proxy"
          properties = {
            listen  = "tcp:0.0.0.0:25520"
            connect = "tcp:127.0.0.1:3389"
          }
        },
        {
          # GPU passthrough with default properties.
          name       = "gpu"
          type       = "gpu"
          properties = {}
        }
      ]
    }
    caliban = {
      description = "Agent S MCP Server - Autonomous computer agent learning through environmental interaction"
      role        = "agent_automation"
      image       = "questing"
      config = {
        "security.nesting" = true
        "raw.lxc"          = "lxc.apparmor.profile=unconfined"
      }
      devices = [{
        # RDP access on host port 25521.
        name = "caliban"
        type = "proxy"
        properties = {
          listen  = "tcp:0.0.0.0:25521"
          connect = "tcp:127.0.0.1:3389"
        }
        },
        {
          name       = "gpu"
          type       = "gpu"
          properties = {}
      }]
    }
    prospero = {
      description = "Master magician observing events - PPLG observability stack with internal HAProxy"
      role        = "observability"
      image       = "noble"
      config      = {}
      devices = [
        {
          name = "https_internal"
          type = "proxy"
          properties = {
            listen  = "tcp:0.0.0.0:25510"
            connect = "tcp:127.0.0.1:443"
          }
        },
        {
          name = "http_redirect"
          type = "proxy"
          properties = {
            listen  = "tcp:0.0.0.0:25511"
            connect = "tcp:127.0.0.1:80"
          }
        }
      ]
    }
    titania = {
      description = "Proxy & SSO Services - Queen of the fairies managing access and authentication"
      role        = "proxy_sso"
      image       = "noble"
      config = {
        "security.nesting" = true
        "raw.lxc"          = "lxc.apparmor.profile=unconfined"
      }
      # Titania owns the well-known ports 443/80 on the host.
      devices = [
        {
          name = "https_standard"
          type = "proxy"
          properties = {
            listen  = "tcp:0.0.0.0:443"
            connect = "tcp:127.0.0.1:8443"
          }
        },
        {
          name = "http_redirect"
          type = "proxy"
          properties = {
            listen  = "tcp:0.0.0.0:80"
            connect = "tcp:127.0.0.1:8080"
          }
        },
        {
          name = "gitea_ssh"
          type = "proxy"
          properties = {
            listen  = "tcp:0.0.0.0:22022"
            connect = "tcp:127.0.0.1:22022"
          }
        }
      ]
    }
    rosalind = {
      description = "Nextcloud Host - Witty and resourceful moon for cloud collaboration (PHP, Go, Node.js runtimes)"
      role        = "collaboration"
      image       = "noble"
      config = {
        "security.nesting" = true
        "raw.lxc"          = "lxc.apparmor.profile=unconfined"
      }
      devices = [{
        # Mounts the incus_storage_volume "nextcloud-data" (storage.tf).
        name = "nextcloud_data"
        type = "disk"
        properties = {
          source = "nextcloud-data"
          pool   = "default"
          path   = "/mnt/nextcloud"
        }
      }]
    }
  }

  # Image fingerprints resolved from the incus_image resources in main.tf.
  images = {
    noble    = incus_image.noble.fingerprint
    plucky   = incus_image.plucky.fingerprint
    questing = incus_image.questing.fingerprint
  }
}
|
||||
|
||||
# One Incus container per entry in local.uranian_hosts. Per-host devices and
# config are layered on top of the shared defaults defined here.
resource "incus_instance" "uranian_hosts" {
  for_each = local.uranian_hosts

  name      = each.key
  project   = var.project_name
  profiles  = [var.profile_name]
  image     = local.images[each.value.image]
  ephemeral = false

  # Render each host's proxy/disk/gpu devices from the locals map.
  dynamic "device" {
    for_each = lookup(each.value, "devices", [])
    content {
      name       = device.value.name
      type       = device.value.type
      properties = device.value.properties
    }
  }

  config = merge(
    {
      # FIX: the provider reads the lowercase key "user.access_interface" to
      # pick the NIC whose address is reported as ipv4_address; the previous
      # "user.access_Interface" (capital I) was never matched.
      "user.access_interface" = "eth0"
      "cloud-init.user-data"  = local.base_cloud_init
      "user.Environment"      = "sandbox"
      "user.ManagedBy"        = "terraform"
      "user.Role"             = each.value.role
    },
    # Per-host entries (e.g. security.nesting) override the defaults above.
    each.value.config
  )

  # Install the operator's SSH public keys for var.system_user.
  file {
    target_path = "/home/${var.system_user}/.ssh/authorized_keys"
    source_path = var.ssh_key_path
    uid         = var.user_uid
    gid         = var.user_uid
    # FIX: quoted so the mode is passed as the octal string "0750"; the bare
    # literal 0750 is parsed by HCL as the decimal number 750.
    mode               = "0750"
    create_directories = true
  }
}
|
||||
68
terraform/main.tf
Normal file
68
terraform/main.tf
Normal file
@@ -0,0 +1,68 @@
|
||||
# Dedicated Incus project isolating all Agathos resources on the local remote.
resource "incus_project" "agathos" {
  name        = var.project_name
  description = "Agathos Project"
  remote      = "local"

  config = {
    # The project owns its own storage volumes, images, profiles and buckets…
    "features.storage.volumes" = true
    "features.images"          = true
    "features.profiles"        = true
    "features.storage.buckets" = true
    # …but networks are not project-scoped (shared with the default project).
    "features.networks" = false
  }
}
|
||||
|
||||
# Shared profile applied to every Uranian host: caps CPU/memory and wires up
# a bridged NIC plus a root disk on the default storage pool.
resource "incus_profile" "sandbox" {
  project     = var.project_name
  remote      = "local"
  name        = var.profile_name
  description = "Sandbox profile for Uranian hosts"

  config = {
    "limits.cpu"    = "13"
    "limits.memory" = "25GB"
  }

  # Primary NIC, bridged onto the host-side bridge.
  device {
    name = "eth0"
    type = "nic"

    properties = {
      parent  = var.network_name
      nictype = "bridged"
    }
  }

  # Root filesystem backed by the default pool.
  device {
    name = "root"
    type = "disk"

    properties = {
      path = "/"
      pool = "default"
    }
  }
}
|
||||
|
||||
# Ubuntu cloud images fetched from the public "images" remote into the
# project; containers reference them by fingerprint via local.images.

# Ubuntu Noble (24.04 LTS) cloud image.
resource "incus_image" "noble" {
  project = var.project_name

  source_image = {
    remote = "images"
    name   = "ubuntu/noble/cloud"
  }
}

# Ubuntu Plucky (25.04) cloud image.
# NOTE(review): no host in containers.tf currently uses "plucky" — kept
# available via local.images; confirm whether it is still needed.
resource "incus_image" "plucky" {
  project = var.project_name

  source_image = {
    remote = "images"
    name   = "ubuntu/plucky/cloud"
  }
}

# Ubuntu Questing (25.10) cloud image (used by puck and caliban).
resource "incus_image" "questing" {
  project = var.project_name

  source_image = {
    remote = "images"
    name   = "ubuntu/questing/cloud"
  }
}
|
||||
38
terraform/outputs.tf
Normal file
38
terraform/outputs.tf
Normal file
@@ -0,0 +1,38 @@
|
||||
# Per-container summary (name, address, role) for operators and scripts
# such as ssh_key_update.sh.
output "uranian_hosts" {
  description = "Information about Uranian host containers"
  value = {
    for name, instance in incus_instance.uranian_hosts : name => {
      name        = instance.name
      ipv4        = instance.ipv4_address
      description = local.uranian_hosts[name].description
      role        = local.uranian_hosts[name].role
      # false when the host sets no security.nesting key at all
      security_nesting = lookup(local.uranian_hosts[name].config, "security.nesting", false)
    }
  }
}

output "project_info" {
  description = "Agathos project information"
  value = {
    name        = incus_project.agathos.name
    description = incus_project.agathos.description
  }
}

# Same data as "uranian_hosts" plus the image key, nested one level deeper —
# shaped for external consumers (docs generation, DHCP/DNS provisioning).
output "agathos_inventory" {
  description = "Host inventory for documentation (sandbox.html) and DHCP/DNS provisioning reference"
  value = {
    uranian_hosts = {
      hosts = {
        for name, instance in incus_instance.uranian_hosts : name => {
          name             = instance.name
          ipv4             = instance.ipv4_address
          role             = local.uranian_hosts[name].role
          description      = local.uranian_hosts[name].description
          image            = local.uranian_hosts[name].image
          security_nesting = lookup(local.uranian_hosts[name].config, "security.nesting", false)
        }
      }
    }
  }
}
|
||||
8
terraform/ssh_key_update.sh
Executable file
8
terraform/ssh_key_update.sh
Executable file
@@ -0,0 +1,8 @@
|
||||
#!/bin/bash
# Populate ~/.ssh/known_hosts with host keys for every Uranian host, keyed
# both by its IP address and by its <hostname>.incus name.
# Requires: terraform (with state for this module), jq, ssh-keyscan.
#
# FIXES: fail fast on unset variables / broken pipelines, and use `read -r`
# so a backslash in the input is not interpreted as an escape (SC2162).
set -uo pipefail

terraform output -json uranian_hosts |
  jq -r 'to_entries[] | "\(.key) \(.value.ipv4)"' |
  while read -r hostname ip; do
    # Record the key under the raw IP address (-H hashes the entry).
    ssh-keyscan -H "$ip" >> ~/.ssh/known_hosts
    # Record the same host under its hostname.incus alias.
    ssh-keyscan -H "$hostname.incus" >> ~/.ssh/known_hosts
  done
|
||||
98
terraform/storage.tf
Normal file
98
terraform/storage.tf
Normal file
@@ -0,0 +1,98 @@
|
||||
# Storage Resources for Agathos Containers
# Provisions Incus storage volumes and S3 buckets with access keys

# Storage volume for Nextcloud data; mounted into rosalind at /mnt/nextcloud
# via its "nextcloud_data" disk device (containers.tf).
resource "incus_storage_volume" "nextcloud_data" {
  name    = "nextcloud-data"
  pool    = var.storage_pool
  project = var.project_name

  config = {
    size = "100GB"
  }
}
|
||||
|
||||
# S3 bucket for Lobechat file storage
resource "incus_storage_bucket" "lobechat" {
  name        = "lobechat"
  pool        = var.storage_pool
  project     = var.project_name
  description = "Lobechat file storage bucket"
}

# Access key for Lobechat S3 bucket (admin role: full read/write).
# Credentials are surfaced by output "lobechat_s3_credentials" below.
resource "incus_storage_bucket_key" "lobechat_key" {
  name           = "lobechat-access"
  pool           = incus_storage_bucket.lobechat.pool
  storage_bucket = incus_storage_bucket.lobechat.name
  project        = var.project_name
  role           = "admin"
}
|
||||
|
||||
# S3 bucket for Casdoor file storage
resource "incus_storage_bucket" "casdoor" {
  name        = "casdoor"
  pool        = var.storage_pool
  project     = var.project_name
  description = "Casdoor file storage bucket"
}

# Access key for Casdoor S3 bucket (admin role: full read/write).
# Credentials are surfaced by output "casdoor_s3_credentials" below.
resource "incus_storage_bucket_key" "casdoor_key" {
  name           = "casdoor-access"
  pool           = incus_storage_bucket.casdoor.pool
  storage_bucket = incus_storage_bucket.casdoor.name
  project        = var.project_name
  role           = "admin"
}
|
||||
|
||||
# S3 bucket for Spelunker file storage
resource "incus_storage_bucket" "spelunker" {
  name        = "spelunker"
  pool        = var.storage_pool
  project     = var.project_name
  description = "Spelunker file storage bucket"
}

# Access key for Spelunker S3 bucket (admin role: full read/write).
# Credentials are surfaced by output "spelunker_s3_credentials" below.
resource "incus_storage_bucket_key" "spelunker_key" {
  name           = "spelunker-access"
  pool           = incus_storage_bucket.spelunker.pool
  storage_bucket = incus_storage_bucket.spelunker.name
  project        = var.project_name
  role           = "admin"
}
|
||||
|
||||
# Outputs for S3 credentials (to be stored in Ansible vault)
# All three are marked sensitive so they are redacted in plan/apply output;
# read them with `terraform output -json <name>`.
# NOTE(review): each endpoint is built as "https://" + bucket.location —
# confirm the provider's `location` attribute actually yields a reachable
# S3 endpoint host for this Incus setup.
output "lobechat_s3_credentials" {
  description = "Lobechat S3 bucket credentials - store in vault as vault_lobechat_s3_*"
  value = {
    bucket     = incus_storage_bucket.lobechat.name
    access_key = incus_storage_bucket_key.lobechat_key.access_key
    secret_key = incus_storage_bucket_key.lobechat_key.secret_key
    endpoint   = "https://${incus_storage_bucket.lobechat.location}"
  }
  sensitive = true
}

output "casdoor_s3_credentials" {
  description = "Casdoor S3 bucket credentials - store in vault as vault_casdoor_s3_*"
  value = {
    bucket     = incus_storage_bucket.casdoor.name
    access_key = incus_storage_bucket_key.casdoor_key.access_key
    secret_key = incus_storage_bucket_key.casdoor_key.secret_key
    endpoint   = "https://${incus_storage_bucket.casdoor.location}"
  }
  sensitive = true
}

output "spelunker_s3_credentials" {
  description = "Spelunker S3 bucket credentials - store in vault as vault_spelunker_s3_*"
  value = {
    bucket     = incus_storage_bucket.spelunker.name
    access_key = incus_storage_bucket_key.spelunker_key.access_key
    secret_key = incus_storage_bucket_key.spelunker_key.secret_key
    endpoint   = "https://${incus_storage_bucket.spelunker.location}"
  }
  sensitive = true
}
|
||||
41
terraform/variables.tf
Normal file
41
terraform/variables.tf
Normal file
@@ -0,0 +1,41 @@
|
||||
# Input variables for the Agathos sandbox. All have defaults, so the module
# applies without a tfvars file; override per-environment as needed.

variable "project_name" {
  description = "Name of the Incus project for sandbox environment"
  type        = string
  default     = "agathos"
}

variable "profile_name" {
  description = "Name of the Incus profile for sandbox hosts"
  type        = string
  default     = "sandbox"
}

# Must match an existing bridge on the Incus host (networks are not
# project-scoped here — see features.networks = false in main.tf).
variable "network_name" {
  description = "Name of the network bridge"
  type        = string
  default     = "incusbr0"
}

variable "storage_pool" {
  description = "Name of the storage pool"
  type        = string
  default     = "default"
}

# Created in every container by cloud-init with passwordless sudo.
variable "system_user" {
  description = "System user name for sandbox hosts"
  type        = string
  default     = "robert"
}

# Used both as the user's UID and as the uid/gid owner of the pushed
# authorized_keys file.
variable "user_uid" {
  description = "System user UID"
  type        = number
  default     = 1000
}

variable "ssh_key_path" {
  description = "Path to SSH authorized keys file"
  type        = string
  default     = "~/.ssh/authorized_keys"
}
|
||||
13
terraform/versions.tf
Normal file
13
terraform/versions.tf
Normal file
@@ -0,0 +1,13 @@
|
||||
# Toolchain requirements for the Agathos sandbox module.
terraform {
  required_version = ">= 1.0"

  required_providers {
    incus = {
      source = "lxc/incus"
      # FIX: previously unconstrained — any future major release could be
      # selected by `terraform init -upgrade`. Pin to the 1.x series, matching
      # the 1.0.2 recorded in .terraform.lock.hcl.
      version = "~> 1.0"
    }
  }
}

provider "incus" {
  # Configuration will be read from environment or default socket
}
|
||||
Reference in New Issue
Block a user