docs: rewrite README with structured overview and quick start guide

Replaces the minimal project description with a comprehensive README
including a component overview table, quick start instructions, common
Ansible operations, and links to detailed documentation. Aligns with
Red Panda Approval™ standards.
This commit is contained in:
2026-03-03 12:49:06 +00:00
parent c7be03a743
commit b4d60f2f38
219 changed files with 34586 additions and 2 deletions

1
ansible/.vault_pass Normal file
View File

@@ -0,0 +1 @@
redpanda_approved_vault_password

View File

@@ -0,0 +1,63 @@
---
# Create Harper User Account
# Creates the harper user on all ubuntu hosts and deploys SSH authorized keys
#
# Usage:
#   ansible-playbook adduser_harper.yml
#
# Target specific host:
#   ansible-playbook adduser_harper.yml --limit ariel.incus
- name: Create Harper User Account
  hosts: ubuntu
  become: true
  vars:
    # Single source of truth for the account. Tasks below must reference
    # these fields instead of hard-coding the username so the play stays
    # consistent if the account is ever renamed.
    harper_user:
      name: harper
      comment: "Harper - Autonomous Agent"
      shell: /bin/bash
      groups:
        - sudo
  tasks:
    - name: Create harper user account
      ansible.builtin.user:
        name: "{{ harper_user.name }}"
        comment: "{{ harper_user.comment }}"
        shell: "{{ harper_user.shell }}"
        groups: "{{ harper_user.groups }}"
        append: true  # keep any pre-existing supplementary groups
        create_home: true
        state: present

    - name: Ensure .ssh directory exists for harper
      ansible.builtin.file:
        path: "/home/{{ harper_user.name }}/.ssh"
        state: directory
        mode: '0700'
        owner: "{{ harper_user.name }}"
        group: "{{ harper_user.name }}"

    - name: Get harper keys from ssh_authorized_users
      ansible.builtin.set_fact:
        # ssh_authorized_users is expected to be a list of
        # {name: ..., keys: [...]} mappings (inventory/group vars) —
        # falls back to an empty list when no matching entry exists.
        harper_keys: "{{ ssh_authorized_users | selectattr('name', 'equalto', harper_user.name) | map(attribute='keys') | first | default([]) }}"

    - name: Deploy authorized keys for harper
      ansible.posix.authorized_key:
        user: "{{ harper_user.name }}"
        key: "{{ item }}"
        state: present
        exclusive: false  # never wipe keys deployed by other plays
      loop: "{{ harper_keys }}"
      loop_control:
        label: "{{ item | truncate(50) }}"  # keep long public keys out of the log
      when: harper_keys | length > 0

    - name: Configure passwordless sudo for harper
      ansible.builtin.lineinfile:
        # Derive path and rule from harper_user.name (was hard-coded "harper",
        # which would silently diverge if the vars block ever changed).
        path: "/etc/sudoers.d/{{ harper_user.name }}"
        line: "{{ harper_user.name }} ALL=(ALL) NOPASSWD:ALL"
        create: true
        owner: root
        group: root
        mode: '0440'
        validate: "visudo -cf %s"  # reject syntactically invalid sudoers content

View File

@@ -0,0 +1,57 @@
// Alloy configuration template (rendered by Ansible/Jinja2).
// Collects system log files, the systemd journal, and a local Neo4j syslog
// stream, and ships node metrics to the central Prometheus.
logging {
  level = "{{alloy_log_level}}"
}

// Tail classic log files on disk.
loki.source.file "system_logs" {
  targets = [
    {__path__ = "/var/log/syslog", job = "syslog"},
    {__path__ = "/var/log/auth.log", job = "auth"},
  ]
  forward_to = [loki.write.default.receiver]
}

// Read the systemd journal, tagged with host/environment labels.
loki.source.journal "systemd_logs" {
  forward_to = [loki.write.default.receiver]
  labels = {
    job = "systemd",
    hostname = "{{inventory_hostname}}",
    environment = "{{deployment_environment}}",
  }
}

// Local TCP syslog listener; presumably Neo4j is configured to log to this
// port — TODO confirm against the Neo4j service configuration.
loki.source.syslog "neo4j_logs" {
  listener {
    address = "127.0.0.1:{{neo4j_syslog_port}}"
    protocol = "tcp"
    syslog_format = "{{ syslog_format }}"
    labels = {
      job = "neo4j",
      hostname = "{{inventory_hostname}}",
      environment = "{{deployment_environment}}",
    }
  }
  forward_to = [loki.write.default.receiver]
}

// Node-level metrics; only the mdadm collector is disabled.
prometheus.exporter.unix "default" {
  include_exporter_metrics = true
  disable_collectors = ["mdadm"]
}

prometheus.scrape "default" {
  targets = prometheus.exporter.unix.default.targets
  forward_to = [prometheus.remote_write.default.receiver]
  job_name = "containers"
}

// Central Prometheus endpoint (URL supplied by Ansible vars).
prometheus.remote_write "default" {
  endpoint {
    url = "{{prometheus_remote_write_url}}"
  }
}

// Central Loki endpoint (URL supplied by Ansible vars).
loki.write "default" {
  endpoint {
    url = "{{loki_url}}"
  }
}

View File

@@ -0,0 +1,24 @@
// Default Alloy Configuration
// Standard system monitoring and log collection
// Rendered by Ansible (Jinja2); used for hosts without a host-specific template.
logging {
  level = "{{alloy_log_level}}"
  format = "logfmt"
}

// Loki log forwarding (endpoint URL supplied by Ansible vars)
loki.write "default" {
  endpoint {
    url = "{{ loki_url }}"
  }
}

// System log collection from the systemd journal
loki.source.journal "systemd_logs" {
  forward_to = [loki.write.default.receiver]
  labels = {
    job = "systemd",
    hostname = "{{ inventory_hostname }}",
    environment = "{{ deployment_environment }}",
  }
}

116
ansible/alloy/deploy.yml Normal file
View File

@@ -0,0 +1,116 @@
---
# Deploy Grafana Alloy to every Ubuntu host whose inventory `services`
# list contains "alloy".
#
# Config selection: if <playbook_dir>/<inventory_hostname_short>/config.alloy.j2
# exists locally, that host-specific template is deployed; otherwise the shared
# config.alloy.j2 is used.
- name: Deploy Alloy to All Ubuntu Hosts
  hosts: ubuntu
  tasks:
    - name: Check if host has alloy service
      ansible.builtin.set_fact:
        # default([]) keeps the check from erroring on hosts that
        # define no `services` variable at all.
        has_alloy_service: "{{ 'alloy' in (services | default([])) }}"

    - name: Skip hosts without alloy service
      ansible.builtin.meta: end_host
      when: not has_alloy_service

    - name: Add Grafana repository
      become: true
      ansible.builtin.deb822_repository:
        name: grafana
        types: [deb]
        uris: https://apt.grafana.com
        suites: [stable]
        components: [main]
        signed_by: https://apt.grafana.com/gpg.key
        state: present

    - name: Install Alloy
      become: true
      ansible.builtin.apt:
        name: alloy
        state: present
        update_cache: true

    - name: Check for host-specific Alloy config
      ansible.builtin.stat:
        path: "{{ playbook_dir }}/{{ inventory_hostname_short }}/config.alloy.j2"
      register: host_specific_config
      delegate_to: localhost
      connection: local

    - name: Create Alloy configuration (host-specific)
      become: true
      ansible.builtin.template:
        src: "{{ inventory_hostname_short }}/config.alloy.j2"
        dest: /etc/alloy/config.alloy
        owner: alloy
        group: alloy
        mode: '0644'  # leading zero avoids octal ambiguity
      when: host_specific_config.stat.exists
      notify: restart alloy

    - name: Create Alloy configuration (default)
      become: true
      ansible.builtin.template:
        src: config.alloy.j2
        dest: /etc/alloy/config.alloy
        owner: alloy
        group: alloy
        mode: '0644'  # leading zero avoids octal ambiguity
      when: not host_specific_config.stat.exists
      notify: restart alloy

    - name: Check if host has docker service
      ansible.builtin.set_fact:
        has_docker_service: "{{ 'docker' in (services | default([])) }}"

    - name: Add alloy user to docker group for cAdvisor
      become: true
      ansible.builtin.user:
        name: alloy
        groups: docker
        append: true
      when: has_docker_service
      notify: restart alloy

    - name: Check if host has gitea service
      ansible.builtin.set_fact:
        has_gitea_service: "{{ 'gitea' in (services | default([])) }}"

    - name: Add alloy user to gitea group for log collection
      become: true
      ansible.builtin.user:
        name: alloy
        groups: git
        append: true
      when: has_gitea_service
      notify: restart alloy

    - name: Enable and start Alloy service
      become: true
      ansible.builtin.systemd:
        name: alloy
        enabled: true
        state: started
        daemon_reload: true

    - name: Flush handlers to ensure Alloy is restarted if needed
      ansible.builtin.meta: flush_handlers

    # systemd module invoked with only `name` performs no state change;
    # it is used here solely to gather the unit's status.
    - name: Verify Alloy service is running
      become: true
      ansible.builtin.systemd:
        name: alloy
      register: alloy_service_status

    - name: Confirm Alloy service is active
      ansible.builtin.assert:
        that:
          - alloy_service_status.status.ActiveState == "active"
        fail_msg: "Alloy service is not running (state: {{ alloy_service_status.status.ActiveState }})"
        success_msg: "Alloy service is running"

  handlers:
    - name: restart alloy
      become: true
      ansible.builtin.systemd:
        name: alloy
        state: restarted

View File

@@ -0,0 +1,131 @@
// Alloy configuration template (rendered by Ansible/Jinja2).
// Journal entries for mcpo.service are re-labeled with dedicated job/app
// labels; several MCP-related services ship logs via local TCP syslog
// listeners, one port per service.
logging {
  level = "{{alloy_log_level}}"
}

// Tail classic log files on disk.
loki.source.file "system_logs" {
  targets = [
    {__path__ = "/var/log/syslog", job = "syslog"},
    {__path__ = "/var/log/auth.log", job = "auth"},
  ]
  forward_to = [loki.write.default.receiver]
}

// Journal entries flow through the relabel stage below before reaching Loki.
loki.source.journal "systemd_logs" {
  forward_to = [loki.relabel.journal_apps.receiver]
  labels = {
    job = "systemd",
    hostname = "{{inventory_hostname}}",
    environment = "{{deployment_environment}}",
  }
}

// Re-label mcpo.service journal entries: both job and app become "mcpo".
loki.relabel "journal_apps" {
  forward_to = [loki.write.default.receiver]
  rule {
    source_labels = ["__journal__systemd_unit"]
    regex = "mcpo\\.service"
    target_label = "job"
    replacement = "mcpo"
  }
  rule {
    source_labels = ["__journal__systemd_unit"]
    regex = "mcpo\\.service"
    target_label = "app"
    replacement = "mcpo"
  }
}

// Per-service local TCP syslog listeners (ports supplied by Ansible vars).
loki.source.syslog "argos_logs" {
  listener {
    address = "127.0.0.1:{{argos_syslog_port}}"
    protocol = "tcp"
    syslog_format = "{{ syslog_format }}"
    labels = {
      job = "argos",
      hostname = "{{inventory_hostname}}",
      environment = "{{deployment_environment}}",
    }
  }
  forward_to = [loki.write.default.receiver]
}

loki.source.syslog "grafana_mcp_logs" {
  listener {
    address = "127.0.0.1:{{grafana_mcp_syslog_port}}"
    protocol = "tcp"
    syslog_format = "{{ syslog_format }}"
    labels = {
      job = "grafana_mcp",
      hostname = "{{inventory_hostname}}",
      environment = "{{deployment_environment}}",
    }
  }
  forward_to = [loki.write.default.receiver]
}

loki.source.syslog "neo4j_cypher_logs" {
  listener {
    address = "127.0.0.1:{{neo4j_cypher_syslog_port}}"
    protocol = "tcp"
    syslog_format = "{{ syslog_format }}"
    labels = {
      job = "neo4j-cypher",
      hostname = "{{inventory_hostname}}",
      environment = "{{deployment_environment}}",
    }
  }
  forward_to = [loki.write.default.receiver]
}

loki.source.syslog "neo4j_memory_logs" {
  listener {
    address = "127.0.0.1:{{neo4j_memory_syslog_port}}"
    protocol = "tcp"
    syslog_format = "{{ syslog_format }}"
    labels = {
      job = "neo4j-memory",
      hostname = "{{inventory_hostname}}",
      environment = "{{deployment_environment}}",
    }
  }
  forward_to = [loki.write.default.receiver]
}

loki.source.syslog "gitea_mcp_logs" {
  listener {
    address = "127.0.0.1:{{gitea_mcp_syslog_port}}"
    protocol = "tcp"
    syslog_format = "{{ syslog_format }}"
    labels = {
      job = "gitea-mcp",
      hostname = "{{inventory_hostname}}",
      environment = "{{deployment_environment}}",
    }
  }
  forward_to = [loki.write.default.receiver]
}

// Node-level metrics; only the mdadm collector is disabled.
prometheus.exporter.unix "default" {
  include_exporter_metrics = true
  disable_collectors = ["mdadm"]
}

prometheus.scrape "default" {
  targets = prometheus.exporter.unix.default.targets
  forward_to = [prometheus.remote_write.default.receiver]
  job_name = "mcp_docker_host"
}

// Central Prometheus endpoint (URL supplied by Ansible vars).
prometheus.remote_write "default" {
  endpoint {
    url = "{{prometheus_remote_write_url}}"
  }
}

// Central Loki endpoint (URL supplied by Ansible vars).
loki.write "default" {
  endpoint {
    url = "{{loki_url}}"
  }
}

View File

@@ -0,0 +1,98 @@
// Alloy configuration template (rendered by Ansible/Jinja2).
// Collects system logs plus RabbitMQ/SearXNG/smtp4dev syslog streams, and
// scrapes Home Assistant metrics in addition to node metrics.
logging {
  level = "{{alloy_log_level}}"
}

// Tail classic log files on disk.
loki.source.file "system_logs" {
  targets = [
    {__path__ = "/var/log/syslog", job = "syslog"},
    {__path__ = "/var/log/auth.log", job = "auth"},
  ]
  forward_to = [loki.write.default.receiver]
}

// Read the systemd journal, tagged with host/environment labels.
loki.source.journal "systemd_logs" {
  forward_to = [loki.write.default.receiver]
  labels = {
    job = "systemd",
    hostname = "{{inventory_hostname}}",
    environment = "{{deployment_environment}}",
  }
}

// Per-service local TCP syslog listeners (ports supplied by Ansible vars).
loki.source.syslog "rabbitmq_logs" {
  listener {
    address = "127.0.0.1:{{rabbitmq_syslog_port}}"
    protocol = "tcp"
    syslog_format = "{{ syslog_format }}"
    labels = {
      job = "rabbitmq",
      hostname = "{{inventory_hostname}}",
      environment = "{{deployment_environment}}",
    }
  }
  forward_to = [loki.write.default.receiver]
}

loki.source.syslog "searxng_logs" {
  listener {
    address = "127.0.0.1:{{searxng_syslog_port}}"
    protocol = "tcp"
    syslog_format = "{{ syslog_format }}"
    labels = {
      job = "searxng",
      hostname = "{{inventory_hostname}}",
      environment = "{{deployment_environment}}",
    }
  }
  forward_to = [loki.write.default.receiver]
}

loki.source.syslog "smtp4dev_logs" {
  listener {
    address = "127.0.0.1:{{smtp4dev_syslog_port}}"
    protocol = "tcp"
    syslog_format = "{{ syslog_format }}"
    labels = {
      job = "smtp4dev",
      hostname = "{{inventory_hostname}}",
      environment = "{{deployment_environment}}",
    }
  }
  forward_to = [loki.write.default.receiver]
}

// Node-level metrics; only the mdadm collector is disabled.
prometheus.exporter.unix "default" {
  include_exporter_metrics = true
  disable_collectors = ["mdadm"]
}

prometheus.scrape "default" {
  targets = prometheus.exporter.unix.default.targets
  forward_to = [prometheus.remote_write.default.receiver]
  job_name = "containers"
}

// Home Assistant metrics: scraped from its /api/prometheus endpoint using a
// bearer token (hass_metrics_token supplied by Ansible vars).
prometheus.scrape "hass" {
  targets = [{
    __address__ = "127.0.0.1:{{hass_port}}",
    job = "hass",
    hostname = "{{inventory_hostname}}",
    environment = "{{deployment_environment}}",
  }]
  forward_to = [prometheus.remote_write.default.receiver]
  scrape_interval = "60s"
  metrics_path = "/api/prometheus"
  bearer_token = "{{hass_metrics_token}}"
}

// Central Prometheus endpoint (URL supplied by Ansible vars).
prometheus.remote_write "default" {
  endpoint {
    url = "{{prometheus_remote_write_url}}"
  }
}

// Central Loki endpoint (URL supplied by Ansible vars).
loki.write "default" {
  endpoint {
    url = "{{loki_url}}"
  }
}

View File

@@ -0,0 +1,195 @@
// Prospero Alloy Configuration
// Red Panda Approved 🐼
// Services: PPLG stack (Grafana, Prometheus, Loki, Alertmanager, PgAdmin, HAProxy, OAuth2-Proxy)
logging {
  level = "{{alloy_log_level}}"
}
// ============================================================================
// LOG COLLECTION - Loki Forwarding
// ============================================================================
// System log files
loki.source.file "system_logs" {
  targets = [
    {__path__ = "/var/log/syslog", job = "syslog"},
    {__path__ = "/var/log/auth.log", job = "auth"},
  ]
  forward_to = [loki.write.default.receiver]
}
// PPLG HAProxy syslog receiver (HAProxy syslog → Alloy → Loki)
// NOTE(review): unlike the other hosts' configs, no syslog_format is set here,
// so the component default applies — confirm HAProxy's emitted format matches.
loki.source.syslog "pplg_haproxy" {
  listener {
    address = "127.0.0.1:{{pplg_haproxy_syslog_port}}"
    protocol = "tcp"
    labels = {
      job = "pplg-haproxy",
      hostname = "{{inventory_hostname}}",
      environment = "{{deployment_environment}}",
    }
  }
  forward_to = [loki.write.default.receiver]
}
// Journal relabeling - assign dedicated job labels per systemd unit
loki.relabel "journal" {
  // Not used as a direct log stage: only the exported rules are consumed,
  // via relabel_rules in loki.source.journal below.
  forward_to = []
  // Expose the systemd unit as a label
  rule {
    source_labels = ["__journal__systemd_unit"]
    target_label = "unit"
  }
  // Grafana
  rule {
    source_labels = ["__journal__systemd_unit"]
    regex = "grafana-server\\.service"
    target_label = "job"
    replacement = "grafana"
  }
  // Prometheus
  rule {
    source_labels = ["__journal__systemd_unit"]
    regex = "prometheus\\.service"
    target_label = "job"
    replacement = "prometheus"
  }
  // Loki
  rule {
    source_labels = ["__journal__systemd_unit"]
    regex = "loki\\.service"
    target_label = "job"
    replacement = "loki"
  }
  // Alertmanager
  rule {
    source_labels = ["__journal__systemd_unit"]
    regex = "alertmanager\\.service"
    target_label = "job"
    replacement = "alertmanager"
  }
  // PgAdmin
  rule {
    source_labels = ["__journal__systemd_unit"]
    regex = "pgadmin\\.service"
    target_label = "job"
    replacement = "pgadmin"
  }
  // OAuth2-Proxy (Prometheus UI)
  rule {
    source_labels = ["__journal__systemd_unit"]
    regex = "oauth2-proxy-prometheus\\.service"
    target_label = "job"
    replacement = "oauth2-proxy-prometheus"
  }
  // Alloy
  rule {
    source_labels = ["__journal__systemd_unit"]
    regex = "alloy\\.service"
    target_label = "job"
    replacement = "alloy"
  }
  // Default job for unmatched units (rules apply in order; earlier matches
  // are overwritten by this one only for units no prior rule matched)
  rule {
    source_labels = ["__journal__systemd_unit"]
    regex = ".+"
    target_label = "job"
    replacement = "systemd"
  }
}
// Systemd journal logs with per-service job labels
loki.source.journal "systemd_logs" {
  forward_to = [loki.write.default.receiver]
  relabel_rules = loki.relabel.journal.rules
  labels = {
    hostname = "{{inventory_hostname}}",
    environment = "{{deployment_environment}}",
  }
}
// Loki endpoint
loki.write "default" {
  endpoint {
    url = "{{loki_url}}"
  }
}
// ============================================================================
// METRICS COLLECTION - Prometheus Remote Write
// ============================================================================
// Unix/Node metrics - Incus-safe collectors only
// Disabled collectors that don't work in containers: hwmon, thermal, mdadm, powersupplyclass, nvme
prometheus.exporter.unix "default" {
  include_exporter_metrics = true
  disable_collectors = [
    "arp",
    "bcache",
    "bonding",
    "btrfs",
    "hwmon",
    "infiniband",
    "ipvs",
    "mdadm",
    "nfs",
    "nfsd",
    "nvme",
    "powersupplyclass",
    "rapl",
    "thermal_zone",
    "zfs",
  ]
}
// Process exporter - Track all processes by command name
// Provides: namedprocess_namegroup_* metrics
prometheus.exporter.process "default" {
  track_children = true
  track_threads = true
  gather_smaps = false
  recheck_on_scrape = true
  matcher {
    // {% raw %} shields the Go-template placeholder {{.Comm}} from Jinja rendering
    name = "{% raw %}{{.Comm}}{% endraw %}"
    cmdline = [".+"]
  }
}
// Scrape local exporters
prometheus.scrape "local_exporters" {
  targets = concat(
    prometheus.exporter.unix.default.targets,
    prometheus.exporter.process.default.targets,
  )
  forward_to = [prometheus.relabel.add_instance.receiver]
  scrape_interval = "15s"
  job_name = "prospero"
}
// Add instance label for Prometheus compatibility
prometheus.relabel "add_instance" {
  forward_to = [prometheus.remote_write.default.receiver]
  rule {
    target_label = "instance"
    replacement = "{{inventory_hostname}}"
  }
}
// Remote write to Prospero Prometheus
prometheus.remote_write "default" {
  endpoint {
    url = "{{prometheus_remote_write_url}}"
  }
}

View File

@@ -0,0 +1,196 @@
// Puck Alloy Configuration
// Red Panda Approved 🐼
// Services: Log collection, Process metrics, Docker/cAdvisor metrics
logging {
  level = "{{alloy_log_level}}"
}
// ============================================================================
// LOG COLLECTION - Loki Forwarding
// ============================================================================
// Tail classic log files on disk
loki.source.file "system_logs" {
  targets = [
    {__path__ = "/var/log/syslog", job = "syslog"},
    {__path__ = "/var/log/auth.log", job = "auth"},
  ]
  forward_to = [loki.write.default.receiver]
}
// Systemd journal, tagged with host/environment labels
loki.source.journal "systemd_logs" {
  forward_to = [loki.write.default.receiver]
  labels = {
    job = "systemd",
    hostname = "{{inventory_hostname}}",
    environment = "{{deployment_environment}}",
  }
}
// Per-service local TCP syslog listeners (one port per service; ports
// supplied by Ansible vars)
loki.source.syslog "angelia_logs" {
  listener {
    address = "127.0.0.1:{{angelia_syslog_port}}"
    protocol = "tcp"
    syslog_format = "{{ syslog_format }}"
    labels = {
      job = "angelia",
      hostname = "{{inventory_hostname}}",
      environment = "{{deployment_environment}}",
    }
  }
  forward_to = [loki.write.default.receiver]
}
loki.source.syslog "athena_logs" {
  listener {
    address = "127.0.0.1:{{athena_syslog_port}}"
    protocol = "tcp"
    syslog_format = "{{ syslog_format }}"
    labels = {
      job = "athena",
      hostname = "{{inventory_hostname}}",
      environment = "{{deployment_environment}}",
    }
  }
  forward_to = [loki.write.default.receiver]
}
loki.source.syslog "kairos_logs" {
  listener {
    address = "127.0.0.1:{{kairos_syslog_port}}"
    protocol = "tcp"
    syslog_format = "{{ syslog_format }}"
    labels = {
      job = "kairos",
      hostname = "{{inventory_hostname}}",
      environment = "{{deployment_environment}}",
    }
  }
  forward_to = [loki.write.default.receiver]
}
loki.source.syslog "sagittarius_logs" {
  listener {
    address = "127.0.0.1:{{sagittarius_syslog_port}}"
    protocol = "tcp"
    syslog_format = "{{ syslog_format }}"
    labels = {
      job = "sagittarius",
      hostname = "{{inventory_hostname}}",
      environment = "{{deployment_environment}}",
    }
  }
  forward_to = [loki.write.default.receiver]
}
loki.source.syslog "spelunker_logs" {
  listener {
    address = "127.0.0.1:{{spelunker_syslog_port}}"
    protocol = "tcp"
    syslog_format = "{{ syslog_format }}"
    labels = {
      job = "spelunker",
      hostname = "{{inventory_hostname}}",
      environment = "{{deployment_environment}}",
    }
  }
  forward_to = [loki.write.default.receiver]
}
loki.source.syslog "jupyterlab_logs" {
  listener {
    address = "127.0.0.1:{{jupyterlab_syslog_port}}"
    protocol = "tcp"
    syslog_format = "{{ syslog_format }}"
    labels = {
      job = "jupyterlab",
      hostname = "{{inventory_hostname}}",
      environment = "{{deployment_environment}}",
    }
  }
  forward_to = [loki.write.default.receiver]
}
// Central Loki endpoint (URL supplied by Ansible vars)
loki.write "default" {
  endpoint {
    url = "{{loki_url}}"
  }
}
// ============================================================================
// METRICS COLLECTION - Prometheus Remote Write
// ============================================================================
// Unix/Node metrics - Incus-safe collectors only
// Disabled collectors that don't work in containers: hwmon, thermal, mdadm, powersupplyclass, nvme
prometheus.exporter.unix "default" {
  include_exporter_metrics = true
  disable_collectors = [
    "arp",
    "bcache",
    "bonding",
    "btrfs",
    "hwmon",
    "infiniband",
    "ipvs",
    "mdadm",
    "nfs",
    "nfsd",
    "nvme",
    "powersupplyclass",
    "rapl",
    "thermal_zone",
    "zfs",
  ]
}
// Process exporter - Track all processes by command name
// Provides: namedprocess_namegroup_* metrics
prometheus.exporter.process "default" {
  track_children = true
  track_threads = true
  gather_smaps = false
  recheck_on_scrape = true
  matcher {
    // {% raw %} shields the Go-template placeholder {{.Comm}} from Jinja rendering
    name = "{% raw %}{{.Comm}}{% endraw %}"
    cmdline = [".+"]
  }
}
// cAdvisor - Docker container metrics (reads the Docker socket)
// Provides: container_* metrics for CPU, memory, network, disk
prometheus.exporter.cadvisor "default" {
  docker_host = "unix:///var/run/docker.sock"
  storage_duration = "5m"
  docker_only = true
}
// Scrape all local exporters
prometheus.scrape "local_exporters" {
  targets = concat(
    prometheus.exporter.unix.default.targets,
    prometheus.exporter.process.default.targets,
    prometheus.exporter.cadvisor.default.targets,
  )
  forward_to = [prometheus.relabel.add_instance.receiver]
  scrape_interval = "15s"
  job_name = "puck"
}
// Add instance label for Prometheus compatibility
prometheus.relabel "add_instance" {
  forward_to = [prometheus.remote_write.default.receiver]
  rule {
    target_label = "instance"
    replacement = "{{inventory_hostname}}"
  }
}
// Remote write to Prospero Prometheus
prometheus.remote_write "default" {
  endpoint {
    url = "{{prometheus_remote_write_url}}"
  }
}

View File

@@ -0,0 +1,155 @@
// Rosalind Alloy Configuration
// Services: Gitea, Lobechat, Nextcloud monitoring
logging {
  level = "{{alloy_log_level}}"
  format = "logfmt"
}
// ============================================================================
// LOG COLLECTION - Loki Forwarding
// ============================================================================
// System log files
loki.source.file "system_logs" {
  targets = [
    {__path__ = "/var/log/syslog", job = "syslog"},
    {__path__ = "/var/log/auth.log", job = "auth"},
  ]
  forward_to = [loki.write.default.receiver]
}
// Systemd journal logs (includes AnythingLLM server/collector)
loki.source.journal "systemd_logs" {
  forward_to = [loki.write.default.receiver]
  labels = {
    job = "systemd",
    hostname = "{{inventory_hostname}}",
    environment = "{{deployment_environment}}",
  }
}
// Gitea application logs (requires read access to /var/log/gitea)
loki.source.file "gitea_logs" {
  targets = [
    {__path__ = "/var/log/gitea/gitea.log", job = "gitea"},
  ]
  forward_to = [loki.write.default.receiver]
}
// Apache access and error logs (Nextcloud)
loki.source.file "apache_logs" {
  targets = [
    {__path__ = "/var/log/apache2/access.log", job = "apache_access"},
    {__path__ = "/var/log/apache2/error.log", job = "apache_error"},
  ]
  forward_to = [loki.write.default.receiver]
}
// Lobechat Docker syslog (local TCP listener; port supplied by Ansible vars)
loki.source.syslog "lobechat_logs" {
  listener {
    address = "127.0.0.1:{{lobechat_syslog_port}}"
    protocol = "tcp"
    syslog_format = "{{ syslog_format }}"
    labels = {
      job = "lobechat",
      hostname = "{{inventory_hostname}}",
      environment = "{{deployment_environment}}",
    }
  }
  forward_to = [loki.write.default.receiver]
}
// Loki endpoint
loki.write "default" {
  endpoint {
    url = "{{loki_url}}"
  }
}
// ============================================================================
// METRICS COLLECTION - Prometheus Remote Write
// ============================================================================
// Unix/Node metrics - Incus-safe collectors only
prometheus.exporter.unix "default" {
  include_exporter_metrics = true
  disable_collectors = [
    "arp",
    "bcache",
    "bonding",
    "btrfs",
    "hwmon",
    "infiniband",
    "ipvs",
    "mdadm",
    "nfs",
    "nfsd",
    "nvme",
    "powersupplyclass",
    "rapl",
    "thermal_zone",
    "zfs",
  ]
}
// Process exporter - Track all processes by command name
prometheus.exporter.process "default" {
  track_children = true
  track_threads = true
  gather_smaps = false
  recheck_on_scrape = true
  matcher {
    // {% raw %} shields the Go-template placeholder {{.Comm}} from Jinja rendering
    name = "{% raw %}{{.Comm}}{% endraw %}"
    cmdline = [".+"]
  }
}
// cAdvisor - Docker container metrics (for Lobechat); reads the Docker socket
prometheus.exporter.cadvisor "default" {
  docker_host = "unix:///var/run/docker.sock"
  store_container_labels = true
  docker_only = true
}
// Prometheus scrape configurations
prometheus.scrape "unix" {
  targets = prometheus.exporter.unix.default.targets
  forward_to = [prometheus.remote_write.default.receiver]
  scrape_interval = "15s"
}
prometheus.scrape "process" {
  targets = prometheus.exporter.process.default.targets
  forward_to = [prometheus.remote_write.default.receiver]
  scrape_interval = "15s"
}
prometheus.scrape "cadvisor" {
  targets = prometheus.exporter.cadvisor.default.targets
  forward_to = [prometheus.remote_write.default.receiver]
  scrape_interval = "15s"
}
// Gitea application metrics, authenticated with a bearer token
// (gitea_metrics_token supplied by Ansible vars)
prometheus.scrape "gitea" {
  targets = [{
    __address__ = "127.0.0.1:{{gitea_web_port}}",
    job = "gitea",
    hostname = "{{inventory_hostname}}",
    environment = "{{deployment_environment}}",
  }]
  forward_to = [prometheus.remote_write.default.receiver]
  scrape_interval = "30s"
  metrics_path = "/metrics"
  bearer_token = "{{gitea_metrics_token}}"
}
// Prometheus remote write endpoint
prometheus.remote_write "default" {
  endpoint {
    url = "{{prometheus_remote_write_url}}"
  }
}

View File

@@ -0,0 +1,80 @@
// Alloy configuration template (rendered by Ansible/Jinja2).
// Collects system logs plus HAProxy and Casdoor syslog streams, and scrapes
// HAProxy's stats endpoint alongside node metrics.
logging {
  level = "{{alloy_log_level}}"
}

// Tail classic log files on disk.
loki.source.file "system_logs" {
  targets = [
    {__path__ = "/var/log/syslog", job = "syslog"},
    {__path__ = "/var/log/auth.log", job = "auth"},
  ]
  forward_to = [loki.write.default.receiver]
}

// Read the systemd journal, tagged with host/environment labels.
loki.source.journal "systemd_logs" {
  forward_to = [loki.write.default.receiver]
  labels = {
    job = "systemd",
    hostname = "{{inventory_hostname}}",
    environment = "{{deployment_environment}}",
  }
}

// HAProxy logs over a local TCP syslog listener (port via Ansible vars).
loki.source.syslog "haproxy_logs" {
  listener {
    address = "127.0.0.1:{{haproxy_syslog_port}}"
    protocol = "tcp"
    syslog_format = "{{ syslog_format }}"
    labels = {
      job = "haproxy",
      hostname = "{{inventory_hostname}}",
      environment = "{{deployment_environment}}",
    }
  }
  forward_to = [loki.write.default.receiver]
}

// Casdoor logs over a local TCP syslog listener (port via Ansible vars).
loki.source.syslog "casdoor_logs" {
  listener {
    address = "127.0.0.1:{{casdoor_syslog_port}}"
    protocol = "tcp"
    syslog_format = "{{ syslog_format }}"
    labels = {
      job = "casdoor",
      hostname = "{{inventory_hostname}}",
      environment = "{{deployment_environment}}",
    }
  }
  forward_to = [loki.write.default.receiver]
}

// Node-level metrics; only the mdadm collector is disabled.
prometheus.exporter.unix "default" {
  include_exporter_metrics = true
  disable_collectors = ["mdadm"]
}

prometheus.scrape "default" {
  targets = prometheus.exporter.unix.default.targets
  forward_to = [prometheus.remote_write.default.receiver]
  job_name = "containers"
}

// Scrape HAProxy's built-in metrics endpoint on the stats port.
prometheus.scrape "haproxy" {
  targets = [
    {"__address__" = "localhost:{{haproxy_stats_port}}", "__metrics_path__" = "/metrics"},
  ]
  scrape_interval = "15s"
  forward_to = [prometheus.remote_write.default.receiver]
  job_name = "haproxy"
}

// Central Prometheus endpoint (URL supplied by Ansible vars).
prometheus.remote_write "default" {
  endpoint {
    url = "{{prometheus_remote_write_url}}"
  }
}

// Central Loki endpoint (URL supplied by Ansible vars).
loki.write "default" {
  endpoint {
    url = "{{loki_url}}"
  }
}

6
ansible/ansible.cfg Normal file
View File

@@ -0,0 +1,6 @@
[defaults]
# Inventory source (directory) relative to this file
inventory = inventory
stdout_callback = ansible.builtin.default
# Render task results as YAML for readability
result_format = yaml
# Default SSH user for managed hosts
remote_user = robert
# NOTE(review): .vault_pass is committed to this repository alongside the
# vaulted data it protects — the password file should live outside VCS.
vault_password_file = .vault_pass

View File

@@ -0,0 +1,423 @@
SERVER_PORT=3001
STORAGE_DIR="/app/server/storage"
UID='1000'
GID='1000'
# SIG_KEY='passphrase' # Please generate random string at least 32 chars long.
# SIG_SALT='salt' # Please generate random string at least 32 chars long.
# JWT_SECRET="my-random-string-for-seeding" # Only needed if AUTH_TOKEN is set. Please generate random string at least 12 chars long.
# JWT_EXPIRY="30d" # (optional) https://docs.anythingllm.com/configuration#custom-ttl-for-sessions
###########################################
######## LLM API SELECTION ################
###########################################
# LLM_PROVIDER='openai'
# OPEN_AI_KEY=
# OPEN_MODEL_PREF='gpt-4o'
# LLM_PROVIDER='gemini'
# GEMINI_API_KEY=
# GEMINI_LLM_MODEL_PREF='gemini-2.0-flash-lite'
# LLM_PROVIDER='azure'
# AZURE_OPENAI_ENDPOINT=
# AZURE_OPENAI_KEY=
# OPEN_MODEL_PREF='my-gpt35-deployment' # This is the "deployment" on Azure you want to use. Not the base model.
# EMBEDDING_MODEL_PREF='embedder-model' # This is the "deployment" on Azure you want to use for embeddings. Not the base model. Valid base model is text-embedding-ada-002
# LLM_PROVIDER='anthropic'
# ANTHROPIC_API_KEY=sk-ant-xxxx
# ANTHROPIC_MODEL_PREF='claude-2'
# ANTHROPIC_CACHE_CONTROL="5m" # Enable prompt caching (5m=5min cache, 1h=1hour cache). Reduces costs and improves speed by caching system prompts.
# LLM_PROVIDER='lmstudio'
# LMSTUDIO_BASE_PATH='http://your-server:1234/v1'
# LMSTUDIO_MODEL_PREF='Loaded from Chat UI' # this is a bug in LMStudio 0.2.17
# LMSTUDIO_MODEL_TOKEN_LIMIT=4096
# LMSTUDIO_AUTH_TOKEN='your-lmstudio-auth-token-here'
# LLM_PROVIDER='localai'
# LOCAL_AI_BASE_PATH='http://host.docker.internal:8080/v1'
# LOCAL_AI_MODEL_PREF='luna-ai-llama2'
# LOCAL_AI_MODEL_TOKEN_LIMIT=4096
# LOCAL_AI_API_KEY="sk-123abc"
# LLM_PROVIDER='ollama'
# OLLAMA_BASE_PATH='http://host.docker.internal:11434'
# OLLAMA_MODEL_PREF='llama2'
# OLLAMA_MODEL_TOKEN_LIMIT=4096
# OLLAMA_AUTH_TOKEN='your-ollama-auth-token-here (optional, only for ollama running behind auth - Bearer token)'
# OLLAMA_RESPONSE_TIMEOUT=7200000 (optional, max timeout in milliseconds for ollama response to conclude. Default is 5min before aborting)
# LLM_PROVIDER='togetherai'
# TOGETHER_AI_API_KEY='my-together-ai-key'
# TOGETHER_AI_MODEL_PREF='mistralai/Mixtral-8x7B-Instruct-v0.1'
# LLM_PROVIDER='mistral'
# MISTRAL_API_KEY='example-mistral-ai-api-key'
# MISTRAL_MODEL_PREF='mistral-tiny'
# LLM_PROVIDER='perplexity'
# PERPLEXITY_API_KEY='my-perplexity-key'
# PERPLEXITY_MODEL_PREF='codellama-34b-instruct'
# LLM_PROVIDER='openrouter'
# OPENROUTER_API_KEY='my-openrouter-key'
# OPENROUTER_MODEL_PREF='openrouter/auto'
# LLM_PROVIDER='huggingface'
# HUGGING_FACE_LLM_ENDPOINT=https://uuid-here.us-east-1.aws.endpoints.huggingface.cloud
# HUGGING_FACE_LLM_API_KEY=hf_xxxxxx
# HUGGING_FACE_LLM_TOKEN_LIMIT=8000
# LLM_PROVIDER='groq'
# GROQ_API_KEY=gsk_abcxyz
# GROQ_MODEL_PREF=llama3-8b-8192
# LLM_PROVIDER='koboldcpp'
# KOBOLD_CPP_BASE_PATH='http://127.0.0.1:5000/v1'
# KOBOLD_CPP_MODEL_PREF='koboldcpp/codellama-7b-instruct.Q4_K_S'
# KOBOLD_CPP_MODEL_TOKEN_LIMIT=4096
# LLM_PROVIDER='textgenwebui'
# TEXT_GEN_WEB_UI_BASE_PATH='http://127.0.0.1:5000/v1'
# TEXT_GEN_WEB_UI_TOKEN_LIMIT=4096
# TEXT_GEN_WEB_UI_API_KEY='sk-123abc'
# LLM_PROVIDER='generic-openai'
# GENERIC_OPEN_AI_BASE_PATH='http://proxy.url.openai.com/v1'
# GENERIC_OPEN_AI_MODEL_PREF='gpt-3.5-turbo'
# GENERIC_OPEN_AI_MODEL_TOKEN_LIMIT=4096
# GENERIC_OPEN_AI_API_KEY=sk-123abc
# GENERIC_OPEN_AI_CUSTOM_HEADERS="X-Custom-Auth:my-secret-key,X-Custom-Header:my-value" (useful if using a proxy that requires authentication or other headers)
# LLM_PROVIDER='litellm'
# LITE_LLM_MODEL_PREF='gpt-3.5-turbo'
# LITE_LLM_MODEL_TOKEN_LIMIT=4096
# LITE_LLM_BASE_PATH='http://127.0.0.1:4000'
# LITE_LLM_API_KEY='sk-123abc'
# LLM_PROVIDER='novita'
# NOVITA_LLM_API_KEY='your-novita-api-key-here' check on https://novita.ai/settings/key-management
# NOVITA_LLM_MODEL_PREF='deepseek/deepseek-r1'
# LLM_PROVIDER='cometapi'
# COMETAPI_LLM_API_KEY='your-cometapi-api-key-here' # Get one at https://api.cometapi.com/console/token
# COMETAPI_LLM_MODEL_PREF='gpt-5-mini'
# COMETAPI_LLM_TIMEOUT_MS=500 # Optional; stream idle timeout in ms (min 500ms)
# LLM_PROVIDER='cohere'
# COHERE_API_KEY=
# COHERE_MODEL_PREF='command-r'
# LLM_PROVIDER='bedrock'
# AWS_BEDROCK_LLM_ACCESS_KEY_ID=
# AWS_BEDROCK_LLM_ACCESS_KEY=
# AWS_BEDROCK_LLM_REGION=us-west-2
# AWS_BEDROCK_LLM_MODEL_PREFERENCE=meta.llama3-1-8b-instruct-v1:0
# AWS_BEDROCK_LLM_MODEL_TOKEN_LIMIT=8191
# AWS_BEDROCK_LLM_CONNECTION_METHOD=iam
# AWS_BEDROCK_LLM_MAX_OUTPUT_TOKENS=4096
# AWS_BEDROCK_LLM_SESSION_TOKEN= # Only required if CONNECTION_METHOD is 'sessionToken'
# or even use Short and Long Term API keys
# AWS_BEDROCK_LLM_CONNECTION_METHOD="apiKey"
# AWS_BEDROCK_LLM_API_KEY=
# LLM_PROVIDER='fireworksai'
# FIREWORKS_AI_LLM_API_KEY='my-fireworks-ai-key'
# FIREWORKS_AI_LLM_MODEL_PREF='accounts/fireworks/models/llama-v3p1-8b-instruct'
# LLM_PROVIDER='apipie'
# APIPIE_LLM_API_KEY='sk-123abc'
# APIPIE_LLM_MODEL_PREF='openrouter/llama-3.1-8b-instruct'
# LLM_PROVIDER='xai'
# XAI_LLM_API_KEY='xai-your-api-key-here'
# XAI_LLM_MODEL_PREF='grok-beta'
# LLM_PROVIDER='zai'
# ZAI_API_KEY="your-zai-api-key-here"
# ZAI_MODEL_PREF="glm-4.5"
# LLM_PROVIDER='nvidia-nim'
# NVIDIA_NIM_LLM_BASE_PATH='http://127.0.0.1:8000'
# NVIDIA_NIM_LLM_MODEL_PREF='meta/llama-3.2-3b-instruct'
# LLM_PROVIDER='deepseek'
# DEEPSEEK_API_KEY='your-deepseek-api-key-here'
# DEEPSEEK_MODEL_PREF='deepseek-chat'
# LLM_PROVIDER='ppio'
# PPIO_API_KEY='your-ppio-api-key-here'
# PPIO_MODEL_PREF=deepseek/deepseek-v3/community
# LLM_PROVIDER='moonshotai'
# MOONSHOT_AI_API_KEY='your-moonshot-api-key-here'
# MOONSHOT_AI_MODEL_PREF='moonshot-v1-32k'
# LLM_PROVIDER='foundry'
# FOUNDRY_BASE_PATH='http://127.0.0.1:55776'
# FOUNDRY_MODEL_PREF='phi-3.5-mini'
# FOUNDRY_MODEL_TOKEN_LIMIT=4096
# LLM_PROVIDER='giteeai'
# GITEE_AI_API_KEY=
# GITEE_AI_MODEL_PREF=
# GITEE_AI_MODEL_TOKEN_LIMIT=
# LLM_PROVIDER='docker-model-runner'
# DOCKER_MODEL_RUNNER_BASE_PATH='http://127.0.0.1:12434'
# DOCKER_MODEL_RUNNER_LLM_MODEL_PREF='phi-3.5-mini'
# DOCKER_MODEL_RUNNER_LLM_MODEL_TOKEN_LIMIT=4096
# LLM_PROVIDER='privatemode'
# PRIVATEMODE_LLM_BASE_PATH='http://127.0.0.1:8080'
# PRIVATEMODE_LLM_MODEL_PREF='gemma-3-27b'
# LLM_PROVIDER='sambanova'
# SAMBANOVA_LLM_API_KEY='xxx-xxx-xxx'
# SAMBANOVA_LLM_MODEL_PREF='gpt-oss-120b'
###########################################
######## Embedding API SELECTION ##########
###########################################
# This will be the assumed default embedding selection and model
# EMBEDDING_ENGINE='native'
# EMBEDDING_MODEL_PREF='Xenova/all-MiniLM-L6-v2'
# Only used if you are using an LLM that does not natively support embedding (openai or Azure)
# EMBEDDING_ENGINE='openai'
# OPEN_AI_KEY=sk-xxxx
# EMBEDDING_MODEL_PREF='text-embedding-ada-002'
# EMBEDDING_ENGINE='azure'
# AZURE_OPENAI_ENDPOINT=
# AZURE_OPENAI_KEY=
# EMBEDDING_MODEL_PREF='my-embedder-model' # This is the "deployment" on Azure you want to use for embeddings. Not the base model. Valid base model is text-embedding-ada-002
# EMBEDDING_ENGINE='localai'
# EMBEDDING_BASE_PATH='http://localhost:8080/v1'
# EMBEDDING_MODEL_PREF='text-embedding-ada-002'
# EMBEDDING_MODEL_MAX_CHUNK_LENGTH=1000 # The max chunk size in chars a string to embed can be
# EMBEDDING_ENGINE='ollama'
# EMBEDDING_BASE_PATH='http://host.docker.internal:11434'
# EMBEDDING_MODEL_PREF='nomic-embed-text:latest'
# EMBEDDING_MODEL_MAX_CHUNK_LENGTH=8192
# EMBEDDING_ENGINE='lmstudio'
# EMBEDDING_BASE_PATH='https://host.docker.internal:1234/v1'
# EMBEDDING_MODEL_PREF='nomic-ai/nomic-embed-text-v1.5-GGUF/nomic-embed-text-v1.5.Q4_0.gguf'
# EMBEDDING_MODEL_MAX_CHUNK_LENGTH=8192
# EMBEDDING_ENGINE='cohere'
# COHERE_API_KEY=
# EMBEDDING_MODEL_PREF='embed-english-v3.0'
# EMBEDDING_ENGINE='voyageai'
# VOYAGEAI_API_KEY=
# EMBEDDING_MODEL_PREF='voyage-large-2-instruct'
# EMBEDDING_ENGINE='litellm'
# EMBEDDING_MODEL_PREF='text-embedding-ada-002'
# EMBEDDING_MODEL_MAX_CHUNK_LENGTH=8192
# LITE_LLM_BASE_PATH='http://127.0.0.1:4000'
# LITE_LLM_API_KEY='sk-123abc'
# EMBEDDING_ENGINE='generic-openai'
# EMBEDDING_MODEL_PREF='text-embedding-ada-002'
# EMBEDDING_MODEL_MAX_CHUNK_LENGTH=8192
# EMBEDDING_BASE_PATH='http://127.0.0.1:4000'
# GENERIC_OPEN_AI_EMBEDDING_API_KEY='sk-123abc'
# GENERIC_OPEN_AI_EMBEDDING_MAX_CONCURRENT_CHUNKS=500
# GENERIC_OPEN_AI_EMBEDDING_API_DELAY_MS=1000
# EMBEDDING_ENGINE='gemini'
# GEMINI_EMBEDDING_API_KEY=
# EMBEDDING_MODEL_PREF='text-embedding-004'
# EMBEDDING_ENGINE='openrouter'
# EMBEDDING_MODEL_PREF='baai/bge-m3'
# OPENROUTER_API_KEY=''
###########################################
######## Vector Database Selection ########
###########################################
# Enable all below if you are using vector database: LanceDB.
# VECTOR_DB="lancedb"
# Enable all below if you are using vector database: PGVector.
# VECTOR_DB="pgvector"
# PGVECTOR_CONNECTION_STRING="postgresql://dbuser:dbuserpass@localhost:5432/yourdb"
# PGVECTOR_TABLE_NAME="anythingllm_vectors" # optional, but can be defined
# Enable all below if you are using vector database: Chroma.
# VECTOR_DB="chroma"
# CHROMA_ENDPOINT='http://host.docker.internal:8000'
# CHROMA_API_HEADER="X-Api-Key"
# CHROMA_API_KEY="sk-123abc"
# Enable all below if you are using vector database: Chroma Cloud.
# VECTOR_DB="chromacloud"
# CHROMACLOUD_API_KEY="ck-your-api-key"
# CHROMACLOUD_TENANT=
# CHROMACLOUD_DATABASE=
# Enable all below if you are using vector database: Pinecone.
# VECTOR_DB="pinecone"
# PINECONE_API_KEY=
# PINECONE_INDEX=
# Enable all below if you are using vector database: Weaviate.
# VECTOR_DB="weaviate"
# WEAVIATE_ENDPOINT="http://localhost:8080"
# WEAVIATE_API_KEY=
# Enable all below if you are using vector database: Qdrant.
# VECTOR_DB="qdrant"
# QDRANT_ENDPOINT="http://localhost:6333"
# QDRANT_API_KEY=
# Enable all below if you are using vector database: Milvus.
# VECTOR_DB="milvus"
# MILVUS_ADDRESS="http://localhost:19530"
# MILVUS_USERNAME=
# MILVUS_PASSWORD=
# Enable all below if you are using vector database: Zilliz Cloud.
# VECTOR_DB="zilliz"
# ZILLIZ_ENDPOINT="https://sample.api.gcp-us-west1.zillizcloud.com"
# ZILLIZ_API_TOKEN=api-token-here
# Enable all below if you are using vector database: Astra DB.
# VECTOR_DB="astra"
# ASTRA_DB_APPLICATION_TOKEN=
# ASTRA_DB_ENDPOINT=
###########################################
######## Audio Model Selection ############
###########################################
# (default) use built-in whisper-small model.
# WHISPER_PROVIDER="local"
# use openai hosted whisper model.
# WHISPER_PROVIDER="openai"
# OPEN_AI_KEY=sk-xxxxxxxx
###########################################
######## TTS/STT Model Selection ##########
###########################################
# TTS_PROVIDER="native"
# TTS_PROVIDER="openai"
# TTS_OPEN_AI_KEY=sk-example
# TTS_OPEN_AI_VOICE_MODEL=nova
# TTS_PROVIDER="generic-openai"
# TTS_OPEN_AI_COMPATIBLE_KEY=sk-example
# TTS_OPEN_AI_COMPATIBLE_MODEL=tts-1
# TTS_OPEN_AI_COMPATIBLE_VOICE_MODEL=nova
# TTS_OPEN_AI_COMPATIBLE_ENDPOINT="https://api.openai.com/v1"
# TTS_PROVIDER="elevenlabs"
# TTS_ELEVEN_LABS_KEY=
# TTS_ELEVEN_LABS_VOICE_MODEL=21m00Tcm4TlvDq8ikWAM # Rachel
# CLOUD DEPLOYMENT VARIABLES ONLY
# AUTH_TOKEN="hunter2" # This is the password to your application if remote hosting.
# DISABLE_TELEMETRY="false"
###########################################
######## PASSWORD COMPLEXITY ##############
###########################################
# Enforce a password schema for your organization users.
# Documentation on how to use https://github.com/kamronbatman/joi-password-complexity
# Default is only 8 char minimum
# PASSWORDMINCHAR=8
# PASSWORDMAXCHAR=250
# PASSWORDLOWERCASE=1
# PASSWORDUPPERCASE=1
# PASSWORDNUMERIC=1
# PASSWORDSYMBOL=1
# PASSWORDREQUIREMENTS=4
###########################################
######## ENABLE HTTPS SERVER ##############
###########################################
# By enabling this and providing the path/filename for the key and cert,
# the server will use HTTPS instead of HTTP.
#ENABLE_HTTPS="true"
#HTTPS_CERT_PATH="sslcert/cert.pem"
#HTTPS_KEY_PATH="sslcert/key.pem"
###########################################
######## AGENT SERVICE KEYS ###############
###########################################
#------ SEARCH ENGINES -------
#=============================
#------ Google Search -------- https://programmablesearchengine.google.com/controlpanel/create
# AGENT_GSE_KEY=
# AGENT_GSE_CTX=
#------ SearchApi.io ----------- https://www.searchapi.io/
# AGENT_SEARCHAPI_API_KEY=
# AGENT_SEARCHAPI_ENGINE=google
#------ SerpApi ----------- https://serpapi.com/
# AGENT_SERPAPI_API_KEY=
# AGENT_SERPAPI_ENGINE=google
#------ Serper.dev ----------- https://serper.dev/
# AGENT_SERPER_DEV_KEY=
#------ Bing Search ----------- https://portal.azure.com/
# AGENT_BING_SEARCH_API_KEY=
#------ Serply.io ----------- https://serply.io/
# AGENT_SERPLY_API_KEY=
#------ SearXNG ----------- https://github.com/searxng/searxng
# AGENT_SEARXNG_API_URL=
#------ Tavily ----------- https://www.tavily.com/
# AGENT_TAVILY_API_KEY=
#------ Exa Search ----------- https://www.exa.ai/
# AGENT_EXA_API_KEY=
###########################################
######## Other Configurations ############
###########################################
# Disable viewing chat history from the UI and frontend APIs.
# See https://docs.anythingllm.com/configuration#disable-view-chat-history for more information.
# DISABLE_VIEW_CHAT_HISTORY=1
# Enable simple SSO passthrough to pre-authenticate users from a third party service.
# See https://docs.anythingllm.com/configuration#simple-sso-passthrough for more information.
# SIMPLE_SSO_ENABLED=1
# SIMPLE_SSO_NO_LOGIN=1
# SIMPLE_SSO_NO_LOGIN_REDIRECT=https://your-custom-login-url.com (optional)
# Allow scraping of any IP address in collector - must be string "true" to be enabled
# See https://docs.anythingllm.com/configuration#local-ip-address-scraping for more information.
# COLLECTOR_ALLOW_ANY_IP="true"
# Specify the target languages for when using OCR to parse images and PDFs.
# This is a comma separated list of language codes as a string. Unsupported languages will be ignored.
# Default is English. See https://tesseract-ocr.github.io/tessdoc/Data-Files-in-different-versions.html for a list of valid language codes.
# TARGET_OCR_LANG=eng,deu,ita,spa,fra,por,rus,nld,tur,hun,pol
# Runtime flags for built-in puppeteer Chromium instance
# This is only required on Linux machines running AnythingLLM via Docker
# and do not want to use the --cap-add=SYS_ADMIN docker argument
# ANYTHINGLLM_CHROMIUM_ARGS="--no-sandbox,--disable-setuid-sandbox"
# Disable Swagger API documentation endpoint.
# Set to "true" to disable the /api/docs endpoint (recommended for production deployments).
# DISABLE_SWAGGER_DOCS="true"
# Disable MCP cooldown timer for agent calls
# this can lead to infinite recursive calls of the same function
# for some model/provider combinations
# MCP_NO_COOLDOWN="true"

View File

@@ -0,0 +1,29 @@
# AnythingLLM document collector — systemd unit (Jinja2 template, Ansible-managed).
[Unit]
Description=AnythingLLM Document Collector
Documentation=https://docs.anythingllm.com
# Order after the server; BindsTo additionally stops the collector whenever
# anythingllm-server stops, so the pair always runs (or not) together.
After=network.target anythingllm-server.service
BindsTo=anythingllm-server.service
[Service]
Type=simple
User={{ anythingllm_user }}
Group={{ anythingllm_group }}
WorkingDirectory={{ anythingllm_directory }}/app/collector
# Reuse the server's .env so both processes see identical configuration.
EnvironmentFile={{ anythingllm_directory }}/app/server/.env
Environment=NODE_ENV=production
ExecStart=/usr/bin/node index.js
Restart=on-failure
RestartSec=10
StandardOutput=journal
StandardError=journal
SyslogIdentifier=anythingllm-collector
# Security hardening
NoNewPrivileges=true
PrivateTmp=true
# strict = whole filesystem read-only except the ReadWritePaths below.
ProtectSystem=strict
# NOTE(review): the deploy playbook sets the service user's home to
# {{ anythingllm_directory }}; ProtectHome=true hides /home, so confirm
# that directory lives outside /home on every host.
ProtectHome=true
ReadWritePaths={{ anythingllm_directory }}
[Install]
WantedBy=multi-user.target

View File

@@ -0,0 +1,29 @@
# AnythingLLM API/web server — systemd unit (Jinja2 template, Ansible-managed).
[Unit]
Description=AnythingLLM Server
Documentation=https://docs.anythingllm.com
After=network.target postgresql.service
# Wants (not Requires): pull the collector up with us, but keep serving if it dies.
Wants=anythingllm-collector.service
[Service]
Type=simple
User={{ anythingllm_user }}
Group={{ anythingllm_group }}
WorkingDirectory={{ anythingllm_directory }}/app/server
Environment=NODE_ENV=production
Environment=SERVER_PORT={{ anythingllm_port }}
# NOTE(review): unlike the collector unit there is no EnvironmentFile= here;
# presumably the server loads app/server/.env itself from WorkingDirectory —
# confirm, otherwise secrets from env.j2 never reach this process.
ExecStart=/usr/bin/node index.js
Restart=on-failure
RestartSec=10
StandardOutput=journal
StandardError=journal
SyslogIdentifier=anythingllm-server
# Security hardening
NoNewPrivileges=true
PrivateTmp=true
# strict = whole filesystem read-only except the ReadWritePaths below.
ProtectSystem=strict
ProtectHome=true
ReadWritePaths={{ anythingllm_directory }}
[Install]
WantedBy=multi-user.target

View File

@@ -0,0 +1,60 @@
{
"mcpServers": {
"upstash-context7": {
"command": "npx",
"args": [
"-y",
"@upstash/context7-mcp"
]
},
"angelia": {
"type": "streamable",
"url": "{{angelia_mcp_url}}",
"headers": {
"Authorization": "Bearer {{angelia_mcp_auth}}"
}
},
"argos": {
"type": "streamable",
"url": "{{argos_mcp_url}}"
},
"caliban": {
"type": "streamable",
"url": "{{caliban_mcp_url}}"
},
"gitea": {
"type": "streamable",
"url": "{{gitea_mcp_url}}"
},
"github": {
"type": "streamable",
"url": "https://api.githubcopilot.com/mcp/",
"headers": {
"Authorization": "Bearer {{github_personal_access_token}}"
}
},
"grafana": {
"type": "streamable",
"url": "{{grafana_mcp_url}}"
},
"huggingface": {
"type": "streamable",
"url": "https://huggingface.co/mcp",
"headers": {
"Authorization": "Bearer {{huggingface_mcp_token}}"
}
},
"korax": {
"type": "streamable",
"url": "{{korax_mcp_url}}"
},
"neo4j": {
"type": "streamable",
"url": "{{neo4j_mcp_url}}"
},
"nike": {
"type": "streamable",
"url": "{{nike_mcp_url}}"
}
}
}

View File

@@ -0,0 +1,276 @@
---
# Deploy AnythingLLM as a native Node.js installation (no Docker) on hosts
# whose inventory lists the 'anythingllm' service. Expects the release
# tarball staged on the controller under ~/rel/ by the companion stage play.
- name: Deploy AnythingLLM (Native Node.js)
  hosts: ubuntu
  become: true
  vars:
    nodejs_version: "22"
    # Several tasks run as the unprivileged service user via become_user;
    # these two settings let Ansible's temp files be read by that user.
    ansible_common_remote_group: "{{ anythingllm_group }}"
    allow_world_readable_tmpfiles: true
  tasks:
    # NOTE(review): no "| default([])" here (argos/deploy.yml has one), so
    # this fails hard on hosts with no 'services' var — confirm intended.
    - name: Check if host has anythingllm service
      ansible.builtin.set_fact:
        has_anythingllm_service: "{{'anythingllm' in services}}"
    - name: Skip hosts without anythingllm service
      ansible.builtin.meta: end_host
      when: not has_anythingllm_service
    - name: Install build dependencies
      ansible.builtin.apt:
        name:
          - curl
          - tar
          - build-essential
          - python3
          - libpq-dev
        state: present
        update_cache: true
    # NOTE(review): ansible.builtin.apt_key wraps the deprecated apt-key tool
    # (removed on newer Ubuntu); consider a /etc/apt/keyrings file plus a
    # signed-by clause in the repo line below.
    - name: Add NodeSource GPG key
      ansible.builtin.apt_key:
        url: https://deb.nodesource.com/gpgkey/nodesource-repo.gpg.key
        state: present
    - name: Add NodeSource repository
      ansible.builtin.apt_repository:
        repo: "deb https://deb.nodesource.com/node_{{ nodejs_version }}.x nodistro main"
        state: present
        filename: nodesource
    - name: Install Node.js
      ansible.builtin.apt:
        name: nodejs
        state: present
        update_cache: true
    - name: Install Yarn globally
      ansible.builtin.npm:
        name: yarn
        global: true
        state: present
    - name: Create anythingllm group
      ansible.builtin.group:
        name: "{{ anythingllm_group }}"
    # Service account; its home doubles as the install root.
    - name: Create anythingllm user
      ansible.builtin.user:
        name: "{{ anythingllm_user }}"
        comment: "AnythingLLM service account"
        group: "{{ anythingllm_group }}"
        home: "{{ anythingllm_directory }}"
        system: true
        shell: /bin/bash
    - name: Add remote_user to anythingllm group
      ansible.builtin.user:
        name: "{{ remote_user }}"
        groups: "{{ anythingllm_group }}"
        append: true
    - name: Create anythingllm directory
      ansible.builtin.file:
        path: "{{ anythingllm_directory }}"
        owner: "{{ anythingllm_user }}"
        group: "{{ anythingllm_group }}"
        state: directory
        mode: '0750'
    - name: Create app directory
      ansible.builtin.file:
        path: "{{ anythingllm_directory }}/app"
        owner: "{{ anythingllm_user }}"
        group: "{{ anythingllm_group }}"
        state: directory
        mode: '0750'
    # app_unarchive.changed gates every rebuild step below, so a re-run with
    # the same tarball is a cheap no-op.
    - name: Transfer and unarchive AnythingLLM release
      ansible.builtin.unarchive:
        src: "~/rel/anythingllm_{{ anythingllm_rel }}.tar"
        dest: "{{ anythingllm_directory }}/app"
        owner: "{{ anythingllm_user }}"
        group: "{{ anythingllm_group }}"
        mode: '0750'
      register: app_unarchive
      notify:
        - Restart AnythingLLM Server
        - Restart AnythingLLM Collector
    - name: Run yarn setup
      become_user: "{{ anythingllm_user }}"
      ansible.builtin.command:
        cmd: yarn setup
        chdir: "{{ anythingllm_directory }}/app"
      when: app_unarchive.changed
      register: yarn_setup
    - name: Create storage directory
      ansible.builtin.file:
        path: "{{ anythingllm_directory }}/storage"
        owner: "{{ anythingllm_user }}"
        group: "{{ anythingllm_group }}"
        state: directory
        mode: '0750'
    - name: Create plugins directory
      ansible.builtin.file:
        path: "{{ anythingllm_directory }}/storage/plugins"
        owner: "{{ anythingllm_user }}"
        group: "{{ anythingllm_group }}"
        state: directory
        mode: '0750'
    # The rendered JSON carries bearer tokens — keep it 0600.
    - name: Template MCP servers configuration
      ansible.builtin.template:
        src: anythingllm_mcp_servers.json.j2
        dest: "{{ anythingllm_directory }}/storage/plugins/anythingllm_mcp_servers.json"
        owner: "{{ anythingllm_user }}"
        group: "{{ anythingllm_group }}"
        mode: '0600'
      notify:
        - Restart AnythingLLM Server
        - Restart AnythingLLM Collector
    - name: Create hotdir directory
      ansible.builtin.file:
        path: "{{ anythingllm_directory }}/hotdir"
        owner: "{{ anythingllm_user }}"
        group: "{{ anythingllm_group }}"
        state: directory
        mode: '0750'
    # The next four tasks point both /srv/collector/hotdir and the app's own
    # collector/hotdir at the single real hotdir above — per the task names,
    # this satisfies AnythingLLM's path resolution (TODO confirm against app).
    - name: Create collector symlink directory
      ansible.builtin.file:
        path: /srv/collector
        owner: "{{ anythingllm_user }}"
        group: "{{ anythingllm_group }}"
        state: directory
        mode: '0755'
    - name: Create hotdir symlink for AnythingLLM path resolution
      ansible.builtin.file:
        src: "{{ anythingllm_directory }}/hotdir"
        dest: /srv/collector/hotdir
        owner: "{{ anythingllm_user }}"
        group: "{{ anythingllm_group }}"
        state: link
    - name: Remove collector's default hotdir directory
      ansible.builtin.file:
        path: "{{ anythingllm_directory }}/app/collector/hotdir"
        state: absent
    - name: Create hotdir symlink for collector
      ansible.builtin.file:
        src: "{{ anythingllm_directory }}/hotdir"
        dest: "{{ anythingllm_directory }}/app/collector/hotdir"
        owner: "{{ anythingllm_user }}"
        group: "{{ anythingllm_group }}"
        state: link
    # Secrets (JWT, DB password, API keys) — 0600, never world readable.
    - name: Template server environment file
      ansible.builtin.template:
        src: env.j2
        dest: "{{ anythingllm_directory }}/app/server/.env"
        owner: "{{ anythingllm_user }}"
        group: "{{ anythingllm_group }}"
        mode: '0600'
      notify:
        - Restart AnythingLLM Server
        - Restart AnythingLLM Collector
    - name: Configure frontend API base
      ansible.builtin.lineinfile:
        path: "{{ anythingllm_directory }}/app/frontend/.env"
        regexp: "^VITE_API_BASE="
        line: "VITE_API_BASE='/api'"
        create: true
        owner: "{{ anythingllm_user }}"
        group: "{{ anythingllm_group }}"
        mode: '0644'
      register: frontend_env
    - name: Build frontend
      become_user: "{{ anythingllm_user }}"
      ansible.builtin.command:
        cmd: yarn build
        chdir: "{{ anythingllm_directory }}/app/frontend"
      when: app_unarchive.changed or frontend_env.changed
      register: frontend_build
    # Replace, don't merge, the previously served static assets.
    - name: Remove old server/public directory
      ansible.builtin.file:
        path: "{{ anythingllm_directory }}/app/server/public"
        state: absent
      when: frontend_build.changed
    - name: Copy frontend build to server/public
      become_user: "{{ anythingllm_user }}"
      ansible.builtin.copy:
        src: "{{ anythingllm_directory }}/app/frontend/dist/"
        dest: "{{ anythingllm_directory }}/app/server/public/"
        remote_src: true
        owner: "{{ anythingllm_user }}"
        group: "{{ anythingllm_group }}"
      when: frontend_build.changed
    - name: Generate Prisma client
      become_user: "{{ anythingllm_user }}"
      ansible.builtin.command:
        cmd: npx prisma generate --schema=./prisma/schema.prisma
        chdir: "{{ anythingllm_directory }}/app/server"
      when: app_unarchive.changed or yarn_setup.changed
    - name: Run Prisma migrations
      become_user: "{{ anythingllm_user }}"
      ansible.builtin.command:
        cmd: npx prisma migrate deploy --schema=./prisma/schema.prisma
        chdir: "{{ anythingllm_directory }}/app/server"
      when: app_unarchive.changed or yarn_setup.changed
    - name: Create AnythingLLM server systemd service
      ansible.builtin.template:
        src: anythingllm-server.service.j2
        dest: /etc/systemd/system/anythingllm-server.service
        mode: '0644'
      notify:
        - Reload systemd
        - Restart AnythingLLM Server
    - name: Create AnythingLLM collector systemd service
      ansible.builtin.template:
        src: anythingllm-collector.service.j2
        dest: /etc/systemd/system/anythingllm-collector.service
        mode: '0644'
      notify:
        - Reload systemd
        - Restart AnythingLLM Collector
    - name: Enable and start AnythingLLM server
      ansible.builtin.systemd:
        name: anythingllm-server
        enabled: true
        state: started
        daemon_reload: true
    - name: Enable and start AnythingLLM collector
      ansible.builtin.systemd:
        name: anythingllm-collector
        enabled: true
        state: started
        daemon_reload: true
  handlers:
    - name: Reload systemd
      ansible.builtin.systemd:
        daemon_reload: true
    - name: Restart AnythingLLM Server
      ansible.builtin.systemd:
        name: anythingllm-server
        state: restarted
    - name: Restart AnythingLLM Collector
      ansible.builtin.systemd:
        name: anythingllm-collector
        state: restarted

View File

@@ -0,0 +1,393 @@
networks:
frontend:
driver: bridge
backend:
driver: bridge
monitoring:
driver: bridge
volumes:
anythingllm_data:
driver: local
postgres_data:
driver: local
prometheus_data:
driver: local
loki_data:
driver: local
grafana_data:
driver: local
services:
# ============================================
# PostgreSQL with pgvector Extension
# ============================================
postgres:
image: pgvector/pgvector:pg17
container_name: anythingllm-postgres
restart: unless-stopped
environment:
POSTGRES_DB: ${POSTGRES_DB:-anythingllm}
POSTGRES_USER: ${POSTGRES_USER:-anythingllm}
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:?POSTGRES_PASSWORD is required}
POSTGRES_INITDB_ARGS: "-E UTF8"
volumes:
- postgres_data:/var/lib/postgresql/data
- ./scripts/init-pgvector.sql:/docker-entrypoint-initdb.d/init-pgvector.sql:ro
networks:
- backend
healthcheck:
test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER:-anythingllm}"]
interval: 10s
timeout: 5s
retries: 5
deploy:
resources:
limits:
memory: 2G
reservations:
memory: 1G
logging:
driver: "json-file"
options:
max-size: "10m"
max-file: "3"
labels: "service=postgres"
# ============================================
# AnythingLLM Application
# ============================================
  anythingllm:
    image: mintplexlabs/anythingllm:latest
    container_name: anythingllm
    restart: unless-stopped
    # Required by the bundled Chromium unless ANYTHINGLLM_CHROMIUM_ARGS
    # disables sandboxing (see the env reference earlier in this repo).
    cap_add:
      - SYS_ADMIN
    environment:
      # Server Configuration
      SERVER_PORT: 3001
      JWT_SECRET: ${JWT_SECRET:?JWT_SECRET is required}
      SIG_KEY: ${SIG_KEY:?SIG_KEY is required}
      SIG_SALT: ${SIG_SALT:?SIG_SALT is required}
      STORAGE_DIR: /app/server/storage
      # PostgreSQL Configuration
      VECTOR_DB: "pgvector"
      PGVECTOR_CONNECTION_STRING: "postgresql://${POSTGRES_USER:-anythingllm}:${POSTGRES_PASSWORD}@postgres:5432/${POSTGRES_DB:-anythingllm}"
      # LLM Provider - Generic OpenAI (for llama-cpp)
      LLM_PROVIDER: "generic-openai"
      GENERIC_OPEN_AI_BASE_PATH: ${LLAMACPP_BASE_URL:?LLAMACPP_BASE_URL is required}
      GENERIC_OPEN_AI_MODEL_PREF: ${LLAMACPP_MODEL:-llama-3-8b}
      GENERIC_OPEN_AI_MODEL_TOKEN_LIMIT: ${LLAMACPP_TOKEN_LIMIT:-8192}
      GENERIC_OPEN_AI_API_KEY: ${LLAMACPP_API_KEY:-not-needed}
      # AWS Bedrock Configuration (optional - uncomment if using)
      # LLM_PROVIDER: "bedrock"
      # AWS_BEDROCK_LLM_ACCESS_KEY_ID: ${AWS_ACCESS_KEY_ID}
      # AWS_BEDROCK_LLM_ACCESS_KEY: ${AWS_SECRET_ACCESS_KEY}
      # AWS_BEDROCK_LLM_REGION: ${AWS_REGION:-us-east-1}
      # AWS_BEDROCK_LLM_MODEL_PREFERENCE: ${BEDROCK_MODEL:-anthropic.claude-3-sonnet-20240229-v1:0}
      # AWS_BEDROCK_LLM_MODEL_TOKEN_LIMIT: 200000
      # Embedding Configuration
      EMBEDDING_ENGINE: ${EMBEDDING_ENGINE}
      EMBEDDING_MODEL_PREF: ${EMBEDDING_MODEL_PREF}
      EMBEDDING_MODEL_MAX_CHUNK_LENGTH: ${EMBEDDING_MODEL_MAX_CHUNK_LENGTH}
      EMBEDDING_BASE_PATH: ${EMBEDDING_BASE_PATH}
      GENERIC_OPEN_AI_EMBEDDING_API_KEY: ${GENERIC_OPEN_AI_EMBEDDING_API_KEY}
      GENERIC_OPEN_AI_EMBEDDING_MAX_CONCURRENT_CHUNKS: ${GENERIC_OPEN_AI_EMBEDDING_MAX_CONCURRENT_CHUNKS}
      GENERIC_OPEN_AI_EMBEDDING_API_DELAY_MS: ${GENERIC_OPEN_AI_EMBEDDING_API_DELAY_MS}
      # Whisper Configuration
      WHISPER_PROVIDER: "local"
      # TTS Configuration
      TTS_PROVIDER: "native"
      # Security
      DISABLE_TELEMETRY: "true"
      # Logging (JSON format for Loki)
      NODE_ENV: production
      # Optional: Enable HTTP logging
      # ENABLE_HTTP_LOGGER: "true"
      # ENABLE_HTTP_LOGGER_TIMESTAMPS: "true"
    # NOTE(review): one named volume backs all three mount points, so
    # storage, hotdir and outputs share a single directory tree — files in
    # server storage are visible to the collector's hotdir watcher. Confirm
    # this is intended; distinct volumes per path are the safer default.
    volumes:
      - anythingllm_data:/app/server/storage
      - anythingllm_data:/app/collector/hotdir
      - anythingllm_data:/app/collector/outputs
    networks:
      - frontend
      - backend
    depends_on:
      postgres:
        condition: service_healthy
    healthcheck:
      test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:3001/api/ping"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 40s
    deploy:
      resources:
        limits:
          memory: 4G
        reservations:
          memory: 2G
    logging:
      driver: "json-file"
      options:
        max-size: "50m"
        max-file: "5"
        labels: "service=anythingllm"
# ============================================
# HAProxy - Reverse Proxy & Load Balancer
# ============================================
haproxy:
image: haproxy:2.9-alpine
container_name: anythingllm-haproxy
restart: unless-stopped
ports:
- "80:80"
- "443:443"
- "8404:8404" # HAProxy stats
volumes:
- ./haproxy/haproxy.cfg:/usr/local/etc/haproxy/haproxy.cfg:ro
- ./haproxy/certs:/etc/haproxy/certs:ro
- ./haproxy/errors:/etc/haproxy/errors:ro
networks:
- frontend
- monitoring
depends_on:
- anythingllm
healthcheck:
test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:8404/stats"]
interval: 10s
timeout: 5s
retries: 3
deploy:
resources:
limits:
memory: 512M
reservations:
memory: 256M
logging:
driver: "json-file"
options:
max-size: "10m"
max-file: "3"
labels: "service=haproxy"
# ============================================
# Prometheus - Metrics Collection
# ============================================
prometheus:
image: prom/prometheus:latest
container_name: anythingllm-prometheus
restart: unless-stopped
command:
- '--config.file=/etc/prometheus/prometheus.yml'
- '--storage.tsdb.path=/prometheus'
- '--storage.tsdb.retention.time=30d'
- '--web.console.libraries=/usr/share/prometheus/console_libraries'
- '--web.console.templates=/usr/share/prometheus/consoles'
- '--web.enable-lifecycle'
volumes:
- ./prometheus/prometheus.yml:/etc/prometheus/prometheus.yml:ro
- ./prometheus/alerts.yml:/etc/prometheus/alerts.yml:ro
- prometheus_data:/prometheus
networks:
- monitoring
- backend
ports:
- "9090:9090"
healthcheck:
test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:9090/-/healthy"]
interval: 30s
timeout: 10s
retries: 3
deploy:
resources:
limits:
memory: 2G
reservations:
memory: 1G
logging:
driver: "json-file"
options:
max-size: "10m"
max-file: "3"
labels: "service=prometheus"
# ============================================
# Postgres Exporter - Database Metrics
# ============================================
postgres-exporter:
image: prometheuscommunity/postgres-exporter:latest
container_name: anythingllm-postgres-exporter
restart: unless-stopped
environment:
DATA_SOURCE_NAME: "postgresql://${POSTGRES_USER:-anythingllm}:${POSTGRES_PASSWORD}@postgres:5432/${POSTGRES_DB:-anythingllm}?sslmode=disable"
networks:
- backend
- monitoring
depends_on:
postgres:
condition: service_healthy
deploy:
resources:
limits:
memory: 256M
reservations:
memory: 128M
logging:
driver: "json-file"
options:
max-size: "5m"
max-file: "2"
labels: "service=postgres-exporter"
# ============================================
# cAdvisor - Container Metrics
# ============================================
cadvisor:
image: gcr.io/cadvisor/cadvisor:latest
container_name: anythingllm-cadvisor
restart: unless-stopped
privileged: true
volumes:
- /:/rootfs:ro
- /var/run:/var/run:ro
- /sys:/sys:ro
- /var/lib/docker/:/var/lib/docker:ro
- /dev/disk/:/dev/disk:ro
networks:
- monitoring
ports:
- "8080:8080"
deploy:
resources:
limits:
memory: 512M
reservations:
memory: 256M
logging:
driver: "json-file"
options:
max-size: "5m"
max-file: "2"
labels: "service=cadvisor"
# ============================================
# Loki - Log Aggregation
# ============================================
loki:
image: grafana/loki:latest
container_name: anythingllm-loki
restart: unless-stopped
command: -config.file=/etc/loki/loki-config.yml
volumes:
- ./loki/loki-config.yml:/etc/loki/loki-config.yml:ro
- loki_data:/loki
networks:
- monitoring
ports:
- "3100:3100"
healthcheck:
test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:3100/ready"]
interval: 30s
timeout: 10s
retries: 3
deploy:
resources:
limits:
memory: 2G
reservations:
memory: 1G
logging:
driver: "json-file"
options:
max-size: "10m"
max-file: "3"
labels: "service=loki"
# ============================================
# Grafana Alloy - Log Collection
# ============================================
alloy:
image: grafana/alloy:latest
container_name: anythingllm-alloy
restart: unless-stopped
command:
- run
- /etc/alloy/config.alloy
- --server.http.listen-addr=0.0.0.0:12345
- --storage.path=/var/lib/alloy/data
volumes:
- ./alloy/config.alloy:/etc/alloy/config.alloy:ro
- /var/run/docker.sock:/var/run/docker.sock:ro
- /var/lib/docker/containers:/var/lib/docker/containers:ro
networks:
- monitoring
ports:
- "12345:12345"
depends_on:
- loki
deploy:
resources:
limits:
memory: 512M
reservations:
memory: 256M
logging:
driver: "json-file"
options:
max-size: "10m"
max-file: "3"
labels: "service=alloy"
# ============================================
# Grafana - Visualization Dashboard
# ============================================
grafana:
image: grafana/grafana:latest
container_name: anythingllm-grafana
restart: unless-stopped
environment:
GF_SECURITY_ADMIN_USER: ${GRAFANA_ADMIN_USER:-admin}
GF_SECURITY_ADMIN_PASSWORD: ${GRAFANA_ADMIN_PASSWORD:?GRAFANA_ADMIN_PASSWORD is required}
GF_INSTALL_PLUGINS: grafana-clock-panel,grafana-simple-json-datasource
GF_SERVER_ROOT_URL: ${GRAFANA_ROOT_URL:-http://localhost:3000}
GF_USERS_ALLOW_SIGN_UP: "false"
volumes:
- grafana_data:/var/lib/grafana
- ./grafana/provisioning:/etc/grafana/provisioning:ro
- ./grafana/dashboards:/var/lib/grafana/dashboards:ro
networks:
- monitoring
- frontend
ports:
- "3000:3000"
depends_on:
- prometheus
- loki
healthcheck:
test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:3000/api/health"]
interval: 30s
timeout: 10s
retries: 3
deploy:
resources:
limits:
memory: 1G
reservations:
memory: 512M
logging:
driver: "json-file"
options:
max-size: "10m"
max-file: "3"
labels: "service=grafana"

View File

@@ -0,0 +1,79 @@
# AnythingLLM Server Environment Configuration
# Managed by Ansible - Red Panda Approved
# Generated for {{ inventory_hostname }}
# ============================================
# Server Configuration
# ============================================
SERVER_PORT={{ anythingllm_port }}
STORAGE_DIR={{ anythingllm_directory }}/storage
# ============================================
# Security
# ============================================
JWT_SECRET={{ anythingllm_jwt_secret }}
SIG_KEY={{ anythingllm_sig_key }}
SIG_SALT={{ anythingllm_sig_salt }}
# ============================================
# PostgreSQL + pgvector (Portia)
# ============================================
VECTOR_DB=pgvector
PGVECTOR_CONNECTION_STRING=postgresql://{{ anythingllm_db_user }}:{{ anythingllm_db_password }}@{{ anythingllm_db_host }}:{{ anythingllm_db_port }}/{{ anythingllm_db_name }}
# ============================================
# LLM Provider - AWS Bedrock
# ============================================
# LLM_PROVIDER='bedrock'
# AWS_BEDROCK_LLM_ACCESS_KEY_ID=
# AWS_BEDROCK_LLM_ACCESS_KEY=
# AWS_BEDROCK_LLM_REGION=us-west-2
# AWS_BEDROCK_LLM_MODEL_PREFERENCE=meta.llama3-1-8b-instruct-v1:0
# AWS_BEDROCK_LLM_MODEL_TOKEN_LIMIT=8191
# AWS_BEDROCK_LLM_CONNECTION_METHOD=iam
# AWS_BEDROCK_LLM_MAX_OUTPUT_TOKENS=4096
# AWS_BEDROCK_LLM_SESSION_TOKEN= # Only required if CONNECTION_METHOD is 'sessionToken'
# or even use Short and Long Term API keys
# AWS_BEDROCK_LLM_CONNECTION_METHOD="apiKey"
# AWS_BEDROCK_LLM_API_KEY=
# ============================================
# LLM Provider - Generic OpenAI (llama-cpp)
# ============================================
LLM_PROVIDER=generic-openai
GENERIC_OPEN_AI_BASE_PATH={{ anythingllm_llm_base_url }}
GENERIC_OPEN_AI_MODEL_PREF={{ anythingllm_llm_model }}
GENERIC_OPEN_AI_MODEL_TOKEN_LIMIT={{ anythingllm_llm_token_limit }}
GENERIC_OPEN_AI_API_KEY={{ anythingllm_llm_api_key }}
# ============================================
# Embedding Configuration
# ============================================
EMBEDDING_ENGINE={{ anythingllm_embedding_engine }}
EMBEDDING_MODEL_PREF={{ anythingllm_embedding_model }}
# ============================================
# TTS Configuration (FastKokoro)
# ============================================
TTS_PROVIDER={{ anythingllm_tts_provider }}
{% if anythingllm_tts_provider == 'openai' %}
TTS_OPEN_AI_KEY={{ anythingllm_tts_api_key }}
TTS_OPEN_AI_ENDPOINT={{ anythingllm_tts_endpoint }}
TTS_OPEN_AI_MODEL={{ anythingllm_tts_model }}
TTS_OPEN_AI_VOICE={{ anythingllm_tts_voice }}
{% endif %}
# ============================================
# Whisper Configuration
# ============================================
WHISPER_PROVIDER=local
# use openai hosted whisper model.
# WHISPER_PROVIDER="openai"
# OPEN_AI_KEY=sk-xxxxxxxx
# ============================================
# Telemetry & Environment
# ============================================
DISABLE_TELEMETRY=true
NODE_ENV=production

View File

@@ -0,0 +1,29 @@
---
# Stage an AnythingLLM release tarball on the Ansible controller.
#
# Refreshes the local anything-llm checkout, then exports the requested
# release (anythingllm_rel) as <rel_dir>/anythingllm_<rel>.tar for the
# deploy playbook to unarchive onto target hosts.
#
# Required vars: github_dir, rel_dir, anythingllm_rel
- name: Stage AnythingLLM release tarball
  hosts: localhost
  gather_facts: false
  vars:
    anythingllm_repo_dir: "{{ github_dir }}/anything-llm"
    archive_path: "{{ rel_dir }}/anythingllm_{{ anythingllm_rel }}.tar"
  tasks:
    - name: Ensure release directory exists
      ansible.builtin.file:
        path: "{{ rel_dir }}"
        state: directory
        mode: '0755'

    # Treated as read-only staging work: fetch only updates remote-tracking
    # refs, never the working tree.
    - name: Fetch all remote branches and tags
      ansible.builtin.command: git fetch --all
      args:
        chdir: "{{ anythingllm_repo_dir }}"
      changed_when: false

    - name: Pull latest changes
      ansible.builtin.command: git pull
      args:
        chdir: "{{ anythingllm_repo_dir }}"
      register: git_pull
      changed_when: "'Already up to date' not in git_pull.stdout"

    # git archive always rewrites the tarball, so report changed honestly.
    - name: Create AnythingLLM archive for specified release
      ansible.builtin.command: git archive -o "{{ archive_path }}" "{{ anythingllm_rel }}"
      args:
        chdir: "{{ anythingllm_repo_dir }}"
      changed_when: true

11
ansible/apt_update.yml Normal file
View File

@@ -0,0 +1,11 @@
---
# Upgrade every installed package on all Ubuntu hosts.
#
# Usage:
#   ansible-playbook apt_update.yml
- name: Update Ubuntu packages
  hosts: ubuntu
  become: true
  tasks:
    # name: "*" with state: latest is the module-level equivalent of
    # `apt upgrade`; update_cache refreshes the index first. The
    # ansible-lint package-latest warning is expected and intentional here.
    - name: Update all packages to the latest version
      ansible.builtin.apt:
        name: "*"
        state: latest
        update_cache: true

76
ansible/argos/deploy.yml Normal file
View File

@@ -0,0 +1,76 @@
---
# Deploy the Argos MCP server as a docker-compose stack.
# Runs against all ubuntu hosts but bails out early on any host whose
# `services` inventory list does not contain 'argos'.
- name: Deploy Argos MCP Server
  hosts: ubuntu
  handlers:
    - name: restart argos
      become: true
      community.docker.docker_compose_v2:
        project_src: "{{argos_directory}}"
        state: restarted
  tasks:
    # Per-host gate: only hosts that declare the 'argos' service continue.
    - name: Check if host has argos service
      ansible.builtin.set_fact:
        has_argos_service: "{{ 'argos' in services | default([]) }}"
    - name: Skip hosts without argos service
      ansible.builtin.meta: end_host
      when: not has_argos_service

    # Dedicated system account that owns the install directory.
    - name: Create argos group
      become: true
      ansible.builtin.group:
        name: "{{argos_group}}"
        state: present
    - name: Create argos user
      become: true
      ansible.builtin.user:
        name: "{{argos_user}}"
        group: "{{argos_group}}"
        system: true
        create_home: false
    # Group membership lets the connecting user manage deployed files.
    - name: Add ansible user to argos group
      become: true
      ansible.builtin.user:
        name: "{{ansible_user}}"
        groups: "{{argos_group}}"
        append: true

    - name: Create argos directory
      become: true
      ansible.builtin.file:
        path: "{{argos_directory}}"
        owner: "{{argos_user}}"
        group: "{{argos_group}}"
        state: directory
        mode: '750'

    # Tarball is produced by argos/stage.yml on the controller (~/rel).
    - name: Transfer and unarchive git archive
      become: true
      ansible.builtin.unarchive:
        src: "~/rel/argos_{{argos_rel}}.tar"
        dest: "{{argos_directory}}"
        owner: "{{argos_user}}"
        group: "{{argos_group}}"
        mode: '550'

    - name: Template docker-compose.yml
      become: true
      ansible.builtin.template:
        src: docker-compose.yml.j2
        dest: "{{argos_directory}}/docker-compose.yml"
        owner: "{{argos_user}}"
        group: "{{argos_group}}"
        mode: '550'
      notify: restart argos

    # `pull: always` refreshes base images; the handler restarts the stack
    # afterwards if the compose file changed.
    - name: Start argos with docker-compose
      become: true
      community.docker.docker_compose_v2:
        project_src: "{{argos_directory}}"
        state: present
        pull: always

View File

@@ -0,0 +1,44 @@
# docker-compose template for the Argos stack: the searxng-backed MCP
# service plus a memcached instance ("kvdb") used as its result cache.
services:
  argos-searxng:
    build: .
    depends_on:
      - kvdb
    environment:
      - ARGOS_PORT=8000
      - ARGOS_HOST=0.0.0.0
      - ARGOS_SEARXNG_INSTANCES={{ argos_searxng_instances }}
      - ARGOS_MEMCACHED_HOST=kvdb
      - ARGOS_MEMCACHED_PORT=11211
      - ARGOS_CACHE_TTL={{ argos_cache_ttl }}
      - ARGOS_MAX_RESULTS_DEFAULT={{ argos_max_results }}
      - ARGOS_REQUEST_TIMEOUT=30.0
      - ARGOS_HEALTH_CHECK_TIMEOUT=5.0
      - ARGOS_LOG_LEVEL={{ argos_log_level }}
      - ARGOS_ENABLE_STARTUP_HEALTH_CHECK=true
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8000/live"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 40s
    logging:
      driver: syslog
      options:
        syslog-address: "tcp://127.0.0.1:{{ argos_syslog_port }}"
        syslog-format: "{{ syslog_format }}"
        # Fixed: was "athena-kvdb" (copy/paste from another stack); this
        # tag identifies the searxng service in syslog, not the cache.
        tag: "argos-searxng"
    ports:
      # Quoted to keep YAML from mis-parsing the templated port mapping
      # (unquoted `a:b` port strings are a classic YAML type trap).
      - "{{ argos_port }}:8000"
    restart: unless-stopped
  kvdb:
    image: memcached:1.6-trixie
    pull_policy: always
    command: memcached -m 128 -I 10m
    logging:
      driver: syslog
      options:
        syslog-address: "tcp://127.0.0.1:{{ argos_syslog_port }}"
        syslog-format: "{{ syslog_format }}"
        tag: "argos-kvdb"
    restart: unless-stopped

32
ansible/argos/remove.yml Normal file
View File

@@ -0,0 +1,32 @@
---
# Tear down Argos on any host that declares the 'argos' service:
# stop the compose stack, prune images, remove the install directory.
- name: Remove Argos from Dev Environment
  hosts: ubuntu
  become: true
  tasks:
    - name: Check if host has argos service
      ansible.builtin.set_fact:
        has_argos_service: "{{ 'argos' in services | default([]) }}"
    - name: Skip hosts without argos service
      ansible.builtin.meta: end_host
      when: not has_argos_service

    - name: Stop and remove Docker containers, volumes, and images
      community.docker.docker_compose_v2:
        project_src: "{{ argos_directory }}"
        state: absent
        remove_images: all
        remove_volumes: true

    - name: Prune Docker images
      # Fixed: `docker_prune` lives in the community.docker collection;
      # there is no `ansible.builtin.docker_prune` module, so the old
      # FQCN failed module resolution.
      community.docker.docker_prune:
        images: true
        images_filters:
          dangling: false

    - name: Remove Argos directory
      # Play-level `become: true` already applies; no task-level override.
      ansible.builtin.file:
        path: "{{ argos_directory }}"
        state: absent

34
ansible/argos/stage.yml Normal file
View File

@@ -0,0 +1,34 @@
---
# Stage an Argos release: refresh the local clone, check out the requested
# ref, then produce a `git archive` tarball for `argos_rel`.
- name: Stage Argos release tarball
  hosts: localhost
  gather_facts: false
  vars:
    argos_repo_dir: "{{ repo_dir }}/argos"
    archive_path: "{{ rel_dir }}/argos_{{ argos_rel }}.tar"
  tasks:
    - name: Ensure release directory exists
      ansible.builtin.file:          # FQCN; was bare `file`
        path: "{{ rel_dir }}"
        state: directory
        mode: '0755'                 # explicit quoted octal, not '755'

    - name: Fetch all remote branches and tags
      ansible.builtin.command: git fetch --all
      args:
        chdir: "{{ argos_repo_dir }}"
      changed_when: false            # fetch only refreshes remote-tracking refs

    - name: Git pull
      ansible.builtin.command: git pull
      args:
        chdir: "{{ argos_repo_dir }}"
      register: argos_pull
      # Report "changed" only when the pull actually moved the branch.
      changed_when: "'Already up to date' not in argos_pull.stdout"

    - name: Checkout specified argos release branch or tag
      ansible.builtin.command: git checkout "{{ argos_rel }}"
      args:
        chdir: "{{ argos_repo_dir }}"

    - name: Create argos archive for specified release
      ansible.builtin.command: git archive -o "{{ archive_path }}" "{{ argos_rel }}"
      args:
        chdir: "{{ argos_repo_dir }}"

243
ansible/arke/.env.example Normal file
View File

@@ -0,0 +1,243 @@
# Arke Configuration Example
# Copy this file to .env and update with your values
# ============================================================================
# Server Configuration
# ============================================================================
HOST=0.0.0.0
PORT=8000
DEBUG=false
LOG_LEVEL=info
RELOAD=false
# ============================================================================
# PostgreSQL Database Configuration
# ============================================================================
# Core PostgreSQL connection settings
DB_HOST=localhost
DB_PORT=5432
DB_NAME=arke
DB_USER=arke
DB_PASSWORD=your_secure_password
# ============================================================================
# Memcached Configuration
# ============================================================================
MEMCACHED_HOST=localhost
MEMCACHED_PORT=11211
# ============================================================================
# Multi-Backend Configuration (Environment Variable Format)
# ============================================================================
# Ansible-friendly configuration using individual environment variables
# No JSON escaping issues, works perfectly with Ansible Vault
# --- NTTh Backend (Token Pool) ---
# NTTh is treated specially as it manages a pool of tokens with session limits
NTTH_BACKEND_ENABLED=true
NTTH_SESSION_LIMIT=90
NTTH_SESSION_TTL=3600
NTTH_TOKEN_CACHE_TTL=82800
# NTTh Tokens (numbered, add as many as needed)
NTTH_TOKEN_1_APP_ID=your_app_id_1
NTTH_TOKEN_1_APP_SECRET=your_secret_1
NTTH_TOKEN_1_NAME=production-primary
NTTH_TOKEN_2_APP_ID=your_app_id_2
NTTH_TOKEN_2_APP_SECRET=your_secret_2
NTTH_TOKEN_2_NAME=production-backup
# Add more tokens as needed:
# NTTH_TOKEN_3_APP_ID=your_app_id_3
# NTTH_TOKEN_3_APP_SECRET=your_secret_3
# NTTH_TOKEN_3_NAME=production-tertiary
# --- Standard Backends (OpenAI-Compatible, etc.) ---
# Backend 1: Nyx (llama-cpp instance)
BACKEND_1_NAME=nyx
BACKEND_1_TYPE=openai-compatible
BACKEND_1_ENABLED=true
BACKEND_1_BASE_URL=http://nyx.helu.ca:8080/v1
BACKEND_1_API_KEY=not-needed
BACKEND_1_MODEL_PREFIX=nyx
BACKEND_1_TIMEOUT=60
# Backend 2: Athena (llama-cpp instance)
BACKEND_2_NAME=athena
BACKEND_2_TYPE=openai-compatible
BACKEND_2_ENABLED=true
BACKEND_2_BASE_URL=http://athena.helu.ca:8080/v1
BACKEND_2_API_KEY=not-needed
BACKEND_2_MODEL_PREFIX=athena
BACKEND_2_TIMEOUT=60
# ============================================================================
# Future Backend Examples (Reference Only - Not Active)
# ============================================================================
# These examples show how to configure other backend types when needed
# --- Anthropic Backend Example ---
# BACKEND_3_NAME=anthropic
# BACKEND_3_TYPE=anthropic
# BACKEND_3_ENABLED=true
# BACKEND_3_BASE_URL=https://api.anthropic.com
# BACKEND_3_API_KEY=sk-ant-api03-xxxxx
# BACKEND_3_MODEL_PREFIX=anthropic
# BACKEND_3_TIMEOUT=60
# --- Azure OpenAI Backend Example ---
# BACKEND_4_NAME=azure-openai
# BACKEND_4_TYPE=azure-openai
# BACKEND_4_ENABLED=true
# BACKEND_4_BASE_URL=https://your-resource.openai.azure.com
# BACKEND_4_API_KEY=your-azure-key
# BACKEND_4_MODEL_PREFIX=azure
# BACKEND_4_DEPLOYMENT_NAME=gpt-4
# BACKEND_4_API_VERSION=2024-02-15-preview
# BACKEND_4_TIMEOUT=60
# --- AWS Bedrock Backend Example ---
# BACKEND_5_NAME=bedrock
# BACKEND_5_TYPE=bedrock
# BACKEND_5_ENABLED=true
# BACKEND_5_AWS_REGION=us-east-1
# BACKEND_5_AWS_ACCESS_KEY_ID=AKIA...
# BACKEND_5_AWS_SECRET_ACCESS_KEY=secret...
# BACKEND_5_MODEL_PREFIX=bedrock
# BACKEND_5_TIMEOUT=60
# --- OpenAI Direct Backend Example ---
# BACKEND_6_NAME=openai
# BACKEND_6_TYPE=openai-compatible
# BACKEND_6_ENABLED=true
# BACKEND_6_BASE_URL=https://api.openai.com/v1
# BACKEND_6_API_KEY=sk-...
# BACKEND_6_MODEL_PREFIX=openai
# BACKEND_6_TIMEOUT=60
# ============================================================================
# Embedding Provider Configuration
# ============================================================================
# Choose your embedding provider: 'ollama' or 'openai'
EMBEDDING_PROVIDER=ollama
# --- Ollama Configuration (when EMBEDDING_PROVIDER=ollama) ---
OLLAMA_HOST=nyx.helu.ca
OLLAMA_PORT=11434
EMBEDDING_MODEL=nomic-embed-text
# --- OpenAI-Compatible Configuration (when EMBEDDING_PROVIDER=openai) ---
# Works with OpenAI API, llama-cpp, LocalAI, and other compatible services
OPENAI_EMBEDDING_BASE_URL=http://localhost:8080
OPENAI_EMBEDDING_API_KEY=
OPENAI_EMBEDDING_MODEL=text-embedding-ada-002
# --- Common Embedding Configuration ---
EMBEDDING_TIMEOUT=30.0
# --- Batch Chunking Configuration (for llama-cpp) ---
# These settings optimize embedding requests for llama-cpp's context limits
EMBEDDING_BATCH_SIZE=512
EMBEDDING_UBATCH_SIZE=512
EMBEDDING_MAX_CONTEXT=8192
# ============================================================================
# Memory System Configuration
# ============================================================================
MEMORY_ENABLED=true
MAX_CONTEXT_TOKENS=8000
SIMILARITY_THRESHOLD=0.7
MIN_IMPORTANCE_SCORE=0.7
# ============================================================================
# Message Size Limits
# ============================================================================
# Maximum tokens allowed for incoming messages (default: 32768)
# This limit prevents excessively large requests that could overwhelm the system
MESSAGE_MAX_TOKENS=32768
# ============================================================================
# Background Task Configuration (Async Embedding Generation)
# ============================================================================
# Enable background task processing for async operations
BACKGROUND_TASKS_ENABLED=true
# Number of worker threads for background tasks
BACKGROUND_TASK_WORKERS=5
# Maximum retry attempts for failed tasks
BACKGROUND_TASK_MAX_RETRIES=3
# Initial retry delay in seconds (uses exponential backoff)
BACKGROUND_TASK_RETRY_DELAY=1.0
# Cleanup interval for old completed/failed tasks (hours)
BACKGROUND_TASK_CLEANUP_HOURS=24
# --- Async Embedding Configuration ---
# Enable async embedding generation (non-blocking)
ASYNC_EMBEDDINGS_ENABLED=true
# Number of messages to batch together for embedding generation
ASYNC_EMBEDDING_BATCH_SIZE=50
# Priority level for embedding tasks: LOW, NORMAL, HIGH, CRITICAL
ASYNC_EMBEDDING_PRIORITY=NORMAL
# --- Async Deduplication Configuration ---
# Enable async document enhancement (non-blocking embedding generation for deduplicated documents)
ASYNC_DEDUPLICATION_ENABLED=true
# Number of documents to batch together for enhancement
DEDUPLICATION_BATCH_SIZE=20
# Priority level for document enhancement tasks: LOW, NORMAL, HIGH, CRITICAL
DEDUPLICATION_ENHANCEMENT_PRIORITY=NORMAL
# Enable HTML content extraction and processing
HTML_CONTENT_EXTRACTION=true
# Minimum token count for document deduplication
MIN_TOKENS_FOR_DEDUP=500
# Semantic similarity threshold for duplicate detection (0.0-1.0)
DEDUPLICATION_THRESHOLD=0.95
# Reference expansion strategy: smart, full, summary, minimal
REFERENCE_EXPANSION_STRATEGY=smart
# ============================================================================
# Monitoring Configuration
# ============================================================================
PROMETHEUS_ENABLED=true
METRICS_PORT=9090
# ============================================================================
# Example Configurations for Different Setups
# ============================================================================
# Example 1: Using Ollama (default)
# EMBEDDING_PROVIDER=ollama
# OLLAMA_HOST=localhost
# OLLAMA_PORT=11434
# EMBEDDING_MODEL=nomic-embed-text
# Example 2: Using llama-cpp with OpenAI-compatible API
# EMBEDDING_PROVIDER=openai
# OPENAI_EMBEDDING_BASE_URL=http://localhost:8080
# OPENAI_EMBEDDING_MODEL=text-embedding-ada-002
# OPENAI_EMBEDDING_API_KEY= # Optional, leave empty if not required
# Example 3: Using actual OpenAI API
# EMBEDDING_PROVIDER=openai
# OPENAI_EMBEDDING_BASE_URL=https://api.openai.com
# OPENAI_EMBEDDING_MODEL=text-embedding-3-small
# OPENAI_EMBEDDING_API_KEY=sk-your-openai-api-key
# Example 4: Using LocalAI
# EMBEDDING_PROVIDER=openai
# OPENAI_EMBEDDING_BASE_URL=http://localhost:8080
# OPENAI_EMBEDDING_MODEL=bert-embeddings
# OPENAI_EMBEDDING_API_KEY= # Optional

147
ansible/arke/.env.j2 Normal file
View File

@@ -0,0 +1,147 @@
# Arke Environment Configuration
# Edit these values as needed before deployment
# ============================================================================
# Server Configuration
# ============================================================================
HOST=0.0.0.0
PORT={{ arke_port }}
DEBUG=false
LOG_LEVEL=info
RELOAD={{ arke_reload | default('false') }}
# ============================================================================
# PostgreSQL Database Configuration
# ============================================================================
DB_HOST={{ arke_db_host }}
DB_PORT={{ arke_db_port }}
DB_NAME={{ arke_db_name }}
DB_USER={{ arke_db_user }}
DB_PASSWORD={{ arke_db_password }}
# ============================================================================
# Memcached Configuration
# ============================================================================
MEMCACHED_HOST={{ arke_memcached_host | default('localhost') }}
MEMCACHED_PORT={{ arke_memcached_port | default('11211') }}
# ============================================================================
# NTTh API Configuration
# ============================================================================
# --- NTTh Backend (Token Pool) ---
# NTTh is treated specially as it manages a pool of tokens with session limits
NTTH_BACKEND_ENABLED=true
NTTH_SESSION_LIMIT=90
NTTH_SESSION_TTL=3600
NTTH_TOKEN_CACHE_TTL=82800
# NTTh Tokens (numbered, add as many as needed)
NTTH_TOKEN_1_NAME={{ntth_token_1_app_name}}
NTTH_TOKEN_1_APP_ID={{ntth_token_1_app_id}}
NTTH_TOKEN_1_APP_SECRET={{ntth_token_1_app_secret}}
NTTH_TOKEN_2_NAME={{ntth_token_2_app_name}}
NTTH_TOKEN_2_APP_ID={{ntth_token_2_app_id}}
NTTH_TOKEN_2_APP_SECRET={{ntth_token_2_app_secret}}
NTTH_TOKEN_3_NAME={{ntth_token_3_app_name}}
NTTH_TOKEN_3_APP_ID={{ntth_token_3_app_id}}
NTTH_TOKEN_3_APP_SECRET={{ntth_token_3_app_secret}}
NTTH_TOKEN_4_NAME={{ntth_token_4_app_name}}
NTTH_TOKEN_4_APP_ID={{ntth_token_4_app_id}}
NTTH_TOKEN_4_APP_SECRET={{ntth_token_4_app_secret}}
# Session Management
SESSION_LIMIT={{ arke_session_limit | default('90') }}
SESSION_TTL={{ arke_session_ttl | default('3600') }}
TOKEN_CACHE_TTL={{ arke_token_cache_ttl | default('82800') }}
# ============================================================================
# Embedding Provider Configuration
# ============================================================================
# Choose your embedding provider: 'ollama' or 'openai'
EMBEDDING_PROVIDER={{arke_embedding_provider}}
# --- OpenAI-Compatible Configuration (when EMBEDDING_PROVIDER=openai) ---
# Works with OpenAI API, llama-cpp, LocalAI, and other compatible services
OPENAI_EMBEDDING_BASE_URL={{arke_openai_embedding_base_url}}
OPENAI_EMBEDDING_API_KEY={{arke_openai_embedding_api_key}}
OPENAI_EMBEDDING_MODEL={{arke_openai_embedding_model}}
# --- Embedding Configuration ---
EMBEDDING_TIMEOUT={{ arke_embedding_timeout | default('30.0') }}
EMBEDDING_BATCH_SIZE={{arke_embedding_batch_size}}
EMBEDDING_UBATCH_SIZE={{arke_embedding_ubatch_size}}
EMBEDDING_MAX_CONTEXT={{arke_embedding_max_context}}
# ============================================================================
# Memory System Configuration
# ============================================================================
MEMORY_ENABLED={{ arke_memory_enabled | default('true') }}
MAX_CONTEXT_TOKENS={{ arke_max_context_tokens | default('8000') }}
SIMILARITY_THRESHOLD={{ arke_similarity_threshold | default('0.7') }}
MIN_IMPORTANCE_SCORE={{ arke_min_importance_score | default('0.7') }}
# ============================================================================
# Message Size Limits
# ============================================================================
# Maximum tokens allowed for incoming messages (application default: 32768;
# deliberately raised to 700000 here to permit very large requests)
MESSAGE_MAX_TOKENS=700000
# ============================================================================
# Background Task Configuration (Async Embedding Generation)
# ============================================================================
# Enable background task processing for async operations
BACKGROUND_TASKS_ENABLED=true
# Number of worker threads for background tasks
BACKGROUND_TASK_WORKERS=5
# Maximum retry attempts for failed tasks
BACKGROUND_TASK_MAX_RETRIES=3
# Initial retry delay in seconds (uses exponential backoff)
BACKGROUND_TASK_RETRY_DELAY=1.0
# Cleanup interval for old completed/failed tasks (hours)
BACKGROUND_TASK_CLEANUP_HOURS=24
# --- Async Embedding Configuration ---
# Enable async embedding generation (non-blocking)
ASYNC_EMBEDDINGS_ENABLED=true
# Number of messages to batch together for embedding generation
ASYNC_EMBEDDING_BATCH_SIZE=50
# Priority level for embedding tasks: LOW, NORMAL, HIGH, CRITICAL
ASYNC_EMBEDDING_PRIORITY=NORMAL
# --- Async Deduplication Configuration ---
# Enable async document enhancement (non-blocking embedding generation for deduplicated documents)
ASYNC_DEDUPLICATION_ENABLED=true
# Number of documents to batch together for enhancement
DEDUPLICATION_BATCH_SIZE=20
# Priority level for document enhancement tasks: LOW, NORMAL, HIGH, CRITICAL
DEDUPLICATION_ENHANCEMENT_PRIORITY=NORMAL
# Enable HTML content extraction and processing
HTML_CONTENT_EXTRACTION=true
# Minimum token count for document deduplication
MIN_TOKENS_FOR_DEDUP=500
# Semantic similarity threshold for duplicate detection (0.0-1.0)
DEDUPLICATION_THRESHOLD=0.95
# Reference expansion strategy: smart, full, summary, minimal
REFERENCE_EXPANSION_STRATEGY=smart
# ============================================================================
# Monitoring Configuration
# ============================================================================
PROMETHEUS_ENABLED=true
METRICS_PORT={{arke_metrics_port}}

View File

@@ -0,0 +1,24 @@
# systemd unit template for the Arke MCP server (rendered by Ansible).
[Unit]
Description=Arke MCP Server
After=network.target
Wants=network.target

[Service]
Type=simple
User={{arke_user}}
Group={{arke_group}}
WorkingDirectory={{arke_directory}}
EnvironmentFile={{arke_directory}}/.env
ExecStart={{arke_directory}}/.venv/bin/python {{arke_directory}}/arke.py
Restart=always
RestartSec=10
# Security hardening
NoNewPrivileges=true
PrivateTmp=true
ProtectSystem=strict
ProtectHome=true
# ProtectSystem=strict mounts the whole filesystem read-only for this unit.
# The deploy playbook creates a writable media/generated_images directory,
# so that path must be explicitly exempted or image writes will fail.
ReadWritePaths={{arke_directory}}/media

[Install]
WantedBy=multi-user.target

181
ansible/arke/deploy.yml Normal file
View File

@@ -0,0 +1,181 @@
---
# Deploy the Arke proxy server: dedicated system account, release tarball,
# Python virtualenv, memcached, templated .env + systemd unit, then
# health/metrics validation. Ordering matters: packages and the venv must
# exist before pip installs; templates notify the restart handler.
- name: Deploy Arke Proxy Server
  hosts: arke
  vars:
    # Make temp files group-readable so become_user tasks can read them.
    ansible_common_remote_group: "{{arke_group}}"
    allow_world_readable_tmpfiles: true
  tasks:
    - name: Create Arke group
      become: true
      ansible.builtin.group:
        name: "{{arke_group}}"
        state: present
    - name: Create arke user
      become: true
      ansible.builtin.user:
        name: "{{arke_user}}"
        group: "{{arke_group}}"
        home: "{{arke_directory}}"
        shell: /bin/bash
        system: true
        create_home: false
    # NOTE(review): other playbooks in this repo (argos) use `ansible_user`
    # here; `remote_user` must be supplied by inventory/vars — confirm
    # which variable is intended.
    - name: Add remote_user to arke group
      become: true
      ansible.builtin.user:
        name: "{{remote_user}}"
        groups: "{{arke_group}}"
        append: true
    - name: Create required directories
      become: true
      ansible.builtin.file:
        path: "{{arke_directory}}"
        owner: "{{arke_user}}"
        group: "{{arke_group}}"
        state: directory
        mode: '750'
    - name: Ensure tar is installed for unarchive task
      become: true
      ansible.builtin.apt:
        name:
          - tar
        state: present
        update_cache: true
    - name: Ensure Python, Python Dev, Venv module is installed
      become: true
      ansible.builtin.apt:
        name: [python3, python3-venv, python3-dev]
        state: present
        update_cache: true
    # Tarball is produced by arke/stage.yml on the controller (~/rel).
    - name: Transfer and unarchive git archive
      become: true
      ansible.builtin.unarchive:
        src: "~/rel/arke_{{arke_rel}}.tar"
        dest: "{{arke_directory}}"
        owner: "{{arke_user}}"
        group: "{{arke_group}}"
        mode: '550'
      notify: restart arke
    # Generated images are written at runtime; keep this path writable
    # (the systemd unit must also exempt it from ProtectSystem=strict).
    - name: Ensure media directories are writable
      become: true
      ansible.builtin.file:
        path: "{{arke_directory}}/media/generated_images"
        owner: "{{arke_user}}"
        group: "{{arke_group}}"
        state: directory
        mode: '750'
    - name: Create virtual environment for Arke
      become: true
      become_user: "{{arke_user}}"
      ansible.builtin.command:
        cmd: "python3 -m venv {{arke_directory}}/.venv/"
        creates: "{{arke_directory}}/.venv/bin/activate"
    - name: Install wheel in virtual environment
      become: true
      become_user: "{{arke_user}}"
      ansible.builtin.pip:
        name:
          - wheel
        state: latest
        virtualenv: "{{arke_directory}}/.venv"
    # Installs the unpacked release ("name: .") via its pyproject.toml.
    - name: Install pyproject.toml dependencies in virtualenv
      become: true
      become_user: "{{arke_user}}"
      ansible.builtin.pip:
        chdir: "{{arke_directory}}"
        name: .
        virtualenv: "{{arke_directory}}/.venv"
        virtualenv_command: python3 -m venv
    - name: Install Memcached
      become: true
      ansible.builtin.apt:
        name: memcached
        state: present
        update_cache: true
    - name: Ensure Memcached is running
      become: true
      ansible.builtin.service:
        name: memcached
        state: started
        enabled: true
    # .env holds secrets — group-readable only (640).
    - name: Template Arke .env configuration
      become: true
      ansible.builtin.template:
        src: .env.j2
        dest: "{{arke_directory}}/.env"
        owner: "{{arke_user}}"
        group: "{{arke_group}}"
        mode: '640'
      notify: restart arke
    - name: Template systemd service file
      become: true
      ansible.builtin.template:
        src: arke.service.j2
        dest: /etc/systemd/system/arke.service
        owner: root
        group: root
        mode: '644'
      notify: restart arke
    - name: Enable and start arke service
      become: true
      ansible.builtin.systemd:
        name: arke
        enabled: true
        state: started
        daemon_reload: true
    # The two debug tasks below are operator reminders only; they make no
    # changes on the host.
    - name: Ensure Arke metrics endpoint is open to Prometheus (manual step if not using ufw)
      ansible.builtin.debug:
        msg: |
          Ensure the host's firewall allows inbound TCP on port 8000 from sao.helu.ca for Prometheus scraping.
          If using ufw:
            sudo ufw allow from <sao.helu.ca_ip> to any port 8000 proto tcp
    - name: Reminder - Update Prometheus scrape config on sao.helu.ca
      ansible.builtin.debug:
        msg: |
          Add the following job/target to your Prometheus configuration on sao.helu.ca:
            - job_name: 'arke'
              static_configs:
                - targets: ['<arke_host>:{{arke_port}}']
    # Retry for up to ~25s while the freshly started service comes up.
    - name: Validate Arke health endpoints
      ansible.builtin.uri:
        url: "http://localhost:{{arke_port}}/health"
        status_code: 200
        return_content: true
      register: health_check
      retries: 5
      delay: 5
      until: health_check.status == 200
    - name: Validate Arke /metrics endpoint
      ansible.builtin.uri:
        url: "http://localhost:{{arke_port}}/metrics"
        status_code: 200
        return_content: false
      register: metrics_check
      retries: 5
      delay: 5
      until: metrics_check.status == 200
  handlers:
    - name: restart arke
      become: true
      ansible.builtin.systemd:
        name: arke
        state: restarted

26
ansible/arke/remove.yml Normal file
View File

@@ -0,0 +1,26 @@
---
# Tear down the Arke proxy: stop/disable the unit, delete the unit file,
# reload systemd, and remove the install directory.
- name: Remove Arke Proxy Server
  hosts: arke
  become: true
  tasks:
    - name: Stop and disable arke service
      ansible.builtin.systemd:
        name: arke
        state: stopped
        enabled: false
      ignore_errors: true  # unit may not exist on a partially-provisioned host
    - name: Remove systemd service file
      ansible.builtin.file:
        path: /etc/systemd/system/arke.service
        state: absent
    - name: Reload systemd daemon
      ansible.builtin.systemd:
        daemon_reload: true
    - name: Remove Arke directory
      ansible.builtin.file:
        path: "{{arke_directory}}"
        state: absent

29
ansible/arke/stage.yml Normal file
View File

@@ -0,0 +1,29 @@
---
# Stage an Arke release: refresh the local clone, then produce a
# `git archive` tarball for the release named by `arke_rel`.
- name: Stage Arke release tarball
  hosts: localhost
  gather_facts: false
  vars:
    archive_path: "{{ rel_dir }}/arke_{{ arke_rel }}.tar"
    arke_repo_dir: "{{ repo_dir }}/arke"
  tasks:
    - name: Ensure release directory exists
      ansible.builtin.file:          # FQCN; was bare `file`
        path: "{{ rel_dir }}"
        state: directory
        mode: '0755'                 # explicit quoted octal, not '755'

    - name: Fetch all remote branches and tags
      ansible.builtin.command: git fetch --all
      args:
        chdir: "{{ arke_repo_dir }}"
      changed_when: false            # fetch only refreshes remote-tracking refs

    - name: Pull latest changes
      ansible.builtin.command: git pull
      args:
        chdir: "{{ arke_repo_dir }}"
      register: arke_pull
      # Report "changed" only when the pull actually moved the branch.
      changed_when: "'Already up to date' not in arke_pull.stdout"

    - name: Create Arke archive for specified release
      ansible.builtin.command: git archive -o "{{ archive_path }}" "{{ arke_rel }}"
      args:
        chdir: "{{ arke_repo_dir }}"

52
ansible/auth_keys.yml Normal file
View File

@@ -0,0 +1,52 @@
---
# SSH Authorized Keys Management
# Deploys authorized_keys to all ubuntu hosts based on ssh_authorized_users variable
#
# Usage:
#   ansible-playbook auth_keys.yml
#
# Override exclusive mode (removes unlisted keys):
#   ansible-playbook auth_keys.yml -e "ssh_exclusive_mode=true"
#
# Target specific host:
#   ansible-playbook auth_keys.yml --limit ariel.incus
#
# Variables defined in: inventory/group_vars/all/auth_keys.yml
- name: Manage SSH Authorized Keys
  hosts: ubuntu
  become: true
  tasks:
    - name: Ensure .ssh directory exists for each user
      ansible.builtin.file:
        path: "/home/{{ item.name }}/.ssh"
        state: directory
        mode: '0700'
        owner: "{{ item.name }}"
        group: "{{ item.name }}"
      loop: "{{ ssh_authorized_users }}"
      loop_control:
        label: "{{ item.name }}"

    - name: Deploy authorized keys (additive mode)
      ansible.posix.authorized_key:
        user: "{{ item.0.name }}"
        key: "{{ item.1 }}"
        state: present
        exclusive: false
      loop: "{{ ssh_authorized_users | subelements('keys') }}"
      loop_control:
        label: "{{ item.0.name }}: {{ item.1 | truncate(50) }}"
      # default(false) keeps the play runnable when the flag is undefined.
      when: not ssh_exclusive_mode | default(false)

    - name: Deploy authorized keys (exclusive mode)
      ansible.posix.authorized_key:
        user: "{{ item.name }}"
        # Fixed: must use bracket syntax. In Jinja2, `item.keys` resolves
        # to the dict's built-in .keys() method — not the 'keys' entry —
        # so the previous `item.keys | join('\n')` never produced the key
        # list and exclusive mode could not work.
        key: "{{ item['keys'] | join('\n') }}"
        state: present
        exclusive: true
      loop: "{{ ssh_authorized_users }}"
      loop_control:
        label: "{{ item.name }}"
      when: ssh_exclusive_mode | default(false)

View File

@@ -0,0 +1,32 @@
# Agent S Environment Configuration
# Source this file to activate the Agent S environment
# Usage: source ~/.agent_s_env

# Activate Python virtual environment (skip silently if not yet created)
if [ -f "{{ agent_s_venv }}/bin/activate" ]; then
    source "{{ agent_s_venv }}/bin/activate"
    echo "✓ Agent S Python environment activated"
fi

# Set Agent S paths
export AGENT_S_HOME="{{ agent_s_repo }}"
export PATH="{{ agent_s_venv }}/bin:$PATH"

# Display setup (X display provided by XRDP session)
export DISPLAY=:10.0

# Required API Key Vars:
# NOTE(review): `0000` values are placeholders — replace with real
# credentials (or template them from vault) before use.
export HF_TOKEN=0000
export OPENAI_API_KEY=0000

# Helpful aliases
alias agent_s_cd='cd {{ agent_s_repo }}'
alias agent_s_start='cd {{ agent_s_repo }} && source {{ agent_s_venv }}/bin/activate'

echo "Agent S Environment Ready"
echo "  Virtual Env: {{ agent_s_venv }}"
echo "  Repository:  {{ agent_s_repo }}"
echo ""
echo "Quick commands:"
echo "  agent_s_cd    - Change to Agent S directory"
echo "  agent_s_start - Activate environment and change to repo"

347
ansible/caliban/deploy.yml Normal file
View File

@@ -0,0 +1,347 @@
---
- name: Deploy Agent S Computer Use Agent
hosts: agent_s
become: yes
vars:
system_user: "{{ ansible_user }}"
agent_s_venv: "/home/{{ system_user }}/env/agents"
agent_s_repo: "/home/{{ system_user }}/gh/Agent-S"
chrome_deb_url: "https://dl.google.com/linux/direct/google-chrome-stable_current_amd64.deb"
tasks:
# Disable snap - doesn't work in containers with AppArmor disabled
- name: Prevent snapd from being installed
copy:
dest: /etc/apt/preferences.d/nosnap.pref
content: |
Package: snapd
Pin: release a=*
Pin-Priority: -10
mode: '0644'
- name: Update apt cache
apt:
update_cache: yes
cache_valid_time: 3600
# Firefox Setup, must be in place before desktop install to remove snap dependency
- name: Create APT keyrings directory
file:
path: /etc/apt/keyrings
state: directory
mode: '0755'
- name: Download Mozilla APT signing key
get_url:
url: https://packages.mozilla.org/apt/repo-signing-key.gpg
dest: /etc/apt/keyrings/packages.mozilla.org.asc
mode: '0644'
- name: Add Mozilla APT repository
apt_repository:
repo: "deb [signed-by=/etc/apt/keyrings/packages.mozilla.org.asc] https://packages.mozilla.org/apt mozilla main"
filename: mozilla
state: present
- name: Set Firefox package priority to prefer Mozilla repo
copy:
dest: /etc/apt/preferences.d/mozilla
content: |
Package: *
Pin: origin packages.mozilla.org
Pin-Priority: 1000
mode: '0644'
- name: Update apt cache after adding Mozilla repo
apt:
update_cache: yes
- name: Install Firefox from Mozilla repo
apt:
name: firefox
state: present
# Desktop Environment - MATE for better AT-SPI accessibility support
- name: Install MATE desktop environment
apt:
name:
- ubuntu-mate-desktop
state: present
# AT-SPI Accessibility Stack
- name: Install AT-SPI accessibility infrastructure
apt:
name:
- at-spi2-core
- libatk-adaptor
- libatk1.0-0
- libatk-bridge2.0-0
state: present
- name: Configure AT-SPI environment for accessibility
copy:
dest: /etc/profile.d/atspi.sh
content: |
# Enable AT-SPI accessibility bridge
export GTK_MODULES=gail:atk-bridge
export NO_AT_BRIDGE=0
export ACCESSIBILITY_ENABLED=1
mode: '0644'
- name: Configure GPU environment for direct rendering
copy:
dest: /etc/profile.d/gpu.sh
content: |
# Force GPU rendering via AMD render node
export DRI_PRIME=1
export LIBVA_DRIVER_NAME=radeonsi
export MESA_LOADER_DRIVER_OVERRIDE=radeonsi
# Chrome/Chromium GPU flags
export CHROMIUM_FLAGS="--enable-gpu-rasterization --enable-zero-copy --use-gl=egl"
mode: '0644'
# Sound Support
- name: Install sound support packages
apt:
name:
- git
- libpulse-dev
- autoconf
- m4
- intltool
- build-essential
- dpkg-dev
state: present
# Mouse, Assistive Technology, and Python
- name: Install assistive technology and Python packages
apt:
name:
- python3-tk
- python3-dev
- python3-pyatspi
- python3-gi
- gnome-screenshot
- python3-venv
- python3-pip
state: present
# OCR
- name: Install OCR support
apt:
name:
- tesseract-ocr
state: present
# GPU Drivers - AMD Mesa (radeonsi/RADV)
- name: Install AMD GPU drivers and utilities
apt:
name:
- mesa-utils
- mesa-utils-extra
- mesa-vulkan-drivers
- vulkan-tools
- libgl1-mesa-dri
- libglx-mesa0
- libglu1-mesa
- libdrm2
- libdrm-amdgpu1
- libegl1
- libegl-mesa0
- libgbm1
- vainfo
- mesa-va-drivers
state: present
# VirtualGL for GPU-accelerated remote rendering
- name: Check if VirtualGL is installed
command: dpkg -s virtualgl
register: virtualgl_check
failed_when: false
changed_when: false
- name: Download VirtualGL
get_url:
url: https://github.com/VirtualGL/virtualgl/releases/download/3.1.2/virtualgl_3.1.2_amd64.deb
dest: /tmp/virtualgl.deb
mode: '0644'
when: virtualgl_check.rc != 0
- name: Install VirtualGL
apt:
deb: /tmp/virtualgl.deb
state: present
when: virtualgl_check.rc != 0
# GPU Permissions - Add user to video and render groups for DRI access
- name: Add user to video group for GPU access
user:
name: "{{ system_user }}"
groups: video
append: yes
- name: Add user to render group for GPU render node access
user:
name: "{{ system_user }}"
groups: render
append: yes
- name: Create udev rules for GPU device permissions
copy:
dest: /etc/udev/rules.d/99-gpu-permissions.rules
content: |
# Allow video group access to DRI devices
SUBSYSTEM=="drm", KERNEL=="card*", MODE="0666"
SUBSYSTEM=="drm", KERNEL=="renderD*", MODE="0666"
mode: '0644'
notify: Reload udev
# Fix GPU permissions on container start (LXC passthrough doesn't honor udev)
- name: Create systemd service to fix GPU permissions on boot
copy:
dest: /etc/systemd/system/fix-gpu-permissions.service
content: |
[Unit]
Description=Fix GPU device permissions for LXC passthrough
After=local-fs.target
[Service]
Type=oneshot
ExecStart=/bin/chmod 666 /dev/dri/card2 /dev/dri/renderD129
RemainAfterExit=yes
[Install]
WantedBy=multi-user.target
mode: '0644'
notify: Reload systemd
- name: Enable GPU permissions fix service
systemd:
name: fix-gpu-permissions
enabled: yes
state: started
daemon_reload: yes
# Create dl directory
- name: Create download directory
become: no
file:
path: "/home/{{ system_user }}/dl"
state: directory
mode: '0755'
# Chrome Installation
- name: Download Google Chrome
get_url:
url: "{{ chrome_deb_url }}"
dest: /tmp/google-chrome-stable_current_amd64.deb
mode: '0644'
- name: Install Google Chrome
apt:
deb: /tmp/google-chrome-stable_current_amd64.deb
state: present
- name: Clean up Chrome installer
file:
path: /tmp/google-chrome-stable_current_amd64.deb
state: absent
# Chrome GPU Configuration - Use ANGLE+Vulkan to bypass broken GLX in XRDP
- name: Create Chrome policies directory
file:
path: /etc/opt/chrome/policies/managed
state: directory
mode: '0755'
- name: Configure Chrome GPU policy
copy:
dest: /etc/opt/chrome/policies/managed/gpu-policy.json
content: |
{
"HardwareAccelerationModeEnabled": true
}
mode: '0644'
- name: Create Chrome Vulkan launcher
copy:
dest: /usr/share/applications/google-chrome-vulkan.desktop
content: |
[Desktop Entry]
Version=1.0
Name=Google Chrome (Vulkan)
GenericName=Web Browser
Exec=/usr/bin/google-chrome-stable --ignore-gpu-blocklist --use-gl=angle --use-angle=vulkan --enable-features=Vulkan,DefaultANGLEVulkan,VulkanFromANGLE,CanvasOopRasterization --enable-gpu-rasterization --canvas-oop-rasterization %U
Terminal=false
Icon=google-chrome
Type=Application
Categories=Network;WebBrowser;
mode: '0644'
# Python Virtual Environment Setup
# These run as the unprivileged connection user (become: false) so the venv
# and clone end up owned by {{ system_user }}'s account, not root.
- name: Create virtual environment directory
  become: false
  ansible.builtin.file:
    path: "/home/{{ system_user }}/env"
    state: directory
    mode: '0755'

- name: Create Python virtual environment with system site packages
  become: false
  ansible.builtin.command: python3 -m venv --system-site-packages {{ agent_s_venv }}
  args:
    # Idempotence guard: skip once the venv has been created.
    creates: "{{ agent_s_venv }}/bin/activate"

- name: Install Python packages in virtual environment
  become: false
  ansible.builtin.pip:
    name:
      - lxml
      - pillow
      - setuptools
    virtualenv: "{{ agent_s_venv }}"
    state: present

# Clone Agent-S Repository
- name: Create gh directory
  become: false
  ansible.builtin.file:
    path: "/home/{{ system_user }}/gh"
    state: directory
    mode: '0755'

# update: true keeps the checkout tracking main on every run.
- name: Clone Agent-S repository
  become: false
  ansible.builtin.git:
    repo: https://github.com/simular-ai/Agent-S.git
    dest: "{{ agent_s_repo }}"
    version: main
    update: true

- name: Create environment activation script
  become: false
  ansible.builtin.template:
    src: agent_s_env.j2
    dest: "/home/{{ system_user }}/.agent_s_env"
    mode: '0644'
- name: Create XRDP Xorg config directory
  ansible.builtin.file:
    path: /etc/X11/xrdp
    state: directory
    mode: '0755'

# NOTE(review): task previously said "1024x1024", but xorg.conf.j2 pins
# 1024x768 (with 800x600/640x480 fallbacks) — description corrected to match.
- name: Deploy XRDP Xorg configuration for 1024x768 resolution
  ansible.builtin.template:
    src: xorg.conf.j2
    dest: /etc/X11/xrdp/xorg.conf
    mode: '0644'
# Handler names are referenced by notify: elsewhere in the play — do not rename.
handlers:
  - name: Reload systemd
    ansible.builtin.systemd:
      daemon_reload: true

  # Shell (not command) is required: two udevadm invocations joined by '&&'.
  - name: Reload udev
    ansible.builtin.shell: udevadm control --reload-rules && udevadm trigger
    become: true

View File

@@ -0,0 +1,72 @@
# XRDP Xorg configuration - Fixed 1024x768 resolution for Agent-S / UI-TARS compatibility
# Deployed by Ansible to /etc/X11/xrdp/xorg.conf
# Layout ties the xrdpdev screen to the xrdp virtual input devices below.
Section "ServerLayout"
Identifier "X11 Server"
Screen "Screen (xrdpdev)"
InputDevice "xrdpMouse" "CorePointer"
InputDevice "xrdpKeyboard" "CoreKeyboard"
EndSection
Section "ServerFlags"
# Prevent other ServerLayout sections from overriding this one
Option "DefaultServerLayout" "X11 Server"
Option "DontVTSwitch" "on"
# Disable hotplug device/GPU discovery — only the xrdp virtual devices
# declared in this file should be used.
Option "AutoAddDevices" "off"
Option "AutoAddGPU" "off"
EndSection
# Modules loaded for the xrdp session; "xorgxrdp" provides the xrdpdev/xrdpkeyb/
# xrdpmouse drivers referenced below.
Section "Module"
Load "dbe"
Load "ddc"
Load "extmod"
Load "glx"
Load "int10"
Load "record"
Load "vbe"
Load "xorgxrdp"
Load "fb"
EndSection
Section "InputDevice"
Identifier "xrdpKeyboard"
Driver "xrdpkeyb"
EndSection
Section "InputDevice"
Identifier "xrdpMouse"
Driver "xrdpmouse"
EndSection
Section "Monitor"
Identifier "Monitor"
Option "DPMS"
HorizSync 30-80
VertRefresh 60-75
# Fixed resolution for Agent-S / UI-TARS compatibility
Modeline "1024x768" 63.50 1024 1072 1176 1328 768 771 775 798 -hsync +vsync
# Fallback resolutions required by xrdpdev driver
Modeline "800x600" 38.25 800 832 912 1024 600 603 607 624 -hsync +vsync
Modeline "640x480" 23.75 640 664 720 800 480 483 487 500 -hsync +vsync
EndSection
Section "Device"
Identifier "Video Card (xrdpdev)"
Driver "xrdpdev"
# NOTE(review): render node is hard-coded; assumes the AMD GPU exposes
# /dev/dri/renderD129 on every target host — confirm per host.
Option "DRMDevice" "/dev/dri/renderD129"
Option "DRI3" "1"
Option "DRMAllowList" "amdgpu"
EndSection
Section "Screen"
Identifier "Screen (xrdpdev)"
Device "Video Card (xrdpdev)"
Monitor "Monitor"
DefaultDepth 24
SubSection "Display"
Depth 24
# Fixed resolution - 1024x768 with fallbacks for xrdpdev driver
Modes "1024x768" "800x600" "640x480"
Virtual 1024 768
EndSubSection
EndSection

154
ansible/casdoor/app.conf.j2 Normal file
View File

@@ -0,0 +1,154 @@
# -----------------------------------------------------------------------------
# Casdoor Application Configuration
# -----------------------------------------------------------------------------
# Generated by Ansible - do not edit manually
# See: https://casdoor.org/docs/basic/server-installation
# -----------------------------------------------------------------------------
appname = casdoor
httpport = {{ casdoor_port | default(8000) }}
runmode = {{ casdoor_runmode | default('prod') }}
copyrequestbody = true
# -----------------------------------------------------------------------------
# Database Configuration
# -----------------------------------------------------------------------------
# Connects to native PostgreSQL on localhost (deployed by postgresql_ssl playbook)
# NOTE(review): sslmode defaults to 'disable' — acceptable only because the
# connection stays on localhost; confirm before pointing at a remote host.
driverName = postgres
dataSourceName = user={{ casdoor_db_user }} password={{ casdoor_db_password }} host=localhost port={{ casdoor_db_port | default(5432) }} sslmode={{ casdoor_db_sslmode | default('disable') }} dbname={{ casdoor_db_name }}
dbName = {{ casdoor_db_name }}
tableNamePrefix =
showSql = {{ casdoor_showsql | default('false') }}
# -----------------------------------------------------------------------------
# Cache Configuration
# -----------------------------------------------------------------------------
# Empty redisEndpoint leaves Redis unconfigured (no endpoint rendered by default).
redisEndpoint = {{ casdoor_redis_endpoint | default('') }}
# -----------------------------------------------------------------------------
# Storage Configuration
# -----------------------------------------------------------------------------
# OCI Object Storage via S3-compatible API
# The storageProvider JSON block is only rendered when casdoor_s3_endpoint is set.
defaultStorageProvider = {{ casdoor_default_storage_provider | default('') }}
{% if casdoor_s3_endpoint is defined and casdoor_s3_endpoint %}
storageProvider = {
  "owner": "admin",
  "name": "oci-s3",
  "createdTime": "",
  "displayName": "OCI Object Storage",
  "category": "Storage",
  "type": "AWS S3",
  "subType": "",
  "method": "",
  "clientId": "{{ casdoor_s3_access_key }}",
  "clientSecret": "{{ casdoor_s3_secret_key }}",
  "clientId2": "",
  "clientSecret2": "",
  "cert": "",
  "customAuthUrl": "",
  "customScope": "",
  "customTokenUrl": "",
  "customUserInfoUrl": "",
  "customLogo": "",
  "scopes": "",
  "userMapping": null,
  "host": "",
  "port": 0,
  "disableSsl": false,
  "title": "",
  "content": "",
  "receiver": "",
  "regionId": "{{ casdoor_s3_region | default('ca-toronto-1') }}",
  "signName": "",
  "templateCode": "",
  "appId": "",
  "endpoint": "https://{{ casdoor_s3_endpoint }}",
  "intranetEndpoint": "",
  "domain": "{{ casdoor_s3_bucket }}",
  "bucket": "{{ casdoor_s3_bucket }}",
  "pathPrefix": "",
  "metadata": "",
  "idP": "",
  "issuerUrl": "",
  "enableSignAuthnRequest": false,
  "providerUrl": ""
}
{% endif %}
# -----------------------------------------------------------------------------
# Security Configuration
# -----------------------------------------------------------------------------
isCloudIntranet = false
# NOTE(review): authState falls back to casdoor_secret_key when no dedicated
# casdoor_auth_state is set — verify reusing the secret key here is intended.
authState = {{ casdoor_auth_state | default(casdoor_secret_key) }}
socks5Proxy =
verificationCodeTimeout = 10
initScore = 0
logPostOnly = true
isUsernameLowered = false
# -----------------------------------------------------------------------------
# Origin Configuration
# -----------------------------------------------------------------------------
# Must match the external URL used to access Casdoor
origin = {{ casdoor_origin }}
originFrontend = {{ casdoor_origin_frontend | default(casdoor_origin) }}
staticBaseUrl = "https://cdn.casbin.org"
# -----------------------------------------------------------------------------
# Application Settings
# -----------------------------------------------------------------------------
isDemoMode = false
batchSize = 100
enableErrorMask = true
enableGzip = true
# Session timeout in minutes
inactiveTimeoutMinutes = {{ casdoor_inactive_timeout_minutes | default(60) }}
# -----------------------------------------------------------------------------
# Theme Configuration
# -----------------------------------------------------------------------------
themeData = {"themeType": "default", "colorPrimary": "#ffa415", "borderRadius": 6, "isCompact": false}
# -----------------------------------------------------------------------------
# LDAP Configuration
# -----------------------------------------------------------------------------
# Port 0 / empty cert id leaves the LDAP(S) listeners disabled by default.
ldapServerPort = {{ casdoor_ldap_server_port | default(0) }}
ldapsCertId = {{ casdoor_ldaps_cert_id | default('') }}
ldapsServerPort = {{ casdoor_ldaps_server_port | default(0) }}
# -----------------------------------------------------------------------------
# RADIUS Configuration
# -----------------------------------------------------------------------------
radiusServerPort = {{ casdoor_radius_server_port | default(0) }}
radiusDefaultOrganization = {{ casdoor_radius_default_organization | default('built-in') }}
radiusSecret = {{ casdoor_radius_secret | default('') }}
# -----------------------------------------------------------------------------
# Resource Quotas
# -----------------------------------------------------------------------------
# -1 means unlimited for each resource type.
quota = {"organization": -1, "user": -1, "application": -1, "provider": -1}
# -----------------------------------------------------------------------------
# Logging Configuration
# -----------------------------------------------------------------------------
# Console adapter: container stdout is shipped via the Docker syslog driver.
logConfig = {"adapter":"console"}
# -----------------------------------------------------------------------------
# Initialization
# -----------------------------------------------------------------------------
# initDataNewOnly=true: init_data.json only creates missing objects, never
# overwrites existing ones. Path is inside the container (./conf bind mount).
initDataNewOnly = true
initDataFile = "/conf/init_data.json"
frontendBaseDir = "../cc_0"

155
ansible/casdoor/deploy.yml Normal file
View File

@@ -0,0 +1,155 @@
---
# -----------------------------------------------------------------------------
# Casdoor Deployment Playbook
# -----------------------------------------------------------------------------
# Deploys Casdoor SSO Docker container
# Host: titania.incus (Incus container)
# Endpoint: id.ouranos.helu.ca via HAProxy on Titania
#
# Prerequisites:
#   - postgresql_ssl must be deployed first (provides the database)
#   - Docker must be installed
#   - Alloy must be configured for syslog
#
# Secrets are fetched from Ansible Vault via group_vars/all/vault.yml
# -----------------------------------------------------------------------------
- name: Deploy Casdoor
  hosts: ubuntu
  tasks:
    # Run only on hosts whose inventory lists 'casdoor' in services;
    # everything else ends early via end_host.
    - name: Check if host has casdoor service
      ansible.builtin.set_fact:
        has_casdoor_service: "{{ 'casdoor' in services | default([]) }}"

    - name: Skip hosts without casdoor service
      ansible.builtin.meta: end_host
      when: not has_casdoor_service

    # -------------------------------------------------------------------------
    # Create User and Group (system-assigned UID/GID)
    # -------------------------------------------------------------------------
    - name: Create casdoor group
      become: true
      ansible.builtin.group:
        name: "{{ casdoor_group }}"
        system: true

    - name: Create casdoor user
      become: true
      ansible.builtin.user:
        name: "{{ casdoor_user }}"
        comment: "Casdoor service account"
        group: "{{ casdoor_group }}"
        system: true
        create_home: false
        shell: /usr/sbin/nologin

    # Lets the operator account read the 0750/0640 casdoor files below.
    - name: Add ansible_user to casdoor group
      become: true
      ansible.builtin.user:
        name: "{{ ansible_user }}"
        groups: "{{ casdoor_group }}"
        append: true

    # -------------------------------------------------------------------------
    # Query uid/gid for Docker container user
    # Uses the getent module instead of shell pipelines: structured facts,
    # no subshell, and a hard failure if the entry is missing.
    # -------------------------------------------------------------------------
    - name: Look up casdoor passwd entry
      ansible.builtin.getent:
        database: passwd
        key: "{{ casdoor_user }}"

    - name: Look up casdoor group entry
      ansible.builtin.getent:
        database: group
        key: "{{ casdoor_group }}"

    - name: Set uid/gid facts
      ansible.builtin.set_fact:
        # getent_passwd[name] -> [passwd, uid, gid, gecos, home, shell]
        # getent_group[name]  -> [passwd, gid, members]
        casdoor_uid: "{{ getent_passwd[casdoor_user][1] }}"
        casdoor_gid: "{{ getent_group[casdoor_group][1] }}"

    # -------------------------------------------------------------------------
    # Create Directories
    # -------------------------------------------------------------------------
    - name: Create casdoor base directory
      become: true
      ansible.builtin.file:
        path: "{{ casdoor_directory }}"
        owner: "{{ casdoor_user }}"
        group: "{{ casdoor_group }}"
        state: directory
        mode: '0750'

    - name: Create casdoor conf directory
      become: true
      ansible.builtin.file:
        path: "{{ casdoor_directory }}/conf"
        owner: "{{ casdoor_user }}"
        group: "{{ casdoor_group }}"
        state: directory
        mode: '0750'

    # -------------------------------------------------------------------------
    # Template Configuration Files (each change triggers a container restart)
    # -------------------------------------------------------------------------
    - name: Template docker-compose.yml
      become: true
      ansible.builtin.template:
        src: docker-compose.yml.j2
        dest: "{{ casdoor_directory }}/docker-compose.yml"
        owner: "{{ casdoor_user }}"
        group: "{{ casdoor_group }}"
        mode: '0640'
      notify: restart casdoor

    - name: Template app.conf
      become: true
      ansible.builtin.template:
        src: app.conf.j2
        dest: "{{ casdoor_directory }}/conf/app.conf"
        owner: "{{ casdoor_user }}"
        group: "{{ casdoor_group }}"
        mode: '0640'
      notify: restart casdoor

    - name: Template init_data.json
      become: true
      ansible.builtin.template:
        src: init_data.json.j2
        dest: "{{ casdoor_directory }}/conf/init_data.json"
        owner: "{{ casdoor_user }}"
        group: "{{ casdoor_group }}"
        mode: '0640'
      notify: restart casdoor

    # -------------------------------------------------------------------------
    # Reset SSH Connection (apply group changes)
    # -------------------------------------------------------------------------
    - name: Reset SSH connection to apply group changes
      ansible.builtin.meta: reset_connection

    # -------------------------------------------------------------------------
    # Start Services
    # -------------------------------------------------------------------------
    - name: Start Casdoor service
      become: true
      community.docker.docker_compose_v2:
        project_src: "{{ casdoor_directory }}"
        state: present
        pull: always

  handlers:
    - name: restart casdoor
      become: true
      community.docker.docker_compose_v2:
        project_src: "{{ casdoor_directory }}"
        state: restarted

View File

@@ -0,0 +1,34 @@
# -----------------------------------------------------------------------------
# Casdoor Docker Compose
# -----------------------------------------------------------------------------
# Casdoor SSO - connects to native PostgreSQL on localhost
# Generated by Ansible - do not edit manually
# -----------------------------------------------------------------------------
services:
  # ---------------------------------------------------------------------------
  # Casdoor - SSO Identity Provider
  # ---------------------------------------------------------------------------
  casdoor:
    image: casbin/casdoor:latest
    pull_policy: always
    container_name: casdoor
    network_mode: host  # Access localhost PostgreSQL directly
    environment:
      RUNNING_IN_DOCKER: "true"
    # Drop root: run as the host casdoor account (uid/gid resolved by the
    # deploy playbook) so the read-only ./conf mount permissions line up.
    user: "{{ casdoor_uid }}:{{ casdoor_gid }}"
    volumes:
      - ./conf:/conf:ro
    # Ship container stdout to the local syslog listener (Alloy).
    logging:
      driver: syslog
      options:
        syslog-address: "tcp://127.0.0.1:{{ casdoor_syslog_port }}"
        syslog-format: "{{ syslog_format | default('rfc3164') }}"
        tag: "casdoor"
    restart: unless-stopped
    # NOTE(review): healthcheck assumes wget exists inside the casdoor image
    # — confirm, or the container will report unhealthy on exec failure.
    healthcheck:
      test: ["CMD", "wget", "-q", "--spider", "http://localhost:{{ casdoor_port }}/api/health"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 60s

View File

@@ -0,0 +1,350 @@
{
"organizations": [
{
"owner": "admin",
"name": "heluca",
"displayName": "Helu.ca",
"websiteUrl": "https://helu.ca",
"favicon": "https://helu.ca/media/images/favicon.original.png",
"logo": "https://helu.ca/media/images/helu-ca_logo.original.svg",
"passwordType": "bcrypt",
"passwordSalt": "",
"passwordOptions": ["AtLeast6"],
"countryCodes": ["CA", "US"],
"defaultAvatar": "",
"defaultApplication": "angelia",
"tags": [],
"languages": ["en", "fr"],
"masterPassword": "",
"defaultPassword": "",
"initScore": 2000,
"enableSoftDeletion": false,
"isProfilePublic": true,
"useEmailAsUsername": true,
"disableSignin": false,
"accountItems": [
{"name": "Organization", "visible": true, "viewRule": "Public", "modifyRule": "Admin"},
{"name": "ID", "visible": true, "viewRule": "Public", "modifyRule": "Immutable"},
{"name": "Name", "visible": true, "viewRule": "Public", "modifyRule": "Admin"},
{"name": "Display name", "visible": true, "viewRule": "Public", "modifyRule": "Self"},
{"name": "Avatar", "visible": true, "viewRule": "Public", "modifyRule": "Self"},
{"name": "User type", "visible": true, "viewRule": "Public", "modifyRule": "Admin"},
{"name": "Password", "visible": true, "viewRule": "Self", "modifyRule": "Self"},
{"name": "Email", "visible": true, "viewRule": "Public", "modifyRule": "Self"},
{"name": "Phone", "visible": true, "viewRule": "Public", "modifyRule": "Self"},
{"name": "Country code", "visible": true, "viewRule": "Public", "modifyRule": "Admin"},
{"name": "Country/Region", "visible": true, "viewRule": "Public", "modifyRule": "Self"},
{"name": "Location", "visible": true, "viewRule": "Public", "modifyRule": "Self"},
{"name": "Address", "visible": true, "viewRule": "Public", "modifyRule": "Self"},
{"name": "Affiliation", "visible": true, "viewRule": "Public", "modifyRule": "Self"},
{"name": "Title", "visible": true, "viewRule": "Public", "modifyRule": "Self"},
{"name": "Homepage", "visible": true, "viewRule": "Public", "modifyRule": "Self"},
{"name": "Bio", "visible": true, "viewRule": "Public", "modifyRule": "Self"},
{"name": "Roles", "visible": true, "viewRule": "Public", "modifyRule": "Immutable"},
{"name": "Permissions", "visible": true, "viewRule": "Public", "modifyRule": "Immutable"},
{"name": "Groups", "visible": true, "viewRule": "Public", "modifyRule": "Admin"},
{"name": "3rd-party logins", "visible": true, "viewRule": "Self", "modifyRule": "Self"},
{"name": "Properties", "visible": true, "viewRule": "Admin", "modifyRule": "Admin"},
{"name": "Is admin", "visible": true, "viewRule": "Admin", "modifyRule": "Admin"},
{"name": "Is forbidden", "visible": true, "viewRule": "Admin", "modifyRule": "Admin"},
{"name": "Is deleted", "visible": true, "viewRule": "Admin", "modifyRule": "Admin"},
{"name": "Multi-factor authentication", "visible": true, "viewRule": "Self", "modifyRule": "Self"},
{"name": "WebAuthn credentials", "visible": true, "viewRule": "Self", "modifyRule": "Self"},
{"name": "Managed accounts", "visible": true, "viewRule": "Self", "modifyRule": "Self"}
]
}
],
"applications": [
{
"owner": "admin",
"name": "angelia",
"displayName": "Helu.ca",
"logo": "https://helu.ca/media/images/helu-ca_logo.original.svg",
"homepageUrl": "https://helu.ca",
"organization": "heluca",
"cert": "cert-heluca",
"enablePassword": true,
"enableSignUp": true,
"disableSignin": false,
"clientId": "{{ vault_angelia_oauth_client_id }}",
"clientSecret": "{{ vault_angelia_oauth_client_secret }}",
"providers": [],
"signinMethods": [
{"name": "Password", "displayName": "Password", "rule": "All"},
{"name": "Verification code", "displayName": "Verification code", "rule": "All"},
{"name": "WebAuthn", "displayName": "WebAuthn", "rule": "None"}
],
"signupItems": [
{"name": "ID", "visible": false, "required": true, "prompted": false, "rule": "Random"},
{"name": "Email", "visible": true, "required": true, "prompted": false, "rule": "None"},
{"name": "Display name", "visible": true, "required": true, "prompted": false, "rule": "None"},
{"name": "Password", "visible": true, "required": true, "prompted": false, "rule": "None"},
{"name": "Confirm password", "visible": true, "required": true, "prompted": false, "rule": "None"},
{"name": "Agreement", "visible": true, "required": true, "prompted": false, "rule": "None"}
],
"grantTypes": [
"authorization_code",
"password",
"client_credentials",
"token",
"id_token",
"refresh_token"
],
"redirectUris": [
"https://ouranos.helu.ca/callback"
],
"tokenFormat": "JWT",
"tokenFields": [],
"expireInHours": 168,
"failedSigninLimit": 5,
"failedSigninFrozenTime": 15,
"formCss": "<style>.login-panel{background-color:#ffffff;border-radius:10px;box-shadow:0 0 30px 20px rgba(255,164,21,0.12)}.ant-btn-primary{background-color:#4b96ff!important;border-color:#4b96ff!important}.ant-btn-primary:hover{background-color:#58c0ff!important;border-color:#58c0ff!important}a{color:#ffa415}a:hover{color:#ffc219}.ant-input:focus,.ant-input-focused{border-color:#4b96ff!important;box-shadow:0 0 0 2px rgba(75,150,255,0.2)!important}.ant-checkbox-checked .ant-checkbox-inner{background-color:#4b96ff!important;border-color:#4b96ff!important}</style>",
"footerHtml": "<div style=\"text-align:center;padding:10px;color:#666;\"><a href=\"https://helu.ca\" style=\"color:#4b96ff;text-decoration:none;\">Powered by Helu.ca</a></div>"
},
{
"owner": "admin",
"name": "gitea",
"displayName": "Gitea",
"logo": "https://helu.ca/media/images/helu-ca_logo.original.svg",
"homepageUrl": "https://gitea.ouranos.helu.ca",
"organization": "heluca",
"cert": "cert-heluca",
"enablePassword": true,
"enableSignUp": false,
"clientId": "{{ vault_gitea_oauth_client_id }}",
"clientSecret": "{{ vault_gitea_oauth_client_secret }}",
"providers": [],
"signinMethods": [
{"name": "Password", "displayName": "Password", "rule": "All"}
],
"signupItems": [
{"name": "ID", "visible": false, "required": true, "prompted": false, "rule": "Random"},
{"name": "Email", "visible": true, "required": true, "prompted": false, "rule": "None"},
{"name": "Display name", "visible": true, "required": true, "prompted": false, "rule": "None"},
{"name": "Password", "visible": true, "required": true, "prompted": false, "rule": "None"},
{"name": "Confirm password", "visible": true, "required": true, "prompted": false, "rule": "None"}
],
"grantTypes": [
"authorization_code",
"refresh_token"
],
"redirectUris": [
"https://gitea.ouranos.helu.ca/user/oauth2/casdoor/callback"
],
"tokenFormat": "JWT",
"expireInHours": 168,
"formCss": "<style>.login-panel{background-color:#ffffff;border-radius:10px;box-shadow:0 0 30px 20px rgba(255,164,21,0.12)}.ant-btn-primary{background-color:#4b96ff!important;border-color:#4b96ff!important}.ant-btn-primary:hover{background-color:#58c0ff!important;border-color:#58c0ff!important}a{color:#ffa415}a:hover{color:#ffc219}</style>",
"footerHtml": "<div style=\"text-align:center;padding:10px;color:#666;\"><a href=\"https://helu.ca\" style=\"color:#4b96ff;text-decoration:none;\">Powered by Helu.ca</a></div>"
},
{
"owner": "admin",
"name": "jupyterlab",
"displayName": "JupyterLab",
"logo": "https://helu.ca/media/images/helu-ca_logo.original.svg",
"homepageUrl": "https://jupyterlab.ouranos.helu.ca",
"organization": "heluca",
"cert": "cert-heluca",
"enablePassword": true,
"enableSignUp": false,
"clientId": "{{ vault_jupyterlab_oauth_client_id }}",
"clientSecret": "{{ vault_jupyterlab_oauth_client_secret }}",
"providers": [],
"signinMethods": [
{"name": "Password", "displayName": "Password", "rule": "All"}
],
"signupItems": [
{"name": "ID", "visible": false, "required": true, "prompted": false, "rule": "Random"},
{"name": "Email", "visible": true, "required": true, "prompted": false, "rule": "None"},
{"name": "Display name", "visible": true, "required": true, "prompted": false, "rule": "None"},
{"name": "Password", "visible": true, "required": true, "prompted": false, "rule": "None"},
{"name": "Confirm password", "visible": true, "required": true, "prompted": false, "rule": "None"}
],
"grantTypes": [
"authorization_code",
"refresh_token"
],
"redirectUris": [
"https://jupyterlab.ouranos.helu.ca/oauth2/callback"
],
"tokenFormat": "JWT",
"expireInHours": 168,
"formCss": "<style>.login-panel{background-color:#ffffff;border-radius:10px;box-shadow:0 0 30px 20px rgba(255,164,21,0.12)}.ant-btn-primary{background-color:#4b96ff!important;border-color:#4b96ff!important}.ant-btn-primary:hover{background-color:#58c0ff!important;border-color:#58c0ff!important}a{color:#ffa415}a:hover{color:#ffc219}</style>",
"footerHtml": "<div style=\"text-align:center;padding:10px;color:#666;\"><a href=\"https://helu.ca\" style=\"color:#4b96ff;text-decoration:none;\">Powered by Helu.ca</a></div>"
},
{
"owner": "admin",
"name": "searxng",
"displayName": "SearXNG",
"logo": "https://helu.ca/media/images/helu-ca_logo.original.svg",
"homepageUrl": "https://searxng.ouranos.helu.ca",
"organization": "heluca",
"cert": "cert-heluca",
"enablePassword": true,
"enableSignUp": false,
"clientId": "{{ vault_searxng_oauth_client_id }}",
"clientSecret": "{{ vault_searxng_oauth_client_secret }}",
"providers": [],
"signinMethods": [
{"name": "Password", "displayName": "Password", "rule": "All"}
],
"signupItems": [
{"name": "ID", "visible": false, "required": true, "prompted": false, "rule": "Random"},
{"name": "Email", "visible": true, "required": true, "prompted": false, "rule": "None"},
{"name": "Display name", "visible": true, "required": true, "prompted": false, "rule": "None"},
{"name": "Password", "visible": true, "required": true, "prompted": false, "rule": "None"},
{"name": "Confirm password", "visible": true, "required": true, "prompted": false, "rule": "None"}
],
"grantTypes": [
"authorization_code",
"refresh_token"
],
"redirectUris": [
"https://searxng.ouranos.helu.ca/oauth2/callback"
],
"tokenFormat": "JWT",
"expireInHours": 168,
"formCss": "<style>.login-panel{background-color:#ffffff;border-radius:10px;box-shadow:0 0 30px 20px rgba(255,164,21,0.12)}.ant-btn-primary{background-color:#4b96ff!important;border-color:#4b96ff!important}.ant-btn-primary:hover{background-color:#58c0ff!important;border-color:#58c0ff!important}a{color:#ffa415}a:hover{color:#ffc219}</style>",
"footerHtml": "<div style=\"text-align:center;padding:10px;color:#666;\"><a href=\"https://helu.ca\" style=\"color:#4b96ff;text-decoration:none;\">Powered by Helu.ca</a></div>"
},
{
"owner": "admin",
"name": "openwebui",
"displayName": "Open WebUI",
"logo": "https://helu.ca/media/images/helu-ca_logo.original.svg",
"homepageUrl": "https://openwebui.ouranos.helu.ca",
"organization": "heluca",
"cert": "cert-heluca",
"enablePassword": true,
"enableSignUp": false,
"clientId": "{{ vault_openwebui_oauth_client_id }}",
"clientSecret": "{{ vault_openwebui_oauth_client_secret }}",
"providers": [],
"signinMethods": [
{"name": "Password", "displayName": "Password", "rule": "All"}
],
"signupItems": [
{"name": "ID", "visible": false, "required": true, "prompted": false, "rule": "Random"},
{"name": "Email", "visible": true, "required": true, "prompted": false, "rule": "None"},
{"name": "Display name", "visible": true, "required": true, "prompted": false, "rule": "None"},
{"name": "Password", "visible": true, "required": true, "prompted": false, "rule": "None"},
{"name": "Confirm password", "visible": true, "required": true, "prompted": false, "rule": "None"}
],
"grantTypes": [
"authorization_code",
"refresh_token"
],
"redirectUris": [
"https://openwebui.ouranos.helu.ca/oauth/oidc/callback"
],
"tokenFormat": "JWT",
"expireInHours": 168,
"formCss": "<style>.login-panel{background-color:#ffffff;border-radius:10px;box-shadow:0 0 30px 20px rgba(255,164,21,0.12)}.ant-btn-primary{background-color:#4b96ff!important;border-color:#4b96ff!important}.ant-btn-primary:hover{background-color:#58c0ff!important;border-color:#58c0ff!important}a{color:#ffa415}a:hover{color:#ffc219}</style>",
"footerHtml": "<div style=\"text-align:center;padding:10px;color:#666;\"><a href=\"https://helu.ca\" style=\"color:#4b96ff;text-decoration:none;\">Powered by Helu.ca</a></div>"
}
],
"users": [
{
"owner": "heluca",
"name": "robert@helu.ca",
"type": "normal-user",
"password": "ChangeMe!",
"displayName": "Heluca",
"avatar": "",
"email": "robert@helu.ca",
"phone": "",
"countryCode": "CA",
"address": [],
"affiliation": "Helu.ca",
"tag": "owner",
"title": "Owner",
"score": 2000,
"ranking": 1,
"isAdmin": true,
"isForbidden": false,
"isDeleted": false,
"signupApplication": "angelia",
"createdIp": "",
"groups": []
},
{
"owner": "heluca",
"name": "r@helu.ca",
"type": "normal-user",
"password": "ChangeMe!",
"displayName": "Robert",
"avatar": "",
"email": "r@helu.ca",
"phone": "",
"countryCode": "CA",
"address": [],
"affiliation": "Helu.ca",
"tag": "sysadmin",
"title": "Owner",
"bio": "",
"score": 2000,
"ranking": 2,
"isAdmin": false,
"isForbidden": false,
"isDeleted": false,
"signupApplication": "angelia",
"createdIp": "",
"groups": []
}
],
"providers": [
{
"owner": "admin",
"name": "provider-email-smtp4dev",
"displayName": "smtp4dev Email",
"category": "Email",
"type": "SMTP",
"host": "{{ smtp_host }}",
"port": {{ smtp_port }},
"disableSsl": true,
"fromAddress": "{{ smtp_from }}",
"fromName": "{{ smtp_from_name }}",
"clientSecret": ""
}
],
"certs": [
{
"owner": "admin",
"name": "cert-built-in",
"displayName": "Built-in Certificate",
"scope": "JWT",
"type": "x509",
"cryptoAlgorithm": "RS256",
"bitSize": 4096,
"expireInYears": 20,
"certificate": "",
"privateKey": ""
},
{
"owner": "admin",
"name": "cert-heluca",
"displayName": "Helu.ca JWT Certificate",
"scope": "JWT",
"type": "x509",
"cryptoAlgorithm": "RS256",
"bitSize": 4096,
"expireInYears": 20,
"certificate": "",
"privateKey": ""
}
],
"ldaps": [],
"models": [],
"permissions": [],
"roles": [],
"groups": [],
"adapters": [],
"enforcers": [],
"plans": [],
"pricings": [],
"payments": [],
"products": [],
"resources": [],
"syncers": [],
"tokens": [],
"webhooks": []
}

View File

@@ -0,0 +1,524 @@
{
"organizations": [
{
"owner": "",
"name": "",
"displayName": "",
"websiteUrl": "",
"favicon": "",
"passwordType": "bcrypt",
"passwordSalt": "",
"passwordOptions": [
"AtLeast6"
],
"countryCodes": [
"US",
"GB",
"ES",
"FR",
"DE",
"CN",
"JP",
"KR",
"VN",
"ID",
"SG",
"IN",
"IT",
"MY",
"TR",
"DZ",
"IL",
"PH",
"NL",
"PL",
"FI",
"SE",
"UA",
"KZ",
"CZ",
"SK",
"AZ"
],
"defaultAvatar": "",
"defaultApplication": "",
"tags": [],
"languages": [
"en",
"es",
"fr",
"de",
"ja",
"zh",
"vi",
"pt",
"tr",
"pl",
"uk"
],
"masterPassword": "",
"defaultPassword": "",
"initScore": 2000,
"enableSoftDeletion": false,
"isProfilePublic": true,
"disableSignin": false,
"accountItems": [
{"name": "Organization", "visible": true, "viewRule": "Public", "modifyRule": "Admin"},
{"name": "ID", "visible": true, "viewRule": "Public", "modifyRule": "Immutable"},
{"name": "Name", "visible": true, "viewRule": "Public", "modifyRule": "Admin"},
{"name": "Display name", "visible": true, "viewRule": "Public", "modifyRule": "Self"},
{"name": "Avatar", "visible": true, "viewRule": "Public", "modifyRule": "Self"},
{"name": "User type", "visible": true, "viewRule": "Public", "modifyRule": "Admin"},
{"name": "Password", "visible": true, "viewRule": "Self", "modifyRule": "Self"},
{"name": "Email", "visible": true, "viewRule": "Public", "modifyRule": "Self"},
{"name": "Phone", "visible": true, "viewRule": "Public", "modifyRule": "Self"},
{"name": "Country code", "visible": true, "viewRule": "Public", "modifyRule": "Admin"},
{"name": "Country/Region", "visible": true, "viewRule": "Public", "modifyRule": "Self"},
{"name": "Location", "visible": true, "viewRule": "Public", "modifyRule": "Self"},
{"name": "Address", "visible": true, "viewRule": "Public", "modifyRule": "Self"},
{"name": "Addresses", "visible": true, "viewRule": "Public", "modifyRule": "Self"},
{"name": "Affiliation", "visible": true, "viewRule": "Public", "modifyRule": "Self"},
{"name": "Title", "visible": true, "viewRule": "Public", "modifyRule": "Self"},
{"name": "ID card type", "visible": true, "viewRule": "Public", "modifyRule": "Self"},
{"name": "ID card", "visible": true, "viewRule": "Public", "modifyRule": "Self"},
{"name": "Real name", "visible": true, "viewRule": "Public", "modifyRule": "Self"},
{"name": "ID verification", "visible": true, "viewRule": "Self", "modifyRule": "Self"},
{"name": "Homepage", "visible": true, "viewRule": "Public", "modifyRule": "Self"},
{"name": "Bio", "visible": true, "viewRule": "Public", "modifyRule": "Self"},
{"name": "Tag", "visible": true, "viewRule": "Public", "modifyRule": "Admin"},
{"name": "Signup application", "visible": true, "viewRule": "Public", "modifyRule": "Admin"},
{"name": "Register type", "visible": true, "viewRule": "Public", "modifyRule": "Admin"},
{"name": "Register source", "visible": true, "viewRule": "Public", "modifyRule": "Admin"},
{"name": "Roles", "visible": true, "viewRule": "Public", "modifyRule": "Immutable"},
{"name": "Permissions", "visible": true, "viewRule": "Public", "modifyRule": "Immutable"},
{"name": "Groups", "visible": true, "viewRule": "Public", "modifyRule": "Admin"},
{"name": "3rd-party logins", "visible": true, "viewRule": "Self", "modifyRule": "Self"},
{"name": "Properties", "visible": true, "viewRule": "Admin", "modifyRule": "Admin"},
{"name": "Is admin", "visible": true, "viewRule": "Admin", "modifyRule": "Admin"},
{"name": "Is forbidden", "visible": true, "viewRule": "Admin", "modifyRule": "Admin"},
{"name": "Is deleted", "visible": true, "viewRule": "Admin", "modifyRule": "Admin"},
{"name": "Multi-factor authentication", "visible": true, "viewRule": "Self", "modifyRule": "Self"},
{"name": "WebAuthn credentials", "visible": true, "viewRule": "Self", "modifyRule": "Self"},
{"name": "Managed accounts", "visible": true, "viewRule": "Self", "modifyRule": "Self"},
{"name": "MFA accounts", "visible": true, "viewRule": "Self", "modifyRule": "Self"}
]
}
],
"applications": [
{
"owner": "",
"name": "",
"displayName": "",
"logo": "",
"homepageUrl": "",
"organization": "",
"cert": "",
"enablePassword": true,
"enableSignUp": true,
"disableSignin": false,
"clientId": "",
"clientSecret": "",
"providers": [
{
"name": "",
"canSignUp": true,
"canSignIn": true,
"canUnlink": false,
"prompted": false,
"alertType": "None"
}
],
"signinMethods": [
{
"name": "Password",
"displayName": "Password",
"rule": "All"
},
{
"name": "Verification code",
"displayName": "Verification code",
"rule": "All"
},
{
"name": "WebAuthn",
"displayName": "WebAuthn",
"rule": "None"
},
{
"name": "Face ID",
"displayName": "Face ID",
"rule": "None"
}
],
"signupItems": [
{
"name": "ID",
"visible": false,
"required": true,
"prompted": false,
"rule": "Random"
},
{
"name": "Username",
"visible": true,
"required": true,
"prompted": false,
"rule": "None"
},
{
"name": "Display name",
"visible": true,
"required": true,
"prompted": false,
"rule": "None"
},
{
"name": "Password",
"visible": true,
"required": true,
"prompted": false,
"rule": "None"
},
{
"name": "Confirm password",
"visible": true,
"required": true,
"prompted": false,
"rule": "None"
},
{
"name": "Email",
"visible": true,
"required": true,
"prompted": false,
"rule": "None"
},
{
"name": "Phone",
"visible": true,
"required": true,
"prompted": false,
"rule": "None"
},
{
"name": "Agreement",
"visible": true,
"required": true,
"prompted": false,
"rule": "None"
}
],
"grantTypes": [
"authorization_code",
"password",
"client_credentials",
"token",
"id_token",
"refresh_token"
],
"redirectUris": [
"http://localhost:9000/callback"
],
"tokenFormat": "JWT",
"tokenFields": [],
"expireInHours": 168,
"failedSigninLimit": 5,
"failedSigninFrozenTime": 15
}
],
"users": [
{
"owner": "",
"name": "",
"type": "normal-user",
"password": "",
"displayName": "",
"avatar": "",
"email": "",
"phone": "",
"countryCode": "",
"address": [],
"addresses": [],
"affiliation": "",
"tag": "",
"score": 2000,
"ranking": 1,
"isAdmin": true,
"isForbidden": false,
"isDeleted": false,
"signupApplication": "",
"createdIp": "",
"groups": []
}
],
"providers": [
{
"owner": "",
"name": "",
"displayName": "",
"category": "",
"type": ""
}
],
"certs": [
{
"owner": "",
"name": "",
"displayName": "",
"scope": "JWT",
"type": "x509",
"cryptoAlgorithm": "RS256",
"bitSize": 4096,
"expireInYears": 20,
"certificate": "",
"privateKey": ""
}
],
"ldaps": [
{
"id": "",
"owner": "",
"serverName": "",
"host": "",
"port": 389,
"username": "",
"password": "",
"baseDn": "",
"autoSync": 0,
"lastSync": ""
}
],
"models": [
{
"owner": "",
"name": "",
"modelText": "",
"displayName": ""
}
],
"permissions": [
{
"actions": [],
"displayName": "",
"effect": "",
"isEnabled": true,
"model": "",
"name": "",
"owner": "",
"resourceType": "",
"resources": [],
"roles": [],
"users": []
}
],
"payments": [
{
"currency": "",
"detail": "",
"displayName": "",
"invoiceRemark": "",
"invoiceTaxId": "",
"invoiceTitle": "",
"invoiceType": "",
"invoiceUrl": "",
"message": "",
"name": "",
"organization": "",
"owner": "",
"payUrl": "",
"personEmail": "",
"personIdCard": "",
"personName": "",
"personPhone": "",
"price": 0,
"productDisplayName": "",
"productName": "",
"provider": "",
"returnUrl": "",
"state": "",
"tag": "",
"type": "",
"user": ""
}
],
"products": [
{
"currency": "",
"detail": "",
"displayName": "",
"image": "",
"name": "",
"owner": "",
"price": 0,
"providers": [],
"quantity": 0,
"returnUrl": "",
"sold": 0,
"state": "",
"tag": ""
}
],
"resources": [
{
"owner": "",
"name": "",
"user": "",
"provider": "",
"application": "",
"tag": "",
"parent": "",
"fileName": "",
"fileType": "",
"fileFormat": "",
"url": "",
"description": ""
}
],
"roles": [
{
"displayName": "",
"isEnabled": true,
"name": "",
"owner": "",
"roles": [],
"users": []
}
],
"syncers": [
{
"affiliationTable": "",
"avatarBaseUrl": "",
"database": "",
"databaseType": "",
"errorText": "",
"host": "",
"isEnabled": false,
"name": "",
"organization": "",
"owner": "",
"password": "",
"port": 0,
"syncInterval": 0,
"table": "",
"tableColumns": [
{
"casdoorName": "",
"isHashed": true,
"name": "",
"type": "",
"values": []
}
],
"tablePrimaryKey": "",
"type": "",
"user": ""
}
],
"tokens": [
{
"accessToken": "",
"application": "",
"code": "",
"codeChallenge": "",
"codeExpireIn": 0,
"codeIsUsed": true,
"createdTime": "",
"expiresIn": 0,
"name": "",
"organization": "",
"owner": "",
"refreshToken": "",
"scope": "",
"tokenType": "",
"user": ""
}
],
"webhooks": [
{
"contentType": "",
"events": [],
"headers": [
{
"name": "",
"value": ""
}
],
"isEnabled": true,
"isUserExtended": true,
"method": "",
"name": "",
"organization": "",
"owner": "",
"url": ""
}
],
"groups": [
{
"owner": "",
"name": "",
"displayName": "",
"manager": "",
"contactEmail": "",
"type": "",
"parent_id": "",
"isTopGroup": true,
"title": "",
"key": "",
"children": [],
"isEnabled": true
}
],
"adapters": [
{
"owner": "",
"name": "",
"table": "",
"useSameDb": true,
"type": "",
"databaseType": "",
"database": "",
"host": "",
"port": 0,
"user": "",
"password": ""
}
],
"enforcers": [
{
"owner": "",
"name": "",
"displayName": "",
"description": "",
"model": "",
"adapter": "",
"enforcer": ""
}
],
"plans": [
{
"owner": "",
"name": "",
"displayName": "",
"description": "",
"price": 0,
"currency": "",
"period": "",
"product": "",
"paymentProviders": [],
"isEnabled": true,
"role": ""
}
],
"pricings": [
{
"owner": "",
"name": "",
"displayName": "",
"description": "",
"plans": [],
"isEnabled": true,
"trialDuration": 0,
"application": ""
}
]
}

View File

@@ -0,0 +1,75 @@
---
# -----------------------------------------------------------------------------
# Casdoor Removal Playbook
# -----------------------------------------------------------------------------
# Removes Casdoor SSO including:
#   - Docker containers and volumes
#   - Configuration files
#   - PostgreSQL data directory
#   - Service user and group
#
# WARNING: This will permanently delete all Casdoor data including the database!
# -----------------------------------------------------------------------------
- name: Remove Casdoor
  hosts: ubuntu
  tasks:
    # Gate: only act on hosts whose inventory `services` list contains
    # 'casdoor'; default([]) keeps hosts without the variable safe.
    - name: Check if host has casdoor service
      ansible.builtin.set_fact:
        has_casdoor_service: "{{ 'casdoor' in services | default([]) }}"
    - name: Skip hosts without casdoor service
      ansible.builtin.meta: end_host
      when: not has_casdoor_service
    # -------------------------------------------------------------------------
    # Stop and Remove Docker Services
    # -------------------------------------------------------------------------
    - name: Check if docker-compose.yml exists
      become: true
      ansible.builtin.stat:
        path: "{{ casdoor_directory }}/docker-compose.yml"
      register: compose_file
    # state: absent with remove_volumes also deletes the named Docker volumes
    # (including the PostgreSQL data volume) — this is the destructive step.
    - name: Stop and remove Casdoor containers
      become: true
      community.docker.docker_compose_v2:
        project_src: "{{ casdoor_directory }}"
        state: absent
        remove_volumes: true
      when: compose_file.stat.exists
    # -------------------------------------------------------------------------
    # Remove Data Directory
    # -------------------------------------------------------------------------
    - name: Remove casdoor directory and all data
      become: true
      ansible.builtin.file:
        path: "{{ casdoor_directory }}"
        state: absent
    # -------------------------------------------------------------------------
    # Remove User and Group
    # -------------------------------------------------------------------------
    # NOTE(review): the username 'ponos' is hardcoded here while everything
    # else is variable-driven — confirm it shouldn't be a variable (e.g. the
    # connecting ansible_user). failed_when: false makes this best-effort so
    # the play continues even if ponos was never in the group.
    - name: Remove ponos from casdoor group
      become: true
      ansible.builtin.command:
        cmd: gpasswd -d ponos {{ casdoor_group }}
      register: gpasswd_result
      changed_when: gpasswd_result.rc == 0
      failed_when: false
    - name: Remove casdoor user
      become: true
      ansible.builtin.user:
        name: "{{ casdoor_user }}"
        state: absent
    - name: Remove casdoor group
      become: true
      ansible.builtin.group:
        name: "{{ casdoor_group }}"
        state: absent

View File

@@ -0,0 +1,71 @@
#!/bin/bash
# Certificate metrics for Prometheus node_exporter textfile collector
# Managed by Ansible - DO NOT EDIT MANUALLY
#
# Writes metrics to: {{ prometheus_node_exporter_text_directory }}/ssl_cert.prom
# Metrics:
#   ssl_certificate_expiry_timestamp - Unix timestamp when cert expires
#   ssl_certificate_expiry_seconds   - Seconds until expiry
#   ssl_certificate_valid            - 1 if valid, 0 if expired or missing
set -euo pipefail

METRICS_DIR="{{ prometheus_node_exporter_text_directory }}"
METRICS_FILE="${METRICS_DIR}/ssl_cert.prom"
CERT_FILE="{{ haproxy_cert_path }}"
DOMAIN="{{ haproxy_domain }}"

# Create temp file for atomic write. With `set -e` any failure below would
# otherwise leak the temp file into the textfile directory; the trap cleans
# it up. After the final atomic mv the path no longer exists, so the
# rm -f in the trap is a harmless no-op on success.
TEMP_FILE=$(mktemp "${METRICS_DIR}/.ssl_cert.prom.XXXXXX")
trap 'rm -f "${TEMP_FILE}"' EXIT

# Write metric headers (HELP/TYPE lines required by the Prometheus format)
cat > "${TEMP_FILE}" << 'EOF'
# HELP ssl_certificate_expiry_timestamp Unix timestamp when the SSL certificate expires
# TYPE ssl_certificate_expiry_timestamp gauge
# HELP ssl_certificate_expiry_seconds Seconds until the SSL certificate expires
# TYPE ssl_certificate_expiry_seconds gauge
# HELP ssl_certificate_valid Whether the SSL certificate is valid (1) or expired/missing (0)
# TYPE ssl_certificate_valid gauge
EOF

if [[ -f "${CERT_FILE}" ]]; then
    # Extract expiry date from certificate
    EXPIRY_DATE=$(openssl x509 -enddate -noout -in "${CERT_FILE}" 2>/dev/null | cut -d= -f2)
    if [[ -n "${EXPIRY_DATE}" ]]; then
        # Convert to Unix timestamp (GNU date -d; falls back to 0 on parse failure)
        EXPIRY_TIMESTAMP=$(date -d "${EXPIRY_DATE}" +%s 2>/dev/null || echo "0")
        CURRENT_TIMESTAMP=$(date +%s)
        EXPIRY_SECONDS=$((EXPIRY_TIMESTAMP - CURRENT_TIMESTAMP))
        # Check if certificate is valid (not expired)
        if [[ ${EXPIRY_SECONDS} -gt 0 ]]; then
            VALID=1
        else
            VALID=0
        fi
        # Extract issuer organization for the metric label
        ISSUER=$(openssl x509 -issuer -noout -in "${CERT_FILE}" 2>/dev/null | sed 's/.*O = \([^,]*\).*/\1/' | tr -d '"' || echo "unknown")
        # Write metrics
        echo "ssl_certificate_expiry_timestamp{domain=\"${DOMAIN}\",issuer=\"${ISSUER}\"} ${EXPIRY_TIMESTAMP}" >> "${TEMP_FILE}"
        echo "ssl_certificate_expiry_seconds{domain=\"${DOMAIN}\",issuer=\"${ISSUER}\"} ${EXPIRY_SECONDS}" >> "${TEMP_FILE}"
        echo "ssl_certificate_valid{domain=\"${DOMAIN}\",issuer=\"${ISSUER}\"} ${VALID}" >> "${TEMP_FILE}"
    else
        # Could not parse certificate
        echo "ssl_certificate_expiry_timestamp{domain=\"${DOMAIN}\",issuer=\"unknown\"} 0" >> "${TEMP_FILE}"
        echo "ssl_certificate_expiry_seconds{domain=\"${DOMAIN}\",issuer=\"unknown\"} 0" >> "${TEMP_FILE}"
        echo "ssl_certificate_valid{domain=\"${DOMAIN}\",issuer=\"unknown\"} 0" >> "${TEMP_FILE}"
    fi
else
    # Certificate file does not exist
    echo "ssl_certificate_expiry_timestamp{domain=\"${DOMAIN}\",issuer=\"none\"} 0" >> "${TEMP_FILE}"
    echo "ssl_certificate_expiry_seconds{domain=\"${DOMAIN}\",issuer=\"none\"} 0" >> "${TEMP_FILE}"
    echo "ssl_certificate_valid{domain=\"${DOMAIN}\",issuer=\"none\"} 0" >> "${TEMP_FILE}"
fi

# Set permissions on the temp file, then atomic move so node_exporter never
# reads a partially written metrics file.
chmod 644 "${TEMP_FILE}"
chown prometheus:prometheus "${TEMP_FILE}" 2>/dev/null || true
mv "${TEMP_FILE}" "${METRICS_FILE}"

323
ansible/certbot/deploy.yml Normal file
View File

@@ -0,0 +1,323 @@
---
# -----------------------------------------------------------------------------
# Certbot Deployment Playbook
# -----------------------------------------------------------------------------
# Deploys certbot with Namecheap DNS-01 validation for wildcard certificates
# Host: hippocamp.helu.ca (OCI HAProxy instance)
#
# Secrets are fetched automatically from OCI Vault via group_vars/all/secrets.yml
# -----------------------------------------------------------------------------
- name: Deploy Certbot with Namecheap DNS-01 Validation
hosts: ubuntu
vars:
ansible_common_remote_group: "{{ certbot_group | default(omit) }}"
allow_world_readable_tmpfiles: true
tags: [certbot, ssl, deploy]
handlers:
- name: restart certbot-renew timer
become: true
ansible.builtin.systemd:
name: certbot-renew.timer
state: restarted
daemon_reload: true
tasks:
- name: Check if host has certbot service
ansible.builtin.set_fact:
has_certbot_service: "{{ 'certbot' in services | default([]) }}"
- name: Skip hosts without certbot service
ansible.builtin.meta: end_host
when: not has_certbot_service
# -------------------------------------------------------------------------
# System Setup
# -------------------------------------------------------------------------
- name: Create certbot group
become: true
ansible.builtin.group:
name: "{{ certbot_group }}"
system: true
- name: Create certbot user
become: true
ansible.builtin.user:
name: "{{ certbot_user }}"
comment: "Certbot SSL Certificate Management"
group: "{{ certbot_group }}"
system: true
shell: /usr/sbin/nologin
home: "{{ certbot_directory }}"
create_home: false
- name: Add ansible user to certbot group
become: true
ansible.builtin.user:
name: "{{ ansible_user }}"
groups: "{{ certbot_group }}"
append: true
# -------------------------------------------------------------------------
# Directory Structure
# -------------------------------------------------------------------------
- name: Create certbot directories
become: true
ansible.builtin.file:
path: "{{ item }}"
owner: "{{ certbot_user }}"
group: "{{ certbot_group }}"
state: directory
mode: '0750'
loop:
- "{{ certbot_directory }}"
- "{{ certbot_directory }}/config"
- "{{ certbot_directory }}/work"
- "{{ certbot_directory }}/logs"
- "{{ certbot_directory }}/credentials"
- "{{ certbot_directory }}/hooks"
- name: Create haproxy group for certificate directory
become: true
ansible.builtin.group:
name: "{{ haproxy_group | default('haproxy') }}"
system: true
- name: Create haproxy user for certificate directory
become: true
ansible.builtin.user:
name: "{{ haproxy_user | default('haproxy') }}"
comment: "HAProxy Load Balancer"
group: "{{ haproxy_group | default('haproxy') }}"
system: true
shell: /usr/sbin/nologin
home: /nonexistent
create_home: false
- name: Create certificate output directory
become: true
ansible.builtin.file:
path: /etc/haproxy/certs
owner: "{{ certbot_user }}"
group: "{{ haproxy_group | default('haproxy') }}"
state: directory
mode: '0750'
# -------------------------------------------------------------------------
# Python Virtual Environment
# -------------------------------------------------------------------------
- name: Install Python venv package
become: true
ansible.builtin.apt:
name:
- python3-venv
- python3-pip
state: present
update_cache: true
- name: Create virtual environment
become: true
become_user: "{{ certbot_user }}"
ansible.builtin.command: python3 -m venv {{ certbot_directory }}/.venv
args:
creates: "{{ certbot_directory }}/.venv/bin/activate"
vars:
ansible_common_remote_group: "{{ certbot_group }}"
allow_world_readable_tmpfiles: true
- name: Upgrade pip in virtualenv
become: true
become_user: "{{ certbot_user }}"
ansible.builtin.pip:
name: pip
state: latest
virtualenv: "{{ certbot_directory }}/.venv"
vars:
ansible_common_remote_group: "{{ certbot_group }}"
allow_world_readable_tmpfiles: true
- name: Install certbot and Namecheap DNS plugin
become: true
become_user: "{{ certbot_user }}"
ansible.builtin.pip:
name:
- certbot
- certbot-dns-namecheap
state: present
virtualenv: "{{ certbot_directory }}/.venv"
vars:
ansible_common_remote_group: "{{ certbot_group }}"
allow_world_readable_tmpfiles: true
# -------------------------------------------------------------------------
# Namecheap Credentials
# -------------------------------------------------------------------------
    # NOTE(review): this fetches the public IP of the Ansible CONTROLLER
    # (delegate_to: localhost), but certbot calls the Namecheap API from the
    # TARGET host, so it is the target's egress IP that must be whitelisted.
    # If controller and target do not share the same public IP, the
    # credentials file will carry the wrong client IP — confirm whether the
    # delegation is intentional.
    - name: Get public IP for Namecheap API
      ansible.builtin.uri:
        url: https://ifconfig.me/ip
        return_content: true
      register: public_ip_result
      delegate_to: localhost
      become: false
    # Strip the trailing newline from the HTTP response body.
    - name: Set client IP fact
      ansible.builtin.set_fact:
        namecheap_client_ip: "{{ public_ip_result.content | trim }}"
- name: Template Namecheap credentials
become: true
ansible.builtin.template:
src: namecheap.ini.j2
dest: "{{ certbot_directory }}/credentials/namecheap.ini"
owner: "{{ certbot_user }}"
group: "{{ certbot_group }}"
mode: '0600'
# -------------------------------------------------------------------------
# Renewal Hooks
# -------------------------------------------------------------------------
- name: Template renewal hook script
become: true
ansible.builtin.template:
src: renewal-hook.sh.j2
dest: "{{ certbot_directory }}/hooks/renewal-hook.sh"
owner: "{{ certbot_user }}"
group: "{{ certbot_group }}"
mode: '0750'
- name: Template certificate metrics script
become: true
ansible.builtin.template:
src: cert-metrics.sh.j2
dest: "{{ certbot_directory }}/hooks/cert-metrics.sh"
owner: "{{ certbot_user }}"
group: "{{ certbot_group }}"
mode: '0750'
# -------------------------------------------------------------------------
# Initial Certificate Request
# -------------------------------------------------------------------------
- name: Check if certificate already exists
become: true
ansible.builtin.stat:
path: "{{ certbot_directory }}/config/live/{{ certbot_cert_name }}/fullchain.pem"
register: cert_exists
- name: Build domain arguments for certbot
ansible.builtin.set_fact:
certbot_domain_args: "{{ certbot_domains | map('regex_replace', '^', '-d ') | join(' ') }}"
- name: Request initial certificate
become: true
become_user: "{{ certbot_user }}"
ansible.builtin.shell: |
source {{ certbot_directory }}/.venv/bin/activate
certbot certonly \
--non-interactive \
--agree-tos \
--email {{ certbot_email }} \
--authenticator dns-namecheap \
--dns-namecheap-credentials {{ certbot_directory }}/credentials/namecheap.ini \
--dns-namecheap-propagation-seconds 120 \
--config-dir {{ certbot_directory }}/config \
--work-dir {{ certbot_directory }}/work \
--logs-dir {{ certbot_directory }}/logs \
--cert-name {{ certbot_cert_name }} \
{{ certbot_domain_args }}
args:
executable: /bin/bash
when: not cert_exists.stat.exists
register: certbot_request
- name: Run renewal hook after initial certificate
become: true
ansible.builtin.command: "{{ certbot_directory }}/hooks/renewal-hook.sh"
when: certbot_request.changed
# -------------------------------------------------------------------------
# Systemd Timer for Auto-Renewal
# -------------------------------------------------------------------------
- name: Create certbot renewal service
become: true
ansible.builtin.copy:
content: |
[Unit]
Description=Certbot Renewal
After=network-online.target
Wants=network-online.target
[Service]
Type=oneshot
User={{ certbot_user }}
Group={{ certbot_group }}
ExecStart=/bin/bash -c 'source {{ certbot_directory }}/.venv/bin/activate && certbot renew --config-dir {{ certbot_directory }}/config --work-dir {{ certbot_directory }}/work --logs-dir {{ certbot_directory }}/logs --deploy-hook {{ certbot_directory }}/hooks/renewal-hook.sh'
PrivateTmp=true
dest: /etc/systemd/system/certbot-renew.service
mode: '0644'
notify: restart certbot-renew timer
- name: Create certbot renewal timer
become: true
ansible.builtin.copy:
content: |
[Unit]
Description=Run certbot renewal twice daily
[Timer]
OnCalendar=*-*-* 00,12:00:00
RandomizedDelaySec=3600
Persistent=true
[Install]
WantedBy=timers.target
dest: /etc/systemd/system/certbot-renew.timer
mode: '0644'
notify: restart certbot-renew timer
- name: Enable and start certbot renewal timer
become: true
ansible.builtin.systemd:
name: certbot-renew.timer
enabled: true
state: started
daemon_reload: true
# -------------------------------------------------------------------------
# Initial Metrics Update
# -------------------------------------------------------------------------
- name: Ensure prometheus textfile directory exists
become: true
ansible.builtin.file:
path: "{{ prometheus_node_exporter_text_directory }}"
state: directory
owner: prometheus
group: prometheus
mode: '0755'
- name: Run certificate metrics script
become: true
ansible.builtin.command: "{{ certbot_directory }}/hooks/cert-metrics.sh"
changed_when: false
# -------------------------------------------------------------------------
# Verification
# -------------------------------------------------------------------------
- name: Verify certificate exists
become: true
ansible.builtin.stat:
path: "{{ haproxy_cert_path }}"
register: final_cert
- name: Certificate deployment status
ansible.builtin.debug:
msg: "Certificate deployed: {{ final_cert.stat.exists }}"

View File

@@ -0,0 +1,8 @@
# Namecheap API credentials for certbot DNS-01 validation
# Managed by Ansible - DO NOT EDIT MANUALLY
# Deployed with mode 0600 by the certbot playbook; contains the API key in
# plain text, so keep it out of version control and world-readable paths.
dns_namecheap_username = {{ namecheap_username }}
dns_namecheap_api_key = {{ namecheap_api_key }}
{% if namecheap_client_ip is defined %}
# Source IP that must be whitelisted in the Namecheap API settings.
dns_namecheap_client_ip = {{ namecheap_client_ip }}
{% endif %}

View File

@@ -0,0 +1,52 @@
#!/bin/bash
# Certbot post-renewal hook for HAProxy
# Managed by Ansible - DO NOT EDIT MANUALLY
#
# This script:
#   1. Combines fullchain.pem + privkey.pem into HAProxy's single-PEM format
#   2. Sets correct ownership/permissions
#   3. Reloads HAProxy via Docker (SIGHUP)
#   4. Updates certificate metrics for Prometheus
set -euo pipefail

CERT_NAME="{{ certbot_cert_name }}"
CERT_DIR="{{ certbot_directory }}/config/live/${CERT_NAME}"
HAPROXY_CERT="{{ haproxy_cert_path }}"
HAPROXY_DIR="{{ haproxy_directory }}"

echo "[$(date '+%Y-%m-%d %H:%M:%S')] Starting renewal hook for ${CERT_NAME}"

# Abort early if certbot did not leave both PEM files where we expect them.
if [[ ! -f "${CERT_DIR}/fullchain.pem" ]] || [[ ! -f "${CERT_DIR}/privkey.pem" ]]; then
    echo "ERROR: Certificate files not found in ${CERT_DIR}"
    exit 1
fi

# Clean up the temp file if anything below fails under `set -e`; after the
# atomic mv succeeds the path no longer exists, so rm -f is a no-op.
trap 'rm -f "${HAPROXY_CERT}.tmp"' EXIT

# Combine certificate and private key for HAProxy
# HAProxy requires both in a single PEM file
cat "${CERT_DIR}/fullchain.pem" "${CERT_DIR}/privkey.pem" > "${HAPROXY_CERT}.tmp"

# Set ownership/permissions on the temp file BEFORE the move, so the final
# path is never observable with default (too-open or wrong-owner) permissions.
chown {{ certbot_user }}:{{ haproxy_group }} "${HAPROXY_CERT}.tmp"
chmod 640 "${HAPROXY_CERT}.tmp"

# Atomic move to avoid HAProxy reading partial file
mv "${HAPROXY_CERT}.tmp" "${HAPROXY_CERT}"
echo "[$(date '+%Y-%m-%d %H:%M:%S')] Certificate combined and written to ${HAPROXY_CERT}"

# Reload HAProxy if running; `docker compose` first, legacy docker-compose as
# fallback. SIGHUP makes HAProxy reload certificates without dropping traffic.
if docker ps --format '{{ '{{' }}.Names{{ '}}' }}' | grep -q haproxy; then
    echo "[$(date '+%Y-%m-%d %H:%M:%S')] Reloading HAProxy..."
    cd "${HAPROXY_DIR}"
    docker compose kill -s HUP haproxy || docker-compose kill -s HUP haproxy
    echo "[$(date '+%Y-%m-%d %H:%M:%S')] HAProxy reloaded"
else
    echo "[$(date '+%Y-%m-%d %H:%M:%S')] HAProxy not running, skipping reload"
fi

# Update certificate metrics
{{ certbot_directory }}/hooks/cert-metrics.sh

echo "[$(date '+%Y-%m-%d %H:%M:%S')] Renewal hook completed successfully"

99
ansible/docker/deploy.yml Normal file
View File

@@ -0,0 +1,99 @@
---
# -----------------------------------------------------------------------------
# Docker Deployment Playbook
# -----------------------------------------------------------------------------
# Installs docker-ce from the upstream Docker apt repository, optionally
# exposes the Docker API over TCP (when docker_api_enabled is true), and
# applies an AppArmor workaround needed when Docker runs nested inside Incus.
# -----------------------------------------------------------------------------
- name: Deploy Docker
  hosts: ubuntu
  become: true
  tasks:
    # Gate: only act on hosts whose inventory `services` list contains
    # 'docker'. default([]) keeps hosts without a `services` variable from
    # failing (consistent with the other service playbooks in this repo).
    - name: Check if host has docker service
      ansible.builtin.set_fact:
        has_docker_service: "{{ 'docker' in services | default([]) }}"
    - name: Skip hosts without docker service
      ansible.builtin.meta: end_host
      when: not has_docker_service
    - name: Add Docker repository
      ansible.builtin.deb822_repository:
        name: docker
        types: [deb]
        uris: https://download.docker.com/linux/ubuntu
        suites: ["{{ ansible_distribution_release }}"]
        components: [stable]
        signed_by: https://download.docker.com/linux/ubuntu/gpg
        state: present
    # state: latest deliberately upgrades docker-ce on every run.
    - name: Update apt and install docker-ce
      ansible.builtin.apt:
        name: docker-ce
        state: latest
        update_cache: true
    - name: Enable and start docker service
      ansible.builtin.systemd:
        name: docker
        enabled: true
        state: started
    # Lets the connecting user run docker without sudo (takes effect on the
    # user's next login session).
    - name: Add ansible_user to docker group
      ansible.builtin.user:
        name: "{{ ansible_user }}"
        groups: docker
        append: true
    - name: Check if Docker API should be enabled
      ansible.builtin.set_fact:
        enable_docker_api: "{{ docker_api_enabled | default(false) }}"
    # NOTE: listing "hosts" in daemon.json conflicts with the -H flag in the
    # stock systemd unit; the override.conf task below clears ExecStart so
    # dockerd reads its listen addresses from this file instead.
    - name: Configure Docker daemon for API exposure
      ansible.builtin.copy:
        content: |
          {
            "hosts": ["unix:///var/run/docker.sock", "tcp://{{ docker_api_host }}:{{ docker_api_port }}"],
            "log-driver": "json-file",
            "log-opts": {
              "max-size": "10m",
              "max-file": "3"
            }
          }
        dest: /etc/docker/daemon.json
        owner: root
        group: root
        mode: '0644'
      when: enable_docker_api
      notify: restart docker
    - name: Create systemd override directory
      ansible.builtin.file:
        path: /etc/systemd/system/docker.service.d
        state: directory
        mode: '0755'
    # Setting the $container environment variable makes dockerd behave as if
    # containerised, avoiding AppArmor profile failures under nested Incus.
    - name: Create AppArmor workaround for Incus nested Docker
      ansible.builtin.copy:
        content: |
          [Service]
          Environment=container="setmeandforgetme"
        dest: /etc/systemd/system/docker.service.d/apparmor-workaround.conf
        owner: root
        group: root
        mode: '0644'
      notify: restart docker
    - name: Create systemd override for Docker API
      ansible.builtin.copy:
        content: |
          [Service]
          ExecStart=
          ExecStart=/usr/bin/dockerd
        dest: /etc/systemd/system/docker.service.d/override.conf
        owner: root
        group: root
        mode: '0644'
      when: enable_docker_api
      notify: restart docker
  handlers:
    - name: restart docker
      ansible.builtin.systemd:
        name: docker
        state: restarted
        daemon_reload: true

View File

@@ -0,0 +1,19 @@
---
# Example: Ansible Vault Implementation
# Replace fetch_secrets.yml with this file if using Ansible Vault
#
# This implementation does nothing because vault variables are automatically
# loaded from inventory/group_vars/all/vault.yml when using --ask-vault-pass
#
# Usage:
#   cp fetch_secrets.yml.vault_example fetch_secrets.yml
- name: Fetch Secrets (Ansible Vault)
  hosts: all
  gather_facts: false
  tasks:
    # Informational only: printed once per run (run_once), and only when
    # `secret_scope` is not defined.
    - name: Verify vault variables are loaded
      ansible.builtin.debug:
        msg: "Using Ansible Vault - secrets loaded from vault.yml"
      run_once: true
      when: secret_scope is not defined

166
ansible/gitea/app.ini.j2 Normal file
View File

@@ -0,0 +1,166 @@
; Gitea Configuration File
; Generated by Ansible
APP_NAME = Gitea: Git with a cup of tea
RUN_MODE = prod
[server]
PROTOCOL = http
DOMAIN = {{ gitea_domain }}
ROOT_URL = {{ gitea_root_url }}
HTTP_ADDR = 0.0.0.0
HTTP_PORT = {{ gitea_web_port }}
DISABLE_SSH = false
SSH_DOMAIN = {{ gitea_domain }}
SSH_PORT = {{ gitea_ssh_port }}
SSH_LISTEN_PORT = {{ gitea_ssh_port }}
START_SSH_SERVER = true
LFS_START_SERVER = {{ gitea_lfs_enabled | lower }}
LFS_HTTP_AUTH_EXPIRY = 20m
OFFLINE_MODE = false
[database]
DB_TYPE = {{ gitea_db_type }}
HOST = {{ gitea_db_host }}:{{ gitea_db_port }}
NAME = {{ gitea_db_name }}
USER = {{ gitea_db_user }}
PASSWD = {{ gitea_db_password }}
SSL_MODE = {{ gitea_db_ssl_mode }}
LOG_SQL = false
AUTO_MIGRATION = true
[repository]
ROOT = {{ gitea_repo_root }}
DEFAULT_BRANCH = main
DEFAULT_PRIVATE = public
ENABLE_PUSH_CREATE_USER = true
ENABLE_PUSH_CREATE_ORG = false
DISABLED_REPO_UNITS =
DEFAULT_REPO_UNITS = repo.code,repo.releases,repo.issues,repo.pulls,repo.wiki,repo.projects,repo.packages
[repository.signing]
SIGNING_KEY = default
INITIAL_COMMIT = always
[repository.local]
LOCAL_COPY_PATH = {{ gitea_data_dir }}/tmp/local-repo
[repository.upload]
TEMP_PATH = {{ gitea_data_dir }}/tmp/uploads
[lfs]
PATH = {{ gitea_lfs_dir }}
[security]
INSTALL_LOCK = true
SECRET_KEY = {{ gitea_secret_key }}
MIN_PASSWORD_LENGTH = 8
PASSWORD_COMPLEXITY = lower,upper,digit
PASSWORD_HASH_ALGO = argon2
REVERSE_PROXY_LIMIT = 1
REVERSE_PROXY_TRUSTED_PROXIES = 127.0.0.0/8,::1/128,10.0.0.0/8
[service]
DISABLE_REGISTRATION = {{ gitea_disable_registration | lower }}
REQUIRE_SIGNIN_VIEW = {{ gitea_require_signin_view | lower }}
REGISTER_EMAIL_CONFIRM = false
ENABLE_NOTIFY_MAIL = false
DEFAULT_KEEP_EMAIL_PRIVATE = true
DEFAULT_ALLOW_CREATE_ORGANIZATION = true
DEFAULT_ENABLE_TIMETRACKING = true
NO_REPLY_ADDRESS = noreply.{{ gitea_domain }}
[service.explore]
REQUIRE_SIGNIN_VIEW = {{ gitea_require_signin_view | lower }}
DISABLE_USERS_PAGE = false
[mailer]
ENABLED = true
SMTP_ADDR = {{ smtp_host }}
SMTP_PORT = {{ smtp_port }}
FROM = {{ smtp_from }}
[session]
PROVIDER = memcache
PROVIDER_CONFIG = 127.0.0.1:11211
COOKIE_NAME = gitea_session
COOKIE_SECURE = true
[picture]
AVATAR_UPLOAD_PATH = {{ gitea_data_dir }}/avatars
REPOSITORY_AVATAR_UPLOAD_PATH = {{ gitea_data_dir }}/repo-avatars
DISABLE_GRAVATAR = false
[attachment]
PATH = {{ gitea_data_dir }}/attachments
MAX_SIZE = 50
MAX_FILES = 5
[log]
MODE = console
LEVEL = Info
ENABLE_SSH_LOG = true
;; Sub-logger modes using new 1.21+ format
logger.router.MODE = console
logger.access.MODE = console
[log.console]
LEVEL = Info
STDERR = false
[git]
PATH = /usr/bin/git
DISABLE_DIFF_HIGHLIGHT = false
MAX_GIT_DIFF_LINES = 1000
MAX_GIT_DIFF_LINE_CHARACTERS = 5000
MAX_GIT_DIFF_FILES = 100
GC_ARGS =
[git.timeout]
DEFAULT = 360
MIGRATE = 600
MIRROR = 300
[indexer]
ISSUE_INDEXER_TYPE = bleve
ISSUE_INDEXER_PATH = {{ gitea_data_dir }}/indexers/issues.bleve
REPO_INDEXER_ENABLED = true
REPO_INDEXER_TYPE = bleve
REPO_INDEXER_PATH = {{ gitea_data_dir }}/indexers/repos.bleve
[queue]
TYPE = level
DATADIR = {{ gitea_data_dir }}/queues
[metrics]
ENABLED = {{ gitea_metrics_enabled | lower }}
ENABLED_ISSUE_BY_LABEL = false
ENABLED_ISSUE_BY_REPOSITORY = false
TOKEN = {{ gitea_metrics_token }}
[cache]
ADAPTER = memcache
HOST = 127.0.0.1:11211
ITEM_TTL = 16h
[webhook]
ALLOWED_HOST_LIST = *
[oauth2]
ENABLED = true
JWT_SIGNING_ALGORITHM = RS256
JWT_SECRET = {{ gitea_lfs_jwt_secret }}
[oauth2_client]
ENABLE_AUTO_REGISTRATION = true
ACCOUNT_LINKING = auto
OPENID_CONNECT_SCOPES = openid profile email
UPDATE_AVATAR = false
[packages]
ENABLED = true
CHUNKED_UPLOAD_PATH = {{ gitea_data_dir }}/tmp/package-upload
[actions]
ENABLED = true
DEFAULT_ACTIONS_URL = https://github.com

229
ansible/gitea/deploy.yml Normal file
View File

@@ -0,0 +1,229 @@
---
# Deploys Gitea from the upstream binary release:
#   service account -> directories -> binary (auto-upgraded to latest release)
#   -> app.ini -> systemd unit -> optional Casdoor OAuth2 auth source.
- name: Deploy Gitea
  hosts: gitea
  become: true
  tasks:
    # Gate on the host's declared `services` inventory list.
    - name: Check if host has gitea service
      ansible.builtin.set_fact:
        has_gitea_service: "{{ 'gitea' in services | default([]) }}"
    - name: Skip hosts without gitea service
      ansible.builtin.meta: end_host
      when: not has_gitea_service
    # memcached backs the [session] and [cache] sections of app.ini.
    - name: Install required packages
      ansible.builtin.apt:
        name:
          - git
          - git-lfs
          - curl
          - memcached
        state: present
        update_cache: true
    - name: Ensure Memcached is running
      ansible.builtin.service:
        name: memcached
        state: started
        enabled: true
    - name: Create git system group
      ansible.builtin.group:
        name: "{{ gitea_group }}"
        system: true
        state: present
    - name: Create git system user
      ansible.builtin.user:
        name: "{{ gitea_user }}"
        group: "{{ gitea_group }}"
        system: true
        shell: /bin/bash
        home: "{{ gitea_home_dir }}"
        create_home: true
        comment: "Git Version Control"
    # /etc/gitea is root:gitea 0770 so the gitea user can read the templated
    # config below (0640, owned by the gitea user).
    - name: Create Gitea directories
      ansible.builtin.file:
        path: "{{ item.path }}"
        state: directory
        owner: "{{ item.owner }}"
        group: "{{ item.group }}"
        mode: "{{ item.mode }}"
      loop:
        - { path: "{{ gitea_work_dir }}", owner: "{{ gitea_user }}", group: "{{ gitea_group }}", mode: "0755" }
        - { path: "{{ gitea_work_dir }}/custom", owner: "{{ gitea_user }}", group: "{{ gitea_group }}", mode: "0755" }
        - { path: "{{ gitea_data_dir }}", owner: "{{ gitea_user }}", group: "{{ gitea_group }}", mode: "0755" }
        - { path: "{{ gitea_lfs_dir }}", owner: "{{ gitea_user }}", group: "{{ gitea_group }}", mode: "0755" }
        - { path: "{{ gitea_repo_root }}", owner: "{{ gitea_user }}", group: "{{ gitea_group }}", mode: "0755" }
        - { path: "/etc/gitea", owner: "root", group: "{{ gitea_group }}", mode: "0770" }
    # Version detection: tolerate a missing binary (first install) via
    # failed_when: false, then fall back to "0.0.0".
    - name: Get installed Gitea version
      ansible.builtin.command:
        cmd: /usr/local/bin/gitea --version
      register: gitea_installed_version
      changed_when: false
      failed_when: false
    - name: Parse installed version
      ansible.builtin.set_fact:
        gitea_current_version: "{{ gitea_installed_version.stdout | regex_search('([0-9]+\\.[0-9]+\\.[0-9]+)') | default('0.0.0') }}"
      when: gitea_installed_version.rc == 0
    - name: Set current version to 0.0.0 if not installed
      ansible.builtin.set_fact:
        gitea_current_version: "0.0.0"
      when: gitea_installed_version.rc != 0
    # NOTE(review): every run tracks the latest upstream release via an
    # unauthenticated GitHub API call (rate-limited; auto-upgrades in place).
    # Consider pinning a version variable if unattended upgrades are unwanted.
    - name: Get latest Gitea release version from GitHub
      ansible.builtin.uri:
        url: https://api.github.com/repos/go-gitea/gitea/releases/latest
        return_content: true
      register: gitea_latest_release
    - name: Extract latest version number
      ansible.builtin.set_fact:
        gitea_latest_version: "{{ gitea_latest_release.json.tag_name | regex_replace('^v', '') }}"
    - name: Display version information
      ansible.builtin.debug:
        msg: "Gitea: installed={{ gitea_current_version }}, latest={{ gitea_latest_version }}"
    # Stop only on upgrades (not first install) so the running binary can be
    # replaced without "text file busy" errors.
    - name: Stop Gitea before upgrade
      ansible.builtin.systemd:
        name: gitea
        state: stopped
      when:
        - gitea_current_version != gitea_latest_version
        - gitea_current_version != "0.0.0"
    - name: Download Gitea binary
      ansible.builtin.get_url:
        url: "https://dl.gitea.com/gitea/{{ gitea_latest_version }}/gitea-{{ gitea_latest_version }}-linux-amd64"
        dest: /usr/local/bin/gitea
        mode: '0755'
        owner: root
        group: root
        force: true
      when: gitea_current_version != gitea_latest_version
      notify: restart gitea
    - name: Template Gitea configuration
      ansible.builtin.template:
        src: app.ini.j2
        dest: "{{ gitea_config_file }}"
        owner: "{{ gitea_user }}"
        group: "{{ gitea_group }}"
        mode: '0640'
      notify: restart gitea
    - name: Create Gitea systemd service
      ansible.builtin.copy:
        dest: /etc/systemd/system/gitea.service
        mode: '0644'
        owner: root
        group: root
        content: |
          [Unit]
          Description=Gitea (Git with a cup of tea)
          After=syslog.target
          After=network.target
          After=postgresql.service
          [Service]
          RestartSec=2s
          Type=simple
          User={{ gitea_user }}
          Group={{ gitea_group }}
          WorkingDirectory={{ gitea_work_dir }}/
          ExecStart=/usr/local/bin/gitea web --config {{ gitea_config_file }}
          Restart=always
          Environment=USER={{ gitea_user }} HOME={{ gitea_home_dir }} GITEA_WORK_DIR={{ gitea_work_dir }}
          [Install]
          WantedBy=multi-user.target
      notify: restart gitea
    - name: Reload systemd daemon
      ansible.builtin.systemd:
        daemon_reload: true
    - name: Enable and start Gitea service
      ansible.builtin.systemd:
        name: gitea
        enabled: true
        state: started
    # OAuth2 Provider Configuration (Casdoor SSO)
    # Handlers are flushed now so any pending restart happens BEFORE the
    # healthcheck below polls the API.
    - name: Flush handlers to ensure Gitea is restarted before healthcheck
      ansible.builtin.meta: flush_handlers
    - name: Wait for Gitea to be ready
      ansible.builtin.uri:
        url: "http://127.0.0.1:{{ gitea_web_port }}/api/healthz"
        method: GET
        status_code: 200
      register: gitea_health
      until: gitea_health.status == 200
      retries: 30
      delay: 5
      when: gitea_oauth_enabled | default(false)
    - name: Check if Casdoor OAuth source exists
      ansible.builtin.command:
        cmd: >
          /usr/local/bin/gitea admin auth list
          --config {{ gitea_config_file }}
      become: true
      become_user: "{{ gitea_user }}"
      register: gitea_auth_list
      changed_when: false
      when: gitea_oauth_enabled | default(false)
    # Empty --group-claim-name/--admin-group explicitly disable group mapping.
    - name: Add Casdoor OAuth2 authentication source
      ansible.builtin.command:
        cmd: >
          /usr/local/bin/gitea admin auth add-oauth
          --config {{ gitea_config_file }}
          --name "{{ gitea_oauth_name }}"
          --provider openidConnect
          --key "{{ gitea_oauth_client_id }}"
          --secret "{{ gitea_oauth_client_secret }}"
          --auto-discover-url "https://id.ouranos.helu.ca/.well-known/openid-configuration"
          --scopes "{{ gitea_oauth_scopes }}"
          --skip-local-2fa
          --group-claim-name ""
          --admin-group ""
      become: true
      become_user: "{{ gitea_user }}"
      when:
        - gitea_oauth_enabled | default(false)
        - gitea_oauth_name not in gitea_auth_list.stdout
      notify: restart gitea
    # The source ID is scraped from `auth list` output (leading integer of the
    # line matching the configured name).
    - name: Update Casdoor OAuth2 authentication source
      ansible.builtin.command:
        cmd: >
          /usr/local/bin/gitea admin auth update-oauth
          --config {{ gitea_config_file }}
          --id {{ gitea_auth_list.stdout_lines | select('search', gitea_oauth_name) | first | regex_search('^\d+') }}
          --name "{{ gitea_oauth_name }}"
          --provider openidConnect
          --key "{{ gitea_oauth_client_id }}"
          --secret "{{ gitea_oauth_client_secret }}"
          --auto-discover-url "https://id.ouranos.helu.ca/.well-known/openid-configuration"
          --scopes "{{ gitea_oauth_scopes }}"
          --skip-local-2fa
      become: true
      become_user: "{{ gitea_user }}"
      when:
        - gitea_oauth_enabled | default(false)
        - gitea_oauth_name in gitea_auth_list.stdout
      notify: restart gitea
  handlers:
    - name: restart gitea
      ansible.builtin.systemd:
        name: gitea
        state: restarted
        daemon_reload: true

View File

@@ -0,0 +1,56 @@
---
# Deploys the Gitea MCP server as a Docker Compose project on hosts that
# declare `gitea_mcp` in their inventory `services` list.
- name: Deploy Gitea MCP Server with Docker Compose
  hosts: ubuntu
  become: true
  vars:
    required_service: gitea_mcp
  tasks:
    - name: Check if host has gitea_mcp service
      ansible.builtin.set_fact:
        has_gitea_mcp_service: "{{ required_service in services | default([]) }}"
    - name: Skip hosts without gitea_mcp service
      ansible.builtin.meta: end_host
      when: not has_gitea_mcp_service
    # NOTE(review): the user below is a system account but the group is not
    # created with system: true — confirm whether that asymmetry is intended.
    - name: Create gitea_mcp group
      ansible.builtin.group:
        name: "{{ gitea_mcp_group }}"
    - name: Create gitea_mcp user
      ansible.builtin.user:
        name: "{{ gitea_mcp_user }}"
        comment: "{{ gitea_mcp_user }}"
        group: "{{ gitea_mcp_group }}"
        system: true
    # Lets the Ansible connection user read the compose project directory.
    - name: Add group gitea_mcp to Ansible remote_user
      ansible.builtin.user:
        name: "{{ remote_user }}"
        groups: "{{ gitea_mcp_group }}"
        append: true
    - name: Create gitea_mcp directory
      ansible.builtin.file:
        path: "{{ gitea_mcp_directory }}"
        owner: "{{ gitea_mcp_user }}"
        group: "{{ gitea_mcp_group }}"
        state: directory
        mode: '0750'
    # The rendered compose file contains the Gitea access token; keep it
    # unreadable to others.
    - name: Template docker-compose file
      ansible.builtin.template:
        src: docker-compose.yml.j2
        dest: "{{ gitea_mcp_directory }}/docker-compose.yml"
        owner: "{{ gitea_mcp_user }}"
        group: "{{ gitea_mcp_group }}"
        mode: '0550'
    # Group membership granted above only applies to new login sessions.
    - name: Reset SSH connection to apply group changes
      ansible.builtin.meta: reset_connection
    - name: Start Gitea MCP service
      community.docker.docker_compose_v2:
        project_src: "{{ gitea_mcp_directory }}"
        state: present
        pull: always

View File

@@ -0,0 +1,18 @@
# Gitea MCP server container — rendered by Ansible (gitea_mcp/deploy.yml).
services:
  gitea-mcp:
    image: docker.gitea.com/gitea-mcp-server:latest
    pull_policy: always
    container_name: gitea-mcp
    restart: unless-stopped
    ports:
      # Quoted so YAML never misparses "host:container" as a number.
      - "{{ gitea_mcp_port }}:8000"
    environment:
      # Quoted so templated values containing YAML specials (" #", ": ")
      # cannot break parsing of the rendered file.
      - "GITEA_HOST={{ gitea_mcp_host }}"
      - "GITEA_ACCESS_TOKEN={{ gitea_mcp_access_token }}"
    command: ["/app/gitea-mcp", "-t", "http", "--port", "8000"]
    logging:
      driver: syslog
      options:
        syslog-address: "tcp://127.0.0.1:{{ gitea_mcp_syslog_port }}"
        syslog-format: "{{ syslog_format }}"
        tag: "gitea-mcp"

View File

@@ -0,0 +1,36 @@
---
# Tears down the Gitea MCP deployment: compose project, its images and
# volumes, and the project directory. Mirrors gitea_mcp/deploy.yml's gating.
- name: Remove Gitea MCP Server
  hosts: ubuntu
  become: true
  tasks:
    - name: Check if host has gitea_mcp service
      ansible.builtin.set_fact:
        has_gitea_mcp_service: "{{ 'gitea_mcp' in services | default([]) }}"
    - name: Skip hosts without gitea_mcp service
      ansible.builtin.meta: end_host
      when: not has_gitea_mcp_service
    # Guard: docker_compose_v2 errors if the project file is already gone.
    - name: Check if docker-compose.yml exists
      ansible.builtin.stat:
        path: "{{ gitea_mcp_directory }}/docker-compose.yml"
      register: compose_file
    - name: Stop and remove Docker containers, volumes, and images
      community.docker.docker_compose_v2:
        project_src: "{{ gitea_mcp_directory }}"
        state: absent
        remove_images: all
        remove_volumes: true
      when: compose_file.stat.exists
    # dangling: false prunes ALL unused images on the host, not just dangling
    # layers — intentional aggressive cleanup during removal.
    - name: Prune Docker images
      community.docker.docker_prune:
        images: true
        images_filters:
          dangling: false
    - name: Remove Gitea MCP directory
      ansible.builtin.file:
        path: "{{ gitea_mcp_directory }}"
        state: absent

View File

@@ -0,0 +1,110 @@
# Gitea Act Runner configuration
# Managed by Ansible - edit this file, then re-run the playbook.
log:
  # The level of logging, can be trace, debug, info, warn, error, fatal
  level: info
runner:
  # Where to store the registration result.
  file: .runner
  # Execute how many tasks concurrently at the same time.
  capacity: 1
  # Extra environment variables to run jobs.
  # NOTE(review): these look like upstream example values and are injected
  # into every job this runner executes — confirm they should ship as-is.
  envs:
    A_TEST_ENV_NAME_1: a_test_env_value_1
    A_TEST_ENV_NAME_2: a_test_env_value_2
  # Extra environment variables to run jobs from a file.
  # It will be ignored if it's empty or the file doesn't exist.
  # (Resolved relative to the daemon's working directory.)
  env_file: .env
  # The timeout for a job to be finished.
  # Please note that the Gitea instance also has a timeout (3h by default) for the job.
  # So the job could be stopped by the Gitea instance if its timeout is shorter than this.
  timeout: 3h
  # The timeout for the runner to wait for running jobs to finish when shutting down.
  # Any running jobs that haven't finished after this timeout will be cancelled.
  shutdown_timeout: 0s
  # Whether to skip verifying the TLS certificate of the Gitea instance.
  insecure: false
  # The timeout for fetching the job from the Gitea instance.
  fetch_timeout: 5s
  # The interval for fetching the job from the Gitea instance.
  fetch_interval: 2s
  # The github_mirror of a runner is used to specify the mirror address of the github that pulls the action repository.
  # It works when something like `uses: actions/checkout@v4` is used and DEFAULT_ACTIONS_URL is set to github,
  # and github_mirror is not empty. In this case,
  # it replaces https://github.com with the value here, which is useful for some special network environments.
  github_mirror: ''
  # The labels of a runner are used to determine which jobs the runner can run, and how to run them.
  # Like: "macos-arm64:host" or "ubuntu-latest:docker://docker.gitea.com/runner-images:ubuntu-latest"
  # Find more images provided by Gitea at https://gitea.com/docker.gitea.com/runner-images .
  # If it's empty when registering, it will ask for inputting labels.
  # If it's empty when executing `daemon`, it will use the labels in the `.runner` file.
  labels:
    - "ubuntu-latest:docker://docker.gitea.com/runner-images:ubuntu-latest"
    - "ubuntu-24.04:docker://docker.gitea.com/runner-images:ubuntu-24.04"
    - "ubuntu-22.04:docker://docker.gitea.com/runner-images:ubuntu-22.04"
    - "ubuntu-20.04:docker://docker.gitea.com/runner-images:ubuntu-20.04"
    - "node-24:docker://node:24-bookworm"
cache:
  # Enable cache server to use actions/cache.
  enabled: true
  # The directory to store the cache data.
  # If it's empty, the cache data will be stored in $HOME/.cache/actcache.
  dir: ""
  # The host of the cache server.
  # It's not for the address to listen, but the address to connect from job containers.
  # So 0.0.0.0 is a bad choice, leave it empty to detect automatically.
  host: ""
  # The port of the cache server.
  # 0 means to use a random available port.
  port: 0
  # The external cache server URL. Valid only when enable is true.
  # If it's specified, act_runner will use this URL as the ACTIONS_CACHE_URL rather than start a server by itself.
  # The URL should generally end with "/".
  external_server: ""
container:
  # Specifies the network to which the container will connect.
  # Could be host, bridge or the name of a custom network.
  # If it's empty, act_runner will create a network automatically.
  network: ""
  # Whether to use privileged mode or not when launching task containers (privileged mode is required for Docker-in-Docker).
  privileged: false
  # And other options to be used when the container is started (eg, --add-host=my.gitea.url:host-gateway).
  options:
  # The parent directory of a job's working directory.
  # NOTE: There is no need to add the first '/' of the path as act_runner will add it automatically.
  # If the path starts with '/', the '/' will be trimmed.
  # For example, if the parent directory is /path/to/my/dir, workdir_parent should be path/to/my/dir
  # If it's empty, /workspace will be used.
  workdir_parent:
  # Volumes (including bind mounts) can be mounted to containers. Glob syntax is supported, see https://github.com/gobwas/glob
  # You can specify multiple volumes. If the sequence is empty, no volumes can be mounted.
  # For example, if you only allow containers to mount the `data` volume and all the json files in `/src`, you should change the config to:
  # valid_volumes:
  #   - data
  #   - /src/*.json
  # If you want to allow any volume, please use the following configuration:
  # valid_volumes:
  #   - '**'
  valid_volumes: []
  # overrides the docker client host with the specified one.
  # If it's empty, act_runner will find an available docker host automatically.
  # If it's "-", act_runner will find an available docker host automatically, but the docker host won't be mounted to the job containers and service containers.
  # If it's not empty or "-", the specified docker host will be used. An error will be returned if it doesn't work.
  docker_host: ""
  # Pull docker image(s) even if already present
  force_pull: true
  # Rebuild docker image(s) even if already present
  force_rebuild: false
  # Always require a reachable docker daemon, even if not required by act_runner
  require_docker: false
  # Timeout to wait for the docker daemon to be reachable, if docker is required by require_docker or act_runner
  docker_timeout: 0s
host:
  # The parent directory of a job's working directory.
  # If it's empty, $HOME/.cache/act/ will be used.
  workdir_parent:

View File

@@ -0,0 +1,157 @@
---
# Deploys the Gitea act_runner: service account, pinned binary, config,
# systemd unit, one-time registration against the Gitea instance.
- name: Deploy Gitea Runner
  hosts: ubuntu
  become: true
  tasks:
    # default([]) keeps hosts without a `services` var from failing the play.
    - name: Check if host has gitea_runner service
      ansible.builtin.set_fact:
        has_gitea_runner_service: "{{ 'gitea_runner' in services | default([]) }}"
    - name: Skip hosts without gitea_runner service
      ansible.builtin.meta: end_host
      when: not has_gitea_runner_service
    # =========================================================================
    # Service Account
    # =========================================================================
    - name: Create gitea-runner system group
      ansible.builtin.group:
        name: gitea-runner
        system: true
        state: present
    # docker group membership lets the runner launch job containers.
    - name: Create gitea-runner system user
      ansible.builtin.user:
        name: gitea-runner
        group: gitea-runner
        groups: docker
        append: true
        system: true
        shell: /bin/bash
        home: /srv/gitea-runner
        create_home: true
        comment: "Gitea Act Runner"
    # =========================================================================
    # Binary Installation
    # =========================================================================
    - name: Ensure /usr/local/bin directory exists
      ansible.builtin.file:
        path: /usr/local/bin
        state: directory
        mode: '0755'
        owner: root
        group: root
    - name: Check current act_runner version
      ansible.builtin.command: /usr/local/bin/act_runner --version
      register: act_runner_current_version
      changed_when: false
      failed_when: false
    # Downloads only when missing or when the pinned act_runner_version is not
    # already installed.
    - name: Download act_runner binary
      ansible.builtin.get_url:
        url: "https://gitea.com/gitea/act_runner/releases/download/v{{ act_runner_version }}/act_runner-{{ act_runner_version }}-linux-amd64"
        dest: /usr/local/bin/act_runner
        mode: '0755'
        owner: root
        group: root
        force: true
      when: act_runner_current_version.rc != 0 or act_runner_version not in (act_runner_current_version.stdout | default(''))
      notify: restart gitea-runner
    # =========================================================================
    # Configuration
    # =========================================================================
    - name: Copy runner config
      ansible.builtin.copy:
        src: config.yaml
        dest: /srv/gitea-runner/config.yaml
        owner: gitea-runner
        group: gitea-runner
        mode: '0644'
      notify: restart gitea-runner
    # =========================================================================
    # Systemd Service
    # =========================================================================
    - name: Template gitea-runner systemd service
      ansible.builtin.template:
        src: gitea-runner.service.j2
        dest: /etc/systemd/system/gitea-runner.service
        owner: root
        group: root
        mode: '0644'
      notify: restart gitea-runner
    # .runner is written by `act_runner register`; its presence means this
    # host is already registered.
    - name: Check if runner is registered
      ansible.builtin.stat:
        path: /srv/gitea-runner/.runner
      register: runner_registration
    # =========================================================================
    # Registration
    # =========================================================================
    # Admin runner tokens live under Site Administration -> Actions -> Runners.
    - name: Prompt for registration token
      ansible.builtin.pause:
        prompt: |
          Gitea runner registration required.
          Get token from: {{ gitea_runner_instance_url }}/-/admin/actions/runners
          Enter registration token
      register: runner_token
      when:
        - not runner_registration.stat.exists
        - registration_token is not defined
    - name: Set registration token from prompt or variable
      ansible.builtin.set_fact:
        runner_registration_token: "{{ registration_token | default(runner_token.user_input) }}"
      when: not runner_registration.stat.exists
    # Token and instance URL are single-quoted so pasted values cannot be
    # word-split or interpreted by the shell; no_log keeps the token out of
    # Ansible output.
    - name: Register runner with Gitea instance
      ansible.builtin.shell:
        cmd: >
          sudo -u gitea-runner
          /usr/local/bin/act_runner register
          --instance '{{ gitea_runner_instance_url }}'
          --token '{{ runner_registration_token }}'
          --name '{{ gitea_runner_name }}'
          --no-interactive
      args:
        creates: /srv/gitea-runner/.runner
        chdir: /srv/gitea-runner
      no_log: true
      when: not runner_registration.stat.exists
    # =========================================================================
    # Service Management
    # =========================================================================
    - name: Enable gitea-runner service
      ansible.builtin.systemd:
        name: gitea-runner
        enabled: true
        daemon_reload: true
    - name: Start gitea-runner service
      ansible.builtin.systemd:
        name: gitea-runner
        state: started
  # ===========================================================================
  # Handlers
  # ===========================================================================
  handlers:
    - name: restart gitea-runner
      ansible.builtin.systemd:
        name: gitea-runner
        state: restarted
        daemon_reload: true

View File

@@ -0,0 +1,17 @@
# Gitea Act Runner daemon — managed by Ansible (gitea_runner/deploy.yml).
[Unit]
Description=Gitea Runner
# Jobs run in containers, so the Docker daemon must be up first.
After=network.target docker.service
Requires=docker.service
[Service]
Type=simple
User=gitea-runner
Group=gitea-runner
# WorkingDirectory holds .runner (registration) and config.yaml.
WorkingDirectory=/srv/gitea-runner
ExecStart=/usr/local/bin/act_runner daemon --config /srv/gitea-runner/config.yaml
Restart=on-failure
RestartSec=10
Environment=HOME=/srv/gitea-runner
[Install]
WantedBy=multi-user.target

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,15 @@
# Grafana datasource provisioning (Prometheus + Loki) — rendered by Ansible.
# Templated scalars are quoted so values containing YAML specials or
# number-/boolean-looking text cannot change type in the rendered file.
apiVersion: 1
datasources:
  - name: "{{ prometheus_datasource_name }}"
    type: prometheus
    access: proxy
    url: "http://{{ prometheus_host }}:{{ prometheus_port }}"
    isDefault: true
    editable: false
    uid: "{{ prometheus_datasource_uid }}"
  - name: "{{ loki_datasource_name }}"
    type: loki
    access: proxy
    url: "http://{{ loki_host }}:{{ loki_port }}"
    editable: false
    uid: "{{ loki_datasource_uid }}"

113
ansible/grafana/deploy.yml Normal file
View File

@@ -0,0 +1,113 @@
---
# Installs Grafana from the official APT repo and provisions datasources,
# users, dashboards, and (optionally) OAuth via grafana.ini.
- name: Deploy Grafana
  hosts: ubuntu
  become: true
  tasks:
    # default([]) keeps hosts without a `services` var from failing the play
    # (consistent with the other service-gated playbooks).
    - name: Check if host has grafana service
      ansible.builtin.set_fact:
        has_grafana_service: "{{ 'grafana' in services | default([]) }}"
    - name: Skip hosts without grafana service
      ansible.builtin.meta: end_host
      when: not has_grafana_service
    - name: Add Grafana repository
      ansible.builtin.deb822_repository:
        name: grafana
        types: [deb]
        uris: https://apt.grafana.com
        suites: [stable]
        components: [main]
        signed_by: https://apt.grafana.com/gpg.key
        state: present
    - name: Install Grafana
      ansible.builtin.apt:
        name: grafana
        state: present
        update_cache: true
    - name: Create provisioning directories
      ansible.builtin.file:
        path: "{{ item }}"
        state: directory
        owner: grafana
        group: grafana
        mode: '0750'
      loop:
        - /etc/grafana/provisioning/dashboards
        - /etc/grafana/provisioning/datasources
        - /etc/grafana/provisioning/users
    - name: Create dashboards directory
      ansible.builtin.file:
        path: /var/lib/grafana/dashboards
        state: directory
        owner: grafana
        group: grafana
        mode: '0750'
    - name: Template configuration files
      ansible.builtin.template:
        src: "{{ item.src }}"
        dest: "{{ item.dest }}"
        owner: grafana
        group: grafana
        mode: '0550'
      loop:
        - src: "datasource.yml.j2"
          dest: "/etc/grafana/provisioning/datasources/prometheus.yml"
        - src: "users.yml.j2"
          dest: "/etc/grafana/provisioning/users/users.yml"
      notify: restart grafana
    # grafana.ini is only managed when OAuth is enabled; otherwise the
    # package-default configuration is left untouched.
    - name: Template Grafana main configuration
      ansible.builtin.template:
        src: "grafana.ini.j2"
        dest: "/etc/grafana/grafana.ini"
        owner: grafana
        group: grafana
        mode: '0640'
      when: grafana_oauth_enabled | default(false)
      notify: restart grafana
    - name: Configure dashboard provisioning
      ansible.builtin.copy:
        content: |
          apiVersion: 1
          providers:
            - name: 'default'
              orgId: 1
              folder: ''
              type: file
              disableDeletion: false
              updateIntervalSeconds: 10
              allowUiUpdates: true
              options:
                path: /var/lib/grafana/dashboards
        dest: /etc/grafana/provisioning/dashboards/dashboard.yml
        owner: grafana
        group: grafana
        mode: '0550'
      notify: restart grafana
    - name: Enable and start Grafana service
      ansible.builtin.systemd:
        name: grafana-server
        enabled: true
        state: started
        daemon_reload: true
  handlers:
    - name: restart grafana
      ansible.builtin.systemd:
        name: grafana-server
        state: restarted

View File

@@ -0,0 +1,36 @@
# Grafana Configuration - Managed by Ansible
# Do not edit manually - changes will be overwritten
[server]
root_url = {{ grafana_root_url }}
[auth]
# Keep the local login form enabled alongside OAuth so admins can still use
# local credentials. (false = form shown.)
disable_login_form = false
[auth.generic_oauth]
enabled = {{ grafana_oauth_enabled | default(false) | lower }}
name = {{ grafana_oauth_name | default('Casdoor') }}
allow_sign_up = {{ grafana_oauth_allow_sign_up | default(true) | lower }}
client_id = {{ grafana_oauth_client_id }}
client_secret = {{ grafana_oauth_client_secret }}
scopes = {{ grafana_oauth_scopes | default('openid profile email') }}
auth_url = {{ grafana_oauth_auth_url }}
token_url = {{ grafana_oauth_token_url }}
api_url = {{ grafana_oauth_api_url }}
# Map Casdoor user attributes to Grafana
email_attribute_path = email
login_attribute_path = preferred_username
name_attribute_path = name
# Role mapping: first matching group wins; anyone else becomes Viewer.
role_attribute_path = contains(groups[*], 'grafana-admin') && 'Admin' || contains(groups[*], 'grafana-editor') && 'Editor' || 'Viewer'
# TLS settings for internal communication
# NOTE(review): defaulting tls_skip_verify_insecure to true disables cert
# verification toward the IdP — confirm this is acceptable on this network.
tls_skip_verify_insecure = {{ grafana_oauth_skip_tls_verify | default(true) | lower }}
[log]
# Console-only logging — systemd journal captures output, Alloy ships to Loki
mode = console
level = {{ grafana_log_level | default('info') }}
[log.console]
format = text

View File

@@ -0,0 +1,15 @@
# Grafana local accounts — rendered by Ansible.
# NOTE(review): stock Grafana file provisioning covers datasources,
# dashboards, plugins, and alerting; a "users" provider is not a built-in
# provisioning type — confirm what actually consumes this file.
apiVersion: 1
users:
  # Values are quoted so passwords/logins containing YAML specials cannot
  # break or retype the rendered file.
  - name: "{{ grafana_admin_name }}"
    orgId: 1
    login: "{{ grafana_admin_login }}"
    password: "{{ grafana_admin_password }}"
    isAdmin: true
  - name: "{{ grafana_viewer_name }}"
    orgId: 1
    login: "{{ grafana_viewer_login }}"
    password: "{{ grafana_viewer_password }}"
    isAdmin: false
    permissions:
      - permission: 1  # View permission
        role: Viewer

View File

@@ -0,0 +1,92 @@
---
# Grafana MCP Server - Docker Compose deployment on Miranda
#
# Grafana itself runs inside the PPLG stack on Prospero (see docs/pplg.md).
# This playbook deploys the Grafana MCP server container on Miranda, which
# connects back to Grafana on Prospero via the internal Incus network.
#
# Prerequisites:
#   - PPLG stack deployed on Prospero (ansible-playbook pplg/deploy.yml)
#   - Grafana service account token in vault (vault_grafana_service_account_token)
#   - Docker installed on the target host (ansible-playbook docker/deploy.yml)
#
# See also: docs/grafana_mcp.md
- name: Deploy Grafana MCP Server with Docker Compose
  hosts: ubuntu
  become: true
  vars:
    required_service: grafana_mcp
  tasks:
    - name: Check if host has grafana_mcp service
      ansible.builtin.set_fact:
        has_grafana_mcp_service: "{{ required_service in services | default([]) }}"
    - name: Skip hosts without grafana_mcp service
      ansible.builtin.meta: end_host
      when: not has_grafana_mcp_service
    # `retries` only takes effect together with `until` — without it the task
    # would run exactly once.
    - name: Verify Grafana is reachable on PPLG host
      ansible.builtin.uri:
        url: "http://{{ grafana_mcp_grafana_host }}:{{ grafana_mcp_grafana_port }}/api/health"
        method: GET
        status_code: 200
      register: grafana_health
      until: grafana_health.status == 200
      retries: 3
      delay: 5
    - name: Create grafana_mcp group
      ansible.builtin.group:
        name: "{{ grafana_mcp_group }}"
    - name: Create grafana_mcp user
      ansible.builtin.user:
        name: "{{ grafana_mcp_user }}"
        comment: "{{ grafana_mcp_user }}"
        group: "{{ grafana_mcp_group }}"
        system: true
    # Lets the Ansible connection user read the compose project directory.
    - name: Add group grafana_mcp to Ansible remote_user
      ansible.builtin.user:
        name: "{{ remote_user }}"
        groups: "{{ grafana_mcp_group }}"
        append: true
    - name: Create grafana_mcp directory
      ansible.builtin.file:
        path: "{{ grafana_mcp_directory }}"
        owner: "{{ grafana_mcp_user }}"
        group: "{{ grafana_mcp_group }}"
        state: directory
        mode: '0750'
    # The rendered compose file embeds the service-account token; keep it
    # unreadable to others.
    - name: Template docker-compose file
      ansible.builtin.template:
        src: docker-compose.yml.j2
        dest: "{{ grafana_mcp_directory }}/docker-compose.yml"
        owner: "{{ grafana_mcp_user }}"
        group: "{{ grafana_mcp_group }}"
        mode: '0550'
    # Group membership granted above only applies to new login sessions.
    - name: Reset SSH connection to apply group changes
      ansible.builtin.meta: reset_connection
    - name: Start Grafana MCP service
      community.docker.docker_compose_v2:
        project_src: "{{ grafana_mcp_directory }}"
        state: present
        pull: always
    # 405 is acceptable: the MCP endpoint may reject plain GETs while still
    # proving the server is up.
    - name: Verify Grafana MCP container is responding
      ansible.builtin.uri:
        url: "http://localhost:{{ grafana_mcp_port }}/mcp"
        method: GET
        status_code: [200, 405]
      register: grafana_mcp_health
      until: grafana_mcp_health.status in [200, 405]
      retries: 5
      delay: 5
      ignore_errors: true
    - name: Report Grafana MCP health status
      ansible.builtin.debug:
        msg: "Grafana MCP container is {{ 'responding' if not grafana_mcp_health.failed else 'not responding - check docker logs grafana-mcp' }}"

View File

@@ -0,0 +1,18 @@
# Grafana MCP server container — rendered by Ansible (grafana_mcp/deploy.yml).
services:
  grafana-mcp:
    image: mcp/grafana:latest
    pull_policy: always
    container_name: grafana-mcp
    restart: unless-stopped
    ports:
      # Quoted so YAML never misparses "host:container" as a number.
      - "{{ grafana_mcp_port }}:8000"
    environment:
      # Quoted so templated values containing YAML specials (" #", ": ")
      # cannot break parsing of the rendered file.
      - "GRAFANA_URL=http://{{ grafana_mcp_grafana_host }}:{{ grafana_mcp_grafana_port }}"
      - "GRAFANA_SERVICE_ACCOUNT_TOKEN={{ grafana_service_account_token }}"
    command: ["--transport", "streamable-http", "--address", "0.0.0.0:8000", "--tls-skip-verify"]
    logging:
      driver: syslog
      options:
        syslog-address: "tcp://127.0.0.1:{{ grafana_mcp_syslog_port }}"
        syslog-format: "{{ syslog_format }}"
        tag: "grafana-mcp"

117
ansible/haproxy/deploy.yml Normal file
View File

@@ -0,0 +1,117 @@
---
# Installs HAProxy, provisions a TLS certificate (self-signed unless certbot
# manages it), templates the config with validation, and reloads on change.
- name: Deploy HAProxy
  hosts: ubuntu
  tasks:
    # default([]) keeps hosts without a `services` var from failing the play;
    # FQCN module names used throughout for consistency with the other plays.
    - name: Check if host has haproxy service
      ansible.builtin.set_fact:
        has_haproxy_service: "{{ 'haproxy' in services | default([]) }}"
    - name: Skip hosts without haproxy service
      ansible.builtin.meta: end_host
      when: not has_haproxy_service
    - name: Create haproxy group
      become: true
      ansible.builtin.group:
        name: "{{ haproxy_group }}"
        gid: "{{ haproxy_gid }}"
        system: true
    - name: Create haproxy user
      become: true
      ansible.builtin.user:
        name: "{{ haproxy_user }}"
        comment: "{{ haproxy_user }}"
        group: "{{ haproxy_group }}"
        uid: "{{ haproxy_uid }}"
        system: true
    - name: Add group haproxy to ansible_user
      become: true
      ansible.builtin.user:
        name: "{{ ansible_user }}"
        groups: "{{ haproxy_group }}"
        append: true
    - name: Create required directories
      become: true
      ansible.builtin.file:
        path: "{{ haproxy_directory }}"
        owner: "{{ haproxy_user }}"
        group: "{{ haproxy_group }}"
        state: directory
        mode: '0750'
    - name: Create /etc/haproxy directory
      become: true
      ansible.builtin.file:
        path: /etc/haproxy
        owner: root
        group: root
        state: directory
        mode: '0755'
    - name: Create certs directory
      become: true
      ansible.builtin.file:
        path: /etc/haproxy/certs
        owner: "{{ haproxy_user }}"
        group: "{{ haproxy_group }}"
        state: directory
        mode: '0750'
    - name: Check if certificate already exists
      become: true
      ansible.builtin.stat:
        path: "{{ haproxy_cert_path }}"
      register: cert_file
    # -keyout and -out share one path to produce HAProxy's combined
    # key+certificate PEM. Skipped when certbot owns certificates.
    # NOTE(review): verify the generated file really contains BOTH the key and
    # the certificate on the target openssl version.
    - name: Generate self-signed wildcard certificate
      become: true
      ansible.builtin.command: >
        openssl req -x509 -nodes -days 365 -newkey rsa:2048
        -keyout {{ haproxy_cert_path }}
        -out {{ haproxy_cert_path }}
        -subj "/C=US/ST=State/L=City/O=Agathos/CN=*.{{ haproxy_domain }}"
        -addext "subjectAltName=DNS:*.{{ haproxy_domain }},DNS:{{ haproxy_domain }}"
      when: not cert_file.stat.exists and 'certbot' not in services
    - name: Set certificate permissions
      become: true
      ansible.builtin.file:
        path: "{{ haproxy_cert_path }}"
        owner: "{{ haproxy_user }}"
        group: "{{ haproxy_group }}"
        mode: '0640'
    - name: Install HAProxy
      become: true
      ansible.builtin.apt:
        name: haproxy
        state: present
        update_cache: true
    # validate runs `haproxy -c` against the staged file so a broken template
    # never replaces the live config.
    - name: Template HAProxy configuration
      become: true
      ansible.builtin.template:
        src: "haproxy.cfg.j2"
        dest: /etc/haproxy/haproxy.cfg
        owner: "{{ haproxy_user }}"
        group: "{{ haproxy_group }}"
        mode: "0640"
        validate: haproxy -c -f %s
      register: haproxy_config
    - name: Enable and start HAProxy service
      become: true
      ansible.builtin.systemd:
        name: haproxy
        enabled: true
        state: started
    # Reload (not restart) keeps existing connections alive on config change.
    - name: Reload HAProxy if configuration changed
      become: true
      ansible.builtin.systemd:
        name: haproxy
        state: reloaded
      when: haproxy_config.changed

View File

@@ -0,0 +1,114 @@
# HAProxy configuration for Agathos Titania
# Managed by Ansible - Red Panda Approved
global
    log 127.0.0.1:{{ haproxy_syslog_port }} local0
    stats timeout 30s
    # Default SSL material locations
    ca-base /etc/ssl/certs
    crt-base /etc/ssl/private
    # SSL/TLS configuration
    ssl-default-bind-ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384
    ssl-default-bind-ciphersuites TLS_AES_128_GCM_SHA256:TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256
    ssl-default-bind-options ssl-min-ver TLSv1.2 no-tls-tickets
defaults
    log global
    mode http
    option httplog
    option dontlognull
    # Log format with timing information for latency analysis
    log-format "%ci:%cp [%tr] %ft %b/%s %TR/%Tw/%Tc/%Tr/%Ta %ST %B %CC %CS %tsc %ac/%fc/%bc/%sc/%rc %sq/%bq %hr %hs %{+Q}r"
    timeout connect 5s
    timeout client 50s
    timeout server 50s
# Stats page with Prometheus metrics
# NOTE(review): the use-service rule below takes /metrics before the stats
# page configured at the same URI — confirm the stats UI is reachable as intended.
listen stats
    bind *:{{ haproxy_stats_port }}
    mode http
    stats enable
    stats uri /metrics
    stats refresh 15s
    stats show-legends
    stats show-node
    # Prometheus metrics endpoint
    http-request use-service prometheus-exporter if { path /metrics }
# HTTP frontend - redirect all traffic to HTTPS
frontend http_frontend
    bind *:{{ haproxy_http_port }}
    mode http
    option httplog
    http-request redirect scheme https code 301
# HTTPS frontend with dynamic routing
frontend https_frontend
    bind *:{{ haproxy_https_port }} ssl crt {{ haproxy_cert_path }}
    mode http
    option httplog
    option forwardfor
    # Forward original protocol and host for reverse-proxied services
    http-request set-header X-Forwarded-Proto https
    http-request set-header X-Forwarded-Port %[dst_port]
    # Security headers
    http-response set-header Strict-Transport-Security "max-age=31536000; includeSubDomains"
    http-response set-header X-Frame-Options "SAMEORIGIN"
    http-response set-header X-Content-Type-Options "nosniff"
    http-response set-header X-XSS-Protection "1; mode=block"
{# NOTE(review): every entry in haproxy_backends must define `subdomain`
   (possibly empty/null) or rendering fails; entries without a subdomain each
   emit a `default_backend backend_root` line — confirm at most one such
   entry exists. #}
{% for backend in haproxy_backends %}
{% if backend.subdomain %}
    # ACL for {{ backend.subdomain }}.{{ haproxy_domain }} (matches with or without port)
    acl host_{{ backend.subdomain }} hdr_beg(host) -i {{ backend.subdomain }}.{{ haproxy_domain }}
{% if backend.redirect_root is defined %}
    # Redirect root path to {{ backend.redirect_root }} (avoids redirect loop by matching exact path)
    http-request redirect location {{ backend.redirect_root }} code 302 if host_{{ backend.subdomain }} { path / }
{% endif %}
    use_backend backend_{{ backend.subdomain }} if host_{{ backend.subdomain }}
{% else %}
    # Default backend for root domain
    default_backend backend_root
{% endif %}
{% endfor %}
# Backend definitions
{% for backend in haproxy_backends %}
{% if backend.subdomain %}
backend backend_{{ backend.subdomain }}
{% else %}
backend backend_root
{% endif %}
    mode http
    balance roundrobin
{% if backend.ssl_backend | default(false) %}
    option httpchk
    http-check send meth GET uri {{ backend.health_path }} hdr Host {{ backend.subdomain }}.{{ haproxy_domain }}
{% else %}
    option httpchk GET {{ backend.health_path }}
{% endif %}
    http-check expect status 200
{% if backend.timeout_server is defined %}
    timeout server {{ backend.timeout_server }}
{% endif %}
    server {{ backend.subdomain or 'root' }}_1 {{ backend.backend_host }}:{{ backend.backend_port }} check{% if backend.ssl_backend | default(false) %} ssl verify none{% endif %}
{% endfor %}
{% for tcp_backend in haproxy_tcp_backends | default([]) %}
# TCP passthrough: {{ tcp_backend.name }}
frontend {{ tcp_backend.name }}_frontend
    bind *:{{ tcp_backend.listen_port }}
    mode tcp
    option tcplog
    default_backend {{ tcp_backend.name }}_backend
backend {{ tcp_backend.name }}_backend
    mode tcp
    server {{ tcp_backend.name }}_1 {{ tcp_backend.backend_host }}:{{ tcp_backend.backend_port }} check
{% endfor %}

View File

@@ -0,0 +1,17 @@
# Home Assistant core configuration (static variant).
# Loads default set of integrations. Do not remove.
default_config:
# ISAL accelerates aiohttp
isal:
# Load frontend themes from the themes folder
frontend:
  themes: !include_dir_merge_named themes
automation: !include automations.yaml
script: !include scripts.yaml
scene: !include scenes.yaml
homeassistant:
  media_dirs:
    # Exposed in the media browser under the name "media";
    # the value is the mount point on the host.
    media: /mnt/media

View File

@@ -0,0 +1,33 @@
# Home Assistant configuration (rendered by Ansible from configuration.yaml.j2).
# Loads default set of integrations. Do not remove.
default_config:
# ISAL accelerates aiohttp
isal:
# Load frontend themes from the themes folder
frontend:
  themes: !include_dir_merge_named themes
automation: !include automations.yaml
script: !include scripts.yaml
scene: !include scenes.yaml
homeassistant:
  media_dirs:
    # Quoted so an empty expansion or a path containing YAML specials
    # cannot break parsing of the rendered file.
    media: "{{ hass_media_directory }}"
# HTTP configuration for reverse proxy (HAProxy on Titania)
http:
  server_port: {{ hass_port }}
  use_x_forwarded_for: true
  trusted_proxies:
    - 10.0.0.0/8
# PostgreSQL recorder (Portia)
recorder:
  db_url: "postgresql://{{ hass_db_user }}:{{ hass_db_password }}@{{ hass_db_host }}:{{ hass_db_port }}/{{ hass_db_name }}"
  purge_keep_days: 30
  commit_interval: 1
# Prometheus metrics endpoint (empty mapping enables it with defaults)
prometheus:

139
ansible/hass/deploy.yml Normal file
View File

@@ -0,0 +1,139 @@
---
# Deploy Home Assistant to hosts that advertise the 'hass' service:
# create the service user, install Python 3.13 (Deadsnakes), build a
# virtualenv, template the configuration, and manage the systemd unit.
- name: Deploy Home Assistant to Dev Environment
  hosts: ubuntu
  vars:
    # Group-share Ansible's remote tmp files so become_user hass can read them.
    ansible_common_remote_group: "{{ hass_group }}"
    allow_world_readable_tmpfiles: true
  tasks:
    - name: Check if host has hass service
      ansible.builtin.set_fact:
        has_hass_service: "{{ 'hass' in services | default([]) }}"

    - name: Skip hosts without hass service
      ansible.builtin.meta: end_host
      when: not has_hass_service

    - name: Create hass user
      become: true
      ansible.builtin.user:
        name: "{{ hass_user }}"
        comment: "{{ hass_user }}"
        system: true
        create_home: false

    - name: Add group hass to user {{ remote_user }}
      become: true
      ansible.builtin.user:
        name: "{{ remote_user }}"
        groups: "{{ hass_group }}"
        append: true

    - name: Create required directories
      become: true
      ansible.builtin.file:
        path: "{{ item.path }}"
        owner: "{{ hass_user }}"
        group: "{{ hass_group }}"
        state: directory
        # Leading zero makes the octal intent explicit (ansible-lint risky-octal).
        mode: "0750"
      loop:
        - path: "{{ hass_directory }}"
        - path: "{{ hass_media_directory }}"

    - name: Add Deadsnakes APT repository
      become: true
      ansible.builtin.apt_repository:
        repo: ppa:deadsnakes/ppa

    - name: Install Python 3.13 and build dependencies
      become: true
      ansible.builtin.apt:
        name:
          - python3.13-dev
          - python3.13-venv
          - build-essential
          - libffi-dev
          - libssl-dev
        state: present
        update_cache: true

    - name: Create virtual environment
      become: true
      become_user: "{{ hass_user }}"
      ansible.builtin.command:
        cmd: python3.13 -m venv {{ hass_directory }}/env
        # Idempotence guard: skip once the venv exists.
        creates: "{{ hass_directory }}/env/bin/activate"

    - name: Template configuration files
      become: true
      ansible.builtin.template:
        src: "{{ item.src }}"
        dest: "{{ hass_directory }}/{{ item.dest }}"
        owner: "{{ hass_user }}"
        group: "{{ hass_group }}"
        mode: "0550"
      loop:
        - src: configuration.yaml.j2
          dest: configuration.yaml
        - src: requirements.txt.j2
          dest: requirements.txt
      notify: restart hass

    - name: Create systemd service file
      become: true
      ansible.builtin.template:
        src: hass.service.j2
        dest: /etc/systemd/system/hass.service
        mode: "0644"
      notify: restart hass

    - name: Install Python packages from requirements
      become: true
      become_user: "{{ hass_user }}"
      ansible.builtin.pip:
        requirements: "{{ hass_directory }}/requirements.txt"
        virtualenv: "{{ hass_directory }}/env"
        virtualenv_python: python3.13
      vars:
        ansible_common_remote_group: "{{ hass_group }}"
        allow_world_readable_tmpfiles: true
      notify: restart hass

    # Pick up the group membership added for {{ remote_user }} earlier in the play.
    - name: Reset SSH connection to apply group changes
      ansible.builtin.meta: reset_connection

    - name: Enable and start Home Assistant service
      become: true
      ansible.builtin.systemd:
        name: hass
        enabled: true
        state: started
        daemon_reload: true

  post_tasks:
    - name: Wait for Home Assistant to initialize
      ansible.builtin.pause:
        seconds: 30
        prompt: "Waiting for Home Assistant to initialize..."

    - name: Check if Home Assistant is running
      ansible.builtin.uri:
        url: "http://localhost:{{ hass_port }}/"
        method: GET
        status_code: 200
        timeout: 10
      register: hass_status
      # Best-effort probe: a failed check should not abort the play.
      ignore_errors: true

    - name: Show Home Assistant status
      ansible.builtin.debug:
        msg: "Home Assistant is {{ 'running' if hass_status.status == 200 else 'not running properly' }}"

  handlers:
    - name: restart hass
      become: true
      ansible.builtin.systemd:
        name: hass
        state: restarted
        daemon_reload: true

View File

@@ -0,0 +1,18 @@
[Unit]
Description=Home Assistant
# Order after (and pull in) network-online so the HTTP listener does not
# start before the network is actually configured.
After=network-online.target
Wants=network-online.target

[Service]
Type=simple
User={{ hass_user }}
Group={{ hass_group }}
WorkingDirectory={{ hass_directory }}
# Call the venv entry point directly: its shebang already selects the
# virtualenv's Python, so no bash/activate wrapper is required.
ExecStart={{ hass_directory }}/env/bin/hass --config {{ hass_directory }}
Restart=always
RestartSec=3
SyslogIdentifier=hass
StandardOutput=journal
StandardError=journal

[Install]
WantedBy=multi-user.target

View File

@@ -0,0 +1,2 @@
# Rendered by Ansible and installed into the Home Assistant virtualenv.
wheel
# Pin Home Assistant to the inventory-defined release.
homeassistant=={{hass_version}}

View File

@@ -0,0 +1,36 @@
---
# SSH Authorized Keys Configuration
# Manages authorized_keys files across all ubuntu hosts
#
# Usage:
#   ansible-playbook ssh_keys.yml
#
# To override exclusive mode (remove unlisted keys):
#   ansible-playbook ssh_keys.yml -e "ssh_exclusive_mode=true"
# When true, removes any keys not in this list (use with caution!)
ssh_exclusive_mode: false
# List of users and their authorized SSH public keys
# Each user entry requires:
#   - name: username (must exist on target hosts)
#   - keys: list of SSH public key strings
#
# Example:
# ssh_authorized_users:
#   - name: robert
#     keys:
#       - "ssh-ed25519 AAAAC3Nza... user@host"
#       - "ssh-rsa AAAAB3Nza... another@host"
#   - name: deploy
#     keys:
#       - "ssh-ed25519 AAAAC3Nza... deploy-key"
#
# Only PUBLIC keys belong here; private keys must never be committed.
ssh_authorized_users:
  - name: robert
    keys:
      - "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIH0xFMMSa1SeMPbX84zJOKWHAT3HtMRuWmNA7GGKr1uw robert@Hercules"
      - "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIBTcpW11Vb3w1Bi77WCAM5K9Q2vz9MW5PdBpiAIXhjn3 robert@Norma"
  - name: harper
    keys:
      - "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOVvIshMkRx1f9m2TTJ1lMHzsaBnuxZdoMFm6hmuzZzo harper@caliban"

View File

@@ -0,0 +1,107 @@
# Red Panda Approved Sandbox Environment Variables
remote_user: robert
remote_group: robert
deployment_environment: "agathos"
ansible_python_interpreter: /usr/bin/python3
# Incus configuration (matches terraform.tfvars)
incus_project_name: agathos
incus_storage_pool: default
# Gitea Runner
act_runner_version: "0.2.13"
gitea_runner_instance_url: "https://gitea.ouranos.helu.ca"
# Release versions for staging playbooks
anythingllm_rel: master
athena_rel: master
athena_mcp_rel: master
argos_rel: master
arke_rel: master
angelia_rel: master
kairos_rel: master
kairos_mcp_rel: master
spelunker_rel: master
mcp_switchboard_rel: master
kernos_rel: master
# PyPI release version (no 'v' prefix) - https://pypi.org/project/open-webui/
# Quoted like act_runner_version so a future two-part value (e.g. 0.8)
# can never be parsed as a float.
openwebui_rel: "0.8.3"
# MCP URLs
argos_mcp_url: http://miranda.incus:25534/mcp
angelia_mcp_url: https://ouranos.helu.ca/mcp/
angelia_mcp_auth: "{{ vault_angelia_mcp_auth }}"
caliban_mcp_url: http://caliban.incus:22021/mcp
gitea_mcp_url: http://miranda.incus:25535/mcp
gitea_mcp_access_token: "{{ vault_gitea_mcp_access_token }}"
github_personal_access_token: "{{ vault_github_personal_access_token }}"
grafana_mcp_url: http://miranda.incus:25533/mcp
huggingface_mcp_token: "{{ vault_huggingface_mcp_token }}"
neo4j_mcp_url: http://circe.helu.ca:22034/mcp
nike_mcp_url: http://puck.incus:22031/mcp
korax_mcp_url: http://korax.helu.ca:22021/mcp
rommie_mcp_url: http://caliban.incus:22031/mcp
# Monitoring and Logging (internal endpoints on Prospero)
loki_url: http://prospero.incus:3100/loki/api/v1/push
prometheus_remote_write_url: http://prospero.incus:9090/api/v1/write
syslog_format: "rfc3164"
# Docker configuration
docker_gpg_key_url: https://download.docker.com/linux/debian/gpg
docker_gpg_key_path: /etc/apt/keyrings/docker.asc
docker_gpg_key_checksum: sha256:1500c1f56fa9e26b9b8f42452a553675796ade0807cdce11975eb98170b3a570
# RabbitMQ provisioning config
rabbitmq_vhosts:
  - name: kairos
  - name: spelunker
rabbitmq_users:
  - name: kairos
    password: "{{ kairos_rabbitmq_password }}"
    tags: []
  - name: spelunker
    password: "{{ spelunker_rabbitmq_password }}"
    tags: []
# Regex privileges; quoted so the patterns are unambiguously strings.
rabbitmq_permissions:
  - vhost: kairos
    user: kairos
    configure_priv: ".*"
    read_priv: ".*"
    write_priv: ".*"
  - vhost: spelunker
    user: spelunker
    configure_priv: ".*"
    read_priv: ".*"
    write_priv: ".*"
# SMTP (smtp4dev on Oberon)
smtp_host: oberon.incus
smtp_port: 22025
smtp_from: noreply@ouranos.helu.ca
smtp_from_name: "Agathos"
# Release directory paths (quoted: a bare leading '~' is YAML null when alone,
# so keep these unambiguously strings; expansion is done by the consumer)
github_dir: "~/gh"
repo_dir: "~/dv"
rel_dir: "~/rel"
# Vault Variable Mappings
kairos_rabbitmq_password: "{{ vault_kairos_rabbitmq_password }}"
spelunker_rabbitmq_password: "{{ vault_spelunker_rabbitmq_password }}"
caliban_x11vnc_password: "{{ vault_caliban_x11vnc_password }}"
grafana_service_account_token: "{{ vault_grafana_service_account_token }}"
# Home Assistant
hass_metrics_token: "{{ vault_hass_metrics_token }}"
# Namecheap DNS API (for certbot DNS-01 validation)
namecheap_username: "{{ vault_namecheap_username }}"
namecheap_api_key: "{{ vault_namecheap_api_key }}"
# OAuth2-Proxy Vault Mappings (used for SearXNG auth)
# Note: These must be set in vault.yml after configuring Casdoor application
# vault_oauth2_proxy_client_id: "<from-casdoor-application>"
# vault_oauth2_proxy_client_secret: "<generate with: python3 -c 'import secrets; print(secrets.token_urlsafe(32))'>"
# vault_oauth2_proxy_cookie_secret: "<generate with: python3 -c 'import secrets; print(secrets.token_urlsafe(32))'>"

View File

@@ -0,0 +1,415 @@
$ANSIBLE_VAULT;1.1;AES256
63343266373930636632373764653162353131386330313565656139663132373764303333623361
3866643138386134396330643832303263346633653566330a376434643031326663383165393266
31306366643937396161633864653962313063316133623966333863663832306437393637656335
3061333530343639620a623663303836373633623266393932393238393338306534323062656363
32663032333131663138623533613136376666646163346463613563656365393038363733373663
63323138323338316534616432396636646262393461653761356664623662633962343866366234
39313330383565623239353031366630373531623033333836316233663436346535356166623962
65613333613634626634333064613564616462396136373939636433383162366266636331373365
61383839666563343365393934353764633635626130363562633432643633373431373563656162
62373236646138313636623838653065333038666364613531306637623731353565313032623765
66306634646562643234366365643534366430306333323239656435663732333438343262316166
65306539663363616638643036656136666432373164386636363038376263663636663662656662
66353837636162653462626430343835306564336365373731643931353766653165363463316466
32636431643863633531313464303937313564383636623330663061643466393734646462633236
66643430333731623564336430363061616638356561346262326236663763316563303939393865
36366632393034313866386234643832653861613330306337353731396537633162363934653863
62623030366263343732336634343134383861323130366461343930353335356461353735386161
63306364363430623136616437643765313835363834313432326439323432353463656661623139
63313738393832323031373238333065646538303331316132636663616663326139323765333231
38663362646664663835316164343533353663393666653865326439613431646262376566333063
64346436363933313639326233383934623539396334363431653439303332316534646464613565
36383031613162343362643230336634303766613536376666623335663433363035313363633065
38373530343530336132343038323765656436306537353863326238363263626264636434393564
35363730626434643538643136653766313966616336663666323034643461373462346466316130
38343736323730623037393639323065616639373533333265333266366161393962303732353034
62326534613736643335373461666139346661353335313638333339656238316136323262343330
64396166336466646635376262323563313431393662663138323335313763623066663561653530
66333362313833346365383666313461383434623734336336656536343633623163666664373232
61303635646138346338653730656164303966663533643036323131323862363065323631396364
35663433366363613962303664383032363065656139656532306162353238653464316331323166
65373364633834633063626334343365323466383264633763306266333732653935363835623039
33626437383138343839653539653361373032363536633734666330303131346534323333663131
33623935663663636261313030306366326631316130363663373133616262633137356132393465
31353464666365666333313639346439313334313861346461303663366161303038323162366564
31313032623538353230306339383133363662323761366431366563396464663935316334613730
65633532306132313032356630656630313135626664306138383264666430633831386661653236
38376530343635656530326466346337623564303162373536386534626237356639333333656339
37376630373037643830643334656461323735646438313664353961353464306431353438623631
33646464383663373734373863383663393561633234656261353139616534646331396465653766
37666236643363363637666463616137613932306462363035623039653532303262356363626434
31343530333235373835363732643232373261376464363363313464306537316530306430653536
65653563363763633737626334393735643563363730623262363265326561666563396438636637
36363036333331373361306663613562623931303037333538363663666362616636633963386266
34303837653032383261333037363765633234663061316231313766396637386464306430613439
33636332343335636532333662633632663664346133353865613062343331356637323366653961
31393733333139316462336564363761626636616561336165323830363732323035326138343364
34316231303533353637393962316561666232636339396533666435633631313234336530336235
33346339346237336236326330343939366163646138616237643038396136323235383737306537
30366665316661316463633163663835353435656330633966333863356633643163313734303161
34373231373439663937656363663662376539643739663331623239366237306365353663323937
38646239303964633030376639363365346461333336313965636364336632626435363162366131
37323961326330343734633430666636663633363866656466346236383631633939373531323830
30393133646431316532333061643164313639636138636536393666343035646166363539623034
39653932313761633664386335393635366631333334663137313662303031343462346337633238
35363334373738313830333833613134643066356637366538326264333161366564323861343862
63616462346535323434353363323537653163643839666534353931653262366666303236383365
34343066373065373338666135386133366365633138346465323565313864376564343830323564
36633261353335386438393437353238626333323539666337353932613034326534333466336431
65353065303433613236313435353164313539353535353564653062343037306137616639323062
34656535313133373264383236646234323366386238616563336330636632313263663861383432
61306435653130663938663633383530356365313531393561373530396165383034373933303537
66313732396162393266313637623063633065653463393165383864343965346136383939363531
64663438643139336230653464313736393439326430333864353231613932393462623333386539
38306538653633656239646364356433323530306138393863386533623636333832333534616237
65353164383362366464623737336162326162393965646435373532373639386533386132343765
31353230316432383038623762346164383130323264383933393236643066333166643665383164
64663965323235643632333435663065333662376537386130313163633361613733306466333338
65613537353133643632393661353633366631386564636136643635623534353630616337363633
66346137326335376665383032663039373462363865356532386530396535303234333261373536
35396137613063336362653561376235373932383465393462306138656539316336643039303864
37393434336265376161346664393333666335343764333465313165663438643263353633633065
36353662653566663536396565616366333631643966656632666164343030643734353230323938
37303531326531386563623365306161396336386634383264386563323365653731323865383930
64333738353633646366353666643461653965633037333039623366636233356365313765363031
66373262373935306632663066656263353934343061323761366262643937396164336435636139
37663366353165646238353239646335643333383233383237633161363762616339643632346663
36313433643439303036386639343564643061393833663933343032663830383864306363356632
37326135343364616264353434663234363861313066306630646366356436323939353661383563
63363031656539626136336130633432646531653831616232643961613462393061383433653939
62343735306435666231656563616536346639646139336361613637663931393331323138303939
66343762363032663764336465333264353765613265373536656538666538663866336237303466
64656534356431643236353133316435353831633339386134333839386138316661383165633166
64323262616565303065643636383038363235633036343833303163353530666331323363623961
35613130326330306539306161633764653138383839336466646262373433653466353236356563
65616432663066376639663539303863396637373533623232303031336365373861366262656532
64313163633732623030646234386133613935633134613763323536343831626135343164383734
63646135393461333463343934333362333365363237356430306162666631333235316363376566
64383632353736363537653434363037613931313761383866306465326433336465316633303763
38306364613037363537343839353938326138623063323735653834313639663739323139636437
38663665333839623736323435386332393738643466316666386631396532633865383665323965
32373130393438656431323861383035386262353261313534646339626535393538393862366530
34636136363165363863646538653430376236313733613830353665616262303836353338616232
66376337633831623531613530356138666330373661646133666666316538386661386536363061
66336332393439646231303634376364623131653536373464323233333531636238326530333539
62653937363162646232633134646438643735653237396163396631656439366433323038323438
36393262303664356637363739633836336631326466363639323765633839373164316139323534
30313862316135323131633337376566656665613735613934383439306266623938356231626639
35373934396335333138343263316538613535343162613637313239376235346539393832343939
64306261323965613066393865663939316566366262626132616664303132616565383838353961
66303439646565386138366533393564353762323339373366393532383935343665653035346636
62313661636138393930346362656638333230336537336336616634383561356661366136616631
63626264666439656439336533633362393930336535326636633436646264613866356562376234
33616239326633643533323637323638346631613264383931373834666633346437323161303466
36353466396633656461653432393563366231613565663335666432343838326631623861666136
34373264383435656665616365666334373135666566363738633962393861303635363935346638
36323761633535633131356235613462636438616431346465323862373038353530666464323064
31356233326161633838353334353632616232343164616664396437666563393266653132313939
65303465303137353132646163376463343563333331666637656361336538333030313736343836
61646339643833346663396661383735626261316239636265343837393161633333616436373064
31663635653863613638353236393666323364616535363965633136656262386166656135326363
35373030336139343062333830363734653839633830356138316431363962356538363837306337
64313962666261663435626236356666333834373261393165316436353936616437343035326262
61313535666233303366376533383237316138373430636662323565646564333333333436636339
38643731666462613533353030383535666561643637306565616232613666653435316639653362
31333563336362373061636139373034373337343261343336613165653438393037316562643766
30613034353133653936616562663039363533366438336638306461636533363633633166646163
39303765393133333536643636326238363534653465313833323461656531386637323730616139
38663830653363333732643464366235336661613732643163323232393264363637313032336230
39373636353231386361326137613732623238613233323131613836663630633634346532633639
32333239316365666436323565656265643661663036363163393861356138326463353862663063
66633462636632386438613165613766653965656435313231623739393162663562393033333237
66363162653936663637626564613063323865616163393739623437313235366662656665333063
63353234366436333739386339636436626532303261616332643834306238633334303436353139
30626361623637653731316539313966656538653033383362356366646233363664373566383365
35306331366161336432613962333436666539643536623165636130326230346364323437353730
63323866316632353261663965356431323633313234613563306135346265333431653033633430
30303861343161636264383235656638373832626436633035343239313939626534343739303063
65633537383935306161386262386561333862313332313639653032373965343635353063636262
61313733323135643831353266363134326534616634616638383138373630343434623865343035
62306165623335366434393164663631326535393965393064623133396264366138626363343234
63383833643937356462653331633766383363336539653061636566353732353130643861396633
64633336343263376132386134326665613762386435313665353537346238346132306232633937
33373264333865333031353231316266376530383830626163366564343939623930376565646365
64626564653761356230346537333037323937323066393463626137656539326565363734626231
32373264663031343963646535653031366666623061393736393164373137613466393935623835
34663735323439643534366263663432393433346533363333386230656237656130383731366330
36623538343535643062346166613362333532633263316335333262346161613439353639383564
34646338633537323035623734353933306534646438386537643166633632333365383634376431
34336661313430383661623739386436613734373837353765313235616632366464613339353532
32613938623038303834346337383461663963616466313666323639336130623761383133373031
33643139373466323662616330656562303061613730646461373033363261666632613836613539
30633539313331373366353638383661613037393137383162313037666163643566346166653761
31633139356163663033336362393535336163313037616530633365616234393262666433616239
38653830333063373736323238653430626530323431653133316533613836333736373966376666
39343738663532343731316661386537336535363764343537303037653261633432363734333362
64393239333564633837646666343933323834323336666538656665653637653338383463656661
66643464306338636330323764363437656236383339636532353162663438646335363534626437
34386231356161623737636436633636306636646162333537663663303532626436656430343161
64393435656665333837333266373863376265343935666333353765363437653033323866653838
64643039343263326166613432666365383264663165663536376433333162306265383566336266
31626239383432333934363734666535303334616535656630323363393436626436616335396662
32373432656632376333376630653465366336393264643462386162376134396239646439396466
63373934336437386663633766666634626665353263343361376130333261666162393334383563
64626436363765353963373665306131343739356539616464363234633739356233646664376566
66363833336532633439323563316131303065616633336137336232346238656237616235333764
39303035663635356531393936303766643834333736666461353132623233373862343264363635
33653439613761383164346637653636653131373030656131333934396431616365353861636461
65303432616664653534643539386431656338656663313863656138313261373062636337366637
65643464626435333634313463623130313535653831303765306531623935313563366238363330
64383763623131346664643461653764623565616365636662633535376366303566306261386165
62353532663133326433303638666334616235613937623231656531656361333738323939663238
62646130623732336332313865376136373937643533666531383332303465353438393733306562
35326265356361346465346332623262346366306435613531303236653836353466323965316538
30383439336431346332336332626564333530373461343738346530336562646439306636336433
66353234663930613835393632336532373531633437666365336231643537643764373431373866
34306565623530393934363932616164393534396334363766393132306466313338366335643638
37626562656362393464353061373638393430366331376139643664383836613639383764393230
38613861323536653864346635343065333734346631386231306630663639343863343033636231
35363731626533383930313130313438656532323161633736646365353663383166383062616364
61396631373131356134343563666466633937653766356561353437363566383161386564643333
36303363616262663066343532336632353262633763393964616438316261343432626264616666
62643164383234666465313961333966363933323665323730633931336538353537393239386635
65663263376461636561383032353337346264323662373631616537653930356338656264303766
38656239363539313961363463396139363133666134303936633061663036336538666163323664
32613234303935353837616566373163383861336166346466646262386563373661623033623864
35383534353866303764343661306138646265303439343036393462623163313064643433623965
37343438636539313862626632383831636334333664636131303234383330393334663837336436
34333032653630633336383535656666393962383863643333616264353163663939373039303337
62323965626662613435306636363732376433343132646661396665336432653232646637353230
65303465373137613266333130623063636566663265613435643464303961633962396334663365
38356161656563333966623935393633326565336533613666663834363561373334363434643839
39333765303137656362356233386366303736653031643431663138336566383264373536353234
39326137653634373235303466363663336662653036363338663363616432393135356231656236
64653836663033333639626533376237356163343961323539323964666239343738346230323337
63663163666537343463623565633337393036653037656331383736393930373239333631343930
65656663656663646235313364333062663938393537303261313032663161383535386365326662
38643764373134336636306338323634386438396563643662393132303561666363663464616535
62653865633736386233386630306238623563306139353038613737363031393232613934626533
65616139656265306337663165323338316665613138336164653637373738656332376563346137
62386161653836633732376161316562393436363536333132356136396361316534343135656334
35626436396464663832383336313235346331626464313835646466393966613835353537663962
61633433633134373765373839386663316266643834353533353936313633613436633530666339
64313962383735313665393261666564656430366563633835343565316335383738653539376334
66663334653333616464613531376562393639343765643435663835383439343230393562376532
32323337333438323463346466356533386234303465643739663261396637646536353233326332
61366232653232343834393765323163636432356234393766353365623636353930336163663434
64343535383239343862653661393962643861313764636666376362653532383936626564353539
37306133313833623361396535333235663034343264663131313061353766396365643639396663
33396630353234336336353034636630613365613964613866313331356539623538616138623539
39663466646638393436336438653039306166303066303761393838353861666165393035623065
37626265646436636362613033363066326138666261353931343063363736333135366638626338
39656466393964346565313839343036356538353464663234643164323865313764346661393066
66663139353335393936613366383835613030616465613162653763333530653665633830643038
35363662316566356637313463643461663833646563396635353036616330643565386239343139
39643533313664613634326637333136626137323833326161663635623235666530303466373535
30366234303134373733383138366462323062396362626662306234353863336337633263353637
64613531653436393562343936666336343231383935353264313536323037356638663733376165
38613333326263646337303630323761386439616333613566333431376638333165613966373962
30326434663130653434373130393863386163616537343034316462343865616537313364346138
34363535663539663630383333343836373065623030393135373531663961646661376332363834
61363331643464323966653737613130376434666362623765386632653665373834396663613963
63323262323363303733613731663066383261363938303563363462396238643034623437363464
61383566623764366132386465666630623461656431326333633066303034663262343439613634
31646662663837663161623036613631363163656364396531363235616133376633323361393535
32636631613239353637313337643536393538363531373636336563646333333533663563623131
62393765323432633561613338336362343665633865326130636635366534313837636362373138
31343231353837366262393237636464313736343063313536383438366263386331393039383033
39353536326462376263376263303835393331613562633966623763636562613364376239656635
39363639303938373237393531373538623739626431343939363063623964343138623763616639
31666566333966306264346263663333343139333765376135383633386137313035373239663833
39346137333465633239353761393666653231363264383331353435393864626461333863363966
34663062376537353133346130303330656631386164613263333933333438346132303362353031
64393338376631343131366362643766396137346431653439323338353338396235333630313233
36323131643837366237623333643134373666616362663464656364346436323037663135373462
37303063303033346230373134393366376636393431666136366636383038333966356561386232
35653766303235336334656336373339303039353935313239303838633236306433666336666664
66653735633236343235663766383964386237666437386362626336323136393530343839383865
39343231356164646530393439613832383364316234353733363865616439646239303231653263
34356564316236343837386261343430323935323066633938613764613465306137653265656132
61346633616139343430616630643333663636643731356266356530623030636538303737383462
33373962393235636266393364336331643566366266636162613334333639626430393965343065
34393261356164336166613063633039346165633263633336626338653762336338633033313239
65326334653464613330346430393138356331373861656161323736376434396464376136663434
32343461333934346534343561613530386661396562343730656630313064643766653030363239
30313064323234316638613733613939303830323736653931393663346130323361663265396634
62613831313837646364646363363431633033393137326136353363656637343137656539343730
66386139356261386137613331336266623239383764306265323635383338646332636337623230
61346462636134653133333733643231356663633032323332626136663232353964656166613763
32386565316131316134323933353133343034666135383635396535646435396365626631306665
61353366633865623765306663383837303037303532666135333461303334636639336665376338
63653338313463373465323536303732303463356166393365333264623537613362656331356337
34336564343361663039396264646566383330313739643566663861363661353263383531623632
31616230643239386561323432356237336361333561643831373132393437323036623962343666
36376530373938613539613734333465616133663833623635333262306138633639666236613661
32393236653637376265633131336265633333393836663835313765666130323631343537356332
38653238646630643535393032653263373131313335663161653264636435643135363063323135
66636666666437326261396664616164303239333666323463373662313463656361386461636438
30383238643336613861313265366361636664303565333732326134613537376261656639623739
66313633633764313632663462323862356265353432306362373138393838313334656137353039
33363933323036653561303933613832633263626562623836633030326630316363653834656166
37326139306432636566666534343661326565343330616232393434653634306563383962376633
62373034646139356662353139326163323439666461363937616565323639663237393939643632
35643632666365316236633461643965303866653037623564363631383338383830346537353232
30333262316135633639363764353866353631346430333066626564373133663630383065306233
63653437613137326165653239343130326565313462363235353035366236346261656532626463
38383130613861343736323961393838313337643062663939643236346531316461306432393265
37343035346366306561343632373262393437396262306135613330303938346161663065653639
62623133393338373966353965313265636335303733343234643466643233303561376365613137
65633761613633336536333136336233633363376530643832333438383634316533323632393437
62653264306335663438303963366438306463366565663739653835306638633761326562653537
39313861663837316566393665663565333736363166613733306432353039376430306639343538
35336531313637386666373337643265313734373532313132393961333831376637623366306436
63376336663461393961643038353864363766653564313662343062316637373335336131653830
38663633313861306636616231343337313064343034393062646461386137373534353638313039
62353165613138626434636336323866396536626364393763336330343435323263663664373966
33363865373764376231343162353038396366323136396337343536343630323963346536356535
34613031316531356433313665653838343339643533643862376139313431643764396432323234
31643964396234353933313032313438366663643231386233623163666233343961613838613334
37373532646366323865636564376564386664663834663436333133383566666234303435643231
31373264646534636335646537333565663161386437326561386530396135313939623462633031
37366663313564346339396636343139623764653232653432666631353663333161353763646331
31376263636366313361313138653365373165643637663134323530653030663837333637633034
38616634393031353132343630373162393638333932376234643038643938656437303234613231
61653832393438393330346366316163363033386636363835336436396434363663633262376130
63363033313435643639626138636637656333633232386362353936366166323835616437393939
36363737613134386665363963663538376137326432386265653436626233376631326236313131
62643336613563643132363635333930323233666562353035626530316438353136663663373666
63623030336364666632336330626630623837316535323563393231633665626166613765393938
35303333333633346130323930643262616234313564663136643237656462653161613261396231
38383064666635393465353634633936643639336638353163656236346666616566636664383936
61643630326133323439653261633664363833356437613339646230383235663364323137656464
64626364336133356562313235613235306138626436643331626662383234313363616563386335
39393334616365343666363763653232393439656638313562346634626431353162316430343931
38663364623463353339303064373664633536333037346461363636653162323462366263653232
37303836343163373030373564343139366465343236306235316336333261653964383436643263
64376632333931346433376632653733613437363934623338346439623738393064333330633936
31343263376234386238636131663763346466303762363835303231323939643934633662663832
35366230663237623237313731633539353661613763386438643537363835646336626461313633
39313235313937383631646463373937353464356633643031316361356331363063393630646337
61303036643830663763333735303534643935383731313866633863356437343962353964653163
39623862333162353936643430643038663732356263643635353361626430353833346165633631
64646166366231643939313164353261373036623761623433666431373230316662373536646338
30623433383435386133323062633136663437333166366131346164316666616434396530343965
61643962336438333936303938633536323365386137646235313230313363386561306339373831
35333134366536653961643434333865343130636565633866366533353361633439333263616636
37633063386439653937333861626464306163323265343338303235326234303737313365653537
35326338653638646465376235646639313736616430353739323162373866636361646664303134
31643830393836626431643064613733313461656437336463363536383737636230333961303466
36646433643932613166306333346132343366386438353363386134636237323732353433346635
34373138616664333266386233376363393239366162666534326566386164646138613638656463
34656335623238623330616137396337353337663838643432393136316133303263376531336431
32313335636439646664373338623465396132643965653231306634323337393036386437313366
66373634653536646664393034316234633465363837666134636537666165343437636636363366
35356563666539383630303562393131386539346431653031313565323665653937396339346465
37363731303933343961396430653865656535386263386161613864636662666263323834616539
33636565383336333437643065626532306232376461313463323530353539323062393664383535
62303362376237313564333339353933363538363636303961663538313337333464636133626361
61616466353730656235643139633763313733313738363662393130373330633161376266383563
39666533336531393662393830396231633536333839646266306464366235626662386634333139
30343536386135333336313430653136316530393939346636383363666335366266326630313261
64336535316239646566356633366264303335356637343736373234323138366239623761653032
31346333623238663539383035646266616635336634373730336263626262346538346137343865
30626332666565383963356634336532663133626239343234633830366639396365613334613764
63386364346337383962343731373633376135336531633033346666626631323736366230613036
38636361643935346563643133386334313730396661323738323637356437356664336333366133
66373739353533353264636235663034336234303862373732636234623965353061616135663262
34626538316333613139653632313835313663646535643666323438333965383261663633303730
37623631623530313330396164376465346531613361633662393338383336663233313934316132
66616637343933353961326461336466333137633138656239656565346639386565323931316431
39656132666430326434613032353936653335303163616539376434326365386463373539303235
62326138623834343437326138386230313634383863613266316638383435656666373266333162
63333166653862623461633330383131366139646666326266303962623465353238326164633937
32626431636337343437373834336231323431626665393266353362323164383233633262663432
66313461306666633038653365326137326563396231323734353733653639396564666137353566
63633063343232346465373132333032633931396263363932653039633739326433613864346339
39666332366234326434643265393338316664326532383134373366613964326638346163343838
36303339323563396134663031386439666439346437316136326662316133366230326431643935
32663330393564376439303965383633353336613966373566623830386331636463336336333066
62636339383062343133366137663332343536626464323162656236336634646566356134636237
37396562643333623530363065373230663130623735633366333437346333383466303061653333
66376261623437343964616131383133316438393337656135633136346161333831616634633733
33646636633035373664383930376131363334303637346438316161306132653666346439363165
65376561303036366630316531346363303639643961376530646433653765373533616138366234
35303130326131353961623630623538316239383330656536316364323838616461303237623966
65633763333332646531356638613439663239313566383865396234626631383135303431656332
35383132326166366236323839363461633766366636643832356562386332313666643636373031
61303364303835306438653330366163656632376365383663626337613534346233313336663062
61313764366633613063646461376436326339653465316339663461353835303562613538623239
32386463383638366432303362636635626536343438303362383133386161393731376538336465
64623561326163386330393833636264663833653739613962326634326233666630643936663830
63393765356338343463653662626564613962336538373733366430643236383932633666353061
36353632363131333364333962653239623266346439643537613031323763623833326636616461
31383836393563373437666662643064386233643935333165313739393730336132306432326430
61623464343664653161376332393333663764623232363938636161623539336263353539666464
37383963643933353034626331623064643232643662613633663631356537646465376264623532
65386430376230613730353831646661613362623235396639623035653135643333373065373234
30326664356564656634316462333066343338653339653861383239323764623931633630616232
64303834663338386266323064343663653534333033376364666532313830626237373033316233
31633665363635353139366162376130653538353861666662653939613066613965383364393065
36316633636338373535653662316239633434343833613036393934303465656635333335616133
32313438393361366437666538326466306462336538346366643366343762653530663633633737
35656134623237396436333437633933663761393636336135353764353631393332613336633466
63653334646239386365343036323437616330336265643664356263313062373364666461306130
31323435393765373131326233363530336161393430613965303366653930383565656262396232
61353562336235343335666439383635306633313063623638343030653665383033623662336430
34303934313365613263316533386161303034396262363130623661626235666131363665336432
39343766333363663238313666353864353232363133653431643764653466663739346537636364
32306335363332623534346362396362663738636561633937343232616634396237336531336139
66313663636139323331333966373835396632383435383638373539313230626664386233643930
32373362623933656131653362643861323733373636356266373464326136633332396337376634
38326662316537353337636332323935323962363365376463333039316639303666336537356166
63353837376538323266393433303864353735666432303538323730656339306532316639626233
62636331363235323838636534666339616135333238646330646537346563393134346366346638
36656363663563373261323566313130393235646362383463323936306131373865623161343061
39346330643132353033313836633838303931363365303165356338333665643165366261373835
65313232363932383438623133653330643463623734313830353334353563316163633966303834
33393235383763373034653364323062376238623064353632346332373364333861343634313562
35653465663763396330306534313563646261356363343637316631383732623463643662393163
35306463663063633665366630396135346138656266376465353138396631653239363730336638
34626461376638663961666236376136383739636264333637393964633438353665326161306437
63353132306136303132633963616336653031323233373037636136633561613932653333636563
33336438353065326263646432386265373363316166343931396464616165386630373530393635
38346434343366616562376238353963306464323535313965663061386436303139373235633562
37653966396666326533363338386639663436343637376565303032376333623566386131396230
34653239646531613065386365626564353532356432336365653965643962333536373164303430
30343639323136643438306438663531633235323161653237626562356430303230663832303463
37303562373764323764383762356535633734383731666464303632633637346333646337623535
33323632623763623836616432393231373364623163333162616365313638316162313036616539
30336665393034303437646132353336623363663230393335633935656663366565326235363439
36303230313564393637366434646665346665383931393462663531383131346466613563383031
61363136303537386666353965653330336236346136356535363437366533306539653636353638
34313835353038383533323232323730336137666430663865306461643239306362323464323264
38336230393338363461656639393332353563366431333836363935633565383331656230316131
63663463343266376330323130613332303534623135386639333834313264623637643634653333
31666135626664323265663461346135366462316433643161316235363563636432616364653361
66633661653362393139616163646264346566616337616638613861313937346664323934623435
35306534353062323234383236343532613533336635303464383533333734353861393330383732
61663566386333626162396666643737636164323237356533383834303930316631346237343732
64373832303663333535366566336438636139333434633436396233383238663561396432393135
30623434336539653732383363633164376634363766353764336431623431363537613833343632
31646366623439373065626139353939626662333061343038313432616361306533626633653135
63363865643739656561306331313962376536613832636137613831306431613964363434393538
62613237356564613739666166336334643639633037623230303134623233343861383934353830
37363861303963636535623336356132633164316339646231306230313066633536353036363839
65396434663861636230616530386232633837303462313562353734383134353661653138623537
31313533353331626235663163663061663631303731363565313262366535303932663239616466
63313234356366323537653736663630633532666265326665303266623761313939643263653132
38646634626135653737626563306362383835336361396434313062363563363439323831323566
63626137616561646663333433363037376332643732663838306361653365383831386230643162
34303863323638363566643733313036336233303037316430663930396565366163623539656338
31613862336166376166356134336634636537646532313035633331343862376332333838333231
62393838623030353338666563336533333265336231393830623264633762386237653364393030
33336361356139396561336463663963666663616231313432313565383034643230346162653231
66326535653235643361386135616439336434333638633664393138643765613066363963373636
32343530613539313434363561616336643236333032643835396262373933623732303335376162
63613663336531323137633762343832343634653638343263626662356161336163396132383439
62383364323361373639373137393562363464656238623565343362353265663636376565616164
33346537343366616663346263316237373666613634333763353838636663656139326636653066
62333638623432616437306533316337356438376362303461343934623366656131623632333935
66646130303535626565653138353137633232613131653664356466393932633762366161376430
31643938643466306436316365613938666635366430376665336166613763386338613235356434
36306463376233653264356363353134313663666666623039313039613039663862643663343132
37643032666135633438313635313961333638643862616265643561346661643862353331613839
3234656634393561653937393036376466656339323862653662

View File

@@ -0,0 +1,93 @@
# Ansible Vault Secrets File
# Copy to vault.yml and encrypt with: ansible-vault encrypt vault.yml
#
# All secrets should be prefixed with vault_ and encrypted.
# Service variables in vars.yml or host_vars reference these with:
# service_password: "{{ vault_service_password }}"
# PostgreSQL
vault_postgres_password: changeme
# Service Database Passwords
vault_arke_db_password: changeme
vault_casdoor_db_password: changeme
vault_mcp_switchboard_db_password: changeme
vault_openwebui_db_password: changeme
vault_spelunker_db_password: changeme
# Neo4j
vault_neo4j_auth_password: changeme
# RabbitMQ
vault_rabbitmq_password: changeme
vault_kairos_rabbitmq_password: changeme
vault_spelunker_rabbitmq_password: changeme
vault_mcp_switchboard_rabbitmq_password: changeme
# Caliban
# Note: VNC passwords are limited to 8 characters maximum
vault_caliban_x11vnc_password: caliban
# Casdoor
vault_casdoor_auth_state: changeme
vault_casdoor_radius_secret: changeme
vault_casdoor_s3_endpoint: changeme
vault_casdoor_s3_access_key: changeme
vault_casdoor_s3_secret_key: changeme
vault_casdoor_s3_bucket: changeme
vault_casdoor_app_client_secret: changeme
vault_casdoor_admin_password: changeme
vault_casdoor_hostmaster_password: changeme
# Gitea
vault_gitea_db_password: changeme
vault_gitea_secret_key: changeme
vault_gitea_lfs_jwt_secret: changeme
vault_gitea_metrics_token: changeme
vault_gitea_oauth_client_id: changeme
vault_gitea_oauth_client_secret: changeme
# OpenWebUI
vault_openwebui_secret_key: changeme
vault_openwebui_openai_api_key: changeme
vault_openwebui_anthropic_api_key: changeme
vault_openwebui_groq_api_key: changeme
vault_openwebui_mistral_api_key: changeme
vault_openwebui_oauth_client_id: changeme
vault_openwebui_oauth_client_secret: changeme
# MCP Switchboard
vault_mcp_switchboard_secret_key: changeme
# SearXNG
vault_searxng_secret_key: changeme
# PgAdmin
vault_pgadmin_email: admin@example.com
vault_pgadmin_password: changeme
# Grafana
vault_grafana_admin_name: Admin
vault_grafana_admin_login: admin
vault_grafana_admin_password: changeme
vault_grafana_viewer_name: Viewer
vault_grafana_viewer_login: viewer
vault_grafana_viewer_password: changeme
# Pushover (Alertmanager notifications)
vault_pushover_user_key: changeme
vault_pushover_api_token: changeme
# GitHub MCP
vault_github_personal_access_token: changeme
# MCP Authentication Tokens
vault_angelia_mcp_auth: changeme
vault_athena_mcp_auth: changeme
vault_kairos_mcp_auth: changeme
# Arke NTTh API Tokens
vault_ntth_token_1_app_name: changeme
vault_ntth_token_1_app_id: changeme
vault_ntth_token_1_app_secret: changeme
vault_ntth_token_2_app_name: changeme
vault_ntth_token_2_app_id: changeme
vault_ntth_token_2_app_secret: changeme
vault_ntth_token_3_app_name: changeme
vault_ntth_token_3_app_id: changeme
vault_ntth_token_3_app_secret: changeme
vault_ntth_token_4_app_name: changeme
vault_ntth_token_4_app_id: changeme
vault_ntth_token_4_app_secret: changeme
# Variables referenced by host_vars via "{{ vault_* }}" but previously
# missing from this example (verify none already exist before merging)
vault_grafana_service_account_token: changeme
vault_gitea_mcp_access_token: changeme
vault_redis_password: changeme
vault_hass_db_password: changeme
vault_hass_metrics_token: changeme
vault_searxng_oauth2_client_id: changeme
vault_searxng_oauth2_client_secret: changeme
vault_searxng_oauth2_cookie_secret: changeme
vault_anythingllm_db_password: changeme
vault_anythingllm_jwt_secret: changeme
vault_anythingllm_sig_key: changeme
vault_anythingllm_sig_salt: changeme
vault_anythingllm_llm_api_key: changeme
vault_lobechat_db_password: changeme
vault_lobechat_key_vaults_secret: changeme
vault_lobechat_next_auth_secret: changeme
vault_lobechat_auth_casdoor_id: changeme
vault_lobechat_auth_casdoor_secret: changeme
vault_lobechat_s3_access_key: changeme
vault_lobechat_s3_secret_key: changeme
vault_lobechat_openai_api_key: changeme
vault_lobechat_anthropic_api_key: changeme
vault_lobechat_google_api_key: changeme
vault_nextcloud_db_password: changeme
vault_nextcloud_admin_password: changeme
vault_nike_db_password: changeme
vault_jupyterlab_oauth_client_id: changeme
vault_jupyterlab_oauth_client_secret: changeme
vault_jupyterlab_oauth2_cookie_secret: changeme
vault_grafana_oauth_client_id: changeme
vault_grafana_oauth_client_secret: changeme
vault_prometheus_oauth2_client_id: changeme
vault_prometheus_oauth2_client_secret: changeme
vault_prometheus_oauth2_cookie_secret: changeme
vault_pgadmin_oauth_client_id: changeme
vault_pgadmin_oauth_client_secret: changeme
vault_casdoor_prometheus_access_key: changeme
vault_casdoor_prometheus_access_secret: changeme

View File

@@ -0,0 +1,24 @@
---
# Ariel Configuration - Graph Database Host
# Services: alloy, docker, neo4j
services:
- alloy
- docker
- neo4j
# Alloy
alloy_log_level: "warn"
neo4j_syslog_port: 22011
# Neo4j
neo4j_rel: master
neo4j_version: "5.26.0"
neo4j_user: neo4j
neo4j_group: neo4j
neo4j_directory: /srv/neo4j
neo4j_auth_user: neo4j
neo4j_auth_password: "{{ vault_neo4j_auth_password }}"
neo4j_http_port: 25554
neo4j_bolt_port: 7687
neo4j_apoc_unrestricted: "apoc.*"

View File

@@ -0,0 +1,23 @@
---
# Caliban Configuration - Agent Automation Host
# Services: caliban (Agent S), alloy, docker, kernos
services:
- alloy
- caliban
- docker
- kernos
# Alloy
alloy_log_level: "warn"
# Kernos MCP Shell Server Configuration
kernos_user: harper
kernos_group: harper
kernos_directory: /srv/kernos
kernos_port: 22021
kernos_host: "0.0.0.0"
kernos_log_level: INFO
kernos_log_format: json
kernos_environment: sandbox
# NOTE(review): allow-list for a "sandbox" environment includes sudo, ssh,
# scp, dd, rm, kill/pkill — confirm this level of access is intended here
kernos_allow_commands: "apt,awk,base64,bash,cat,chmod,cp,curl,cut,date,dd,df,dig,dmesg,du,echo,env,file,find,free,git,grep,gunzip,gzip,head,host,hostname,id,jq,kill,less,ln,ls,lsblk,lspci,lsusb,make,mkdir,mv,nc,node,nohup,npm,npx,ping,pip,pkill,pnpm,printenv,ps,pwd,python3,rm,rsync,run-captured,scp,sed,sleep,sort,source,ssh,ssh-keygen,ssh-keyscan,stat,sudo,tail,tar,tee,timeout,touch,tr,tree,uname,uniq,unzip,uptime,wc,wget,which,whoami,xargs,xz,zip"

View File

@@ -0,0 +1,20 @@
---
# Korax Configuration
# Services: alloy, kernos
services:
- alloy
- kernos
# Alloy
alloy_log_level: "warn"
# Kernos MCP Shell Server Configuration
kernos_user: harper
kernos_group: harper
kernos_directory: /srv/kernos
kernos_port: 22021
kernos_host: "0.0.0.0"
kernos_log_level: INFO
kernos_log_format: json
kernos_environment: sandbox
# NOTE(review): allow-list for a "sandbox" environment includes sudo, ssh,
# scp, dd, rm, kill/pkill — confirm this level of access is intended here
kernos_allow_commands: "apt,awk,base64,bash,cat,chmod,cp,curl,cut,date,dd,df,dig,dmesg,du,echo,env,file,find,free,git,grep,gunzip,gzip,head,host,hostname,id,jq,kill,less,ln,ls,lsblk,lspci,lsusb,make,mkdir,mv,nc,node,nohup,npm,npx,ping,pip,pkill,pnpm,printenv,ps,pwd,python3,rm,rsync,run-captured,scp,sed,sleep,sort,source,ssh,ssh-keygen,ssh-keyscan,stat,sudo,tail,tar,tee,timeout,touch,tr,tree,uname,uniq,unzip,uptime,wc,wget,which,whoami,xargs,xz,zip"

View File

@@ -0,0 +1,74 @@
---
# Miranda Configuration - MCP Docker Host
# Services: alloy, argos, docker, gitea_mcp, grafana_mcp, mcpo, neo4j_mcp
services:
- alloy
- argos
- docker
- gitea_mcp
- grafana_mcp
- mcpo
- neo4j_mcp
# Alloy
alloy_log_level: "warn"
argos_syslog_port: 51434
neo4j_cypher_syslog_port: 51431
grafana_mcp_syslog_port: 51433
gitea_mcp_syslog_port: 51435
# Argos MCP Configuration
argos_user: argos
argos_group: argos
argos_directory: /srv/argos
argos_port: 25534
argos_log_level: INFO
argos_searxng_instances: http://oberon.incus:22083/
argos_cache_ttl: 300
argos_max_results: 10
argos_request_timeout: 30.0
argos_health_check_timeout: 5.0
argos_kvdb_host: localhost
argos_kvdb_port: 11211
argos_kvdb_prefix: argos
argos_enable_startup_health_check: true
# Docker API Configuration
docker_api_enabled: true
docker_api_port: 2375
docker_api_host: "0.0.0.0"
# Neo4j MCP Config
neo4j_mcp_user: neo4j_mcp
neo4j_mcp_group: neo4j_mcp
neo4j_mcp_directory: /srv/neo4j_mcp
# Grafana MCP Config
grafana_mcp_user: grafana_mcp
grafana_mcp_group: grafana_mcp
grafana_mcp_directory: /srv/grafana_mcp
grafana_mcp_port: 25533
grafana_mcp_grafana_host: prospero.incus
grafana_mcp_grafana_port: 3000
grafana_service_account_token: "{{ vault_grafana_service_account_token }}"
# Gitea MCP Config
gitea_mcp_user: gitea_mcp
gitea_mcp_group: gitea_mcp
gitea_mcp_directory: /srv/gitea_mcp
gitea_mcp_port: 25535
gitea_mcp_host: https://gitea.ouranos.helu.ca
gitea_mcp_access_token: "{{ vault_gitea_mcp_access_token }}"
# Neo4j Cypher MCP
neo4j_host: ariel.incus
neo4j_bolt_port: 7687
neo4j_auth_password: "{{ vault_neo4j_auth_password }}"
neo4j_cypher_mcp_port: 25531
# MCPO Config
mcpo_user: mcpo
mcpo_group: mcpo
mcpo_directory: /srv/mcpo
mcpo_port: 25530

View File

@@ -0,0 +1,134 @@
---
# Oberon Configuration
services:
- alloy
- docker
- hass
- mcp_switchboard
- openwebui
- rabbitmq
- searxng
- smtp4dev
# Alloy
alloy_log_level: "warn"
rabbitmq_syslog_port: 51402
searxng_syslog_port: 51403
# MCP Switchboard Configuration
mcp_switchboard_user: mcpsb
mcp_switchboard_group: mcpsb
mcp_switchboard_directory: /srv/mcp_switchboard
mcp_switchboard_port: 22785
mcp_switchboard_docker_host: "tcp://miranda.incus:2375"
mcp_switchboard_db_host: portia.incus
mcp_switchboard_db_port: 5432
mcp_switchboard_db_name: mcp_switchboard
mcp_switchboard_db_user: mcpsb
mcp_switchboard_db_password: "{{ vault_mcp_switchboard_db_password }}"
mcp_switchboard_rabbitmq_host: localhost
mcp_switchboard_rabbitmq_port: 5672
mcp_switchboard_rabbitmq_user: rabbitmq
mcp_switchboard_rabbitmq_password: "{{ vault_mcp_switchboard_rabbitmq_password }}"
mcp_switchboard_secret_key: "{{ vault_mcp_switchboard_secret_key }}"
# Open WebUI Configuration
openwebui_user: openwebui
openwebui_group: openwebui
openwebui_directory: /srv/openwebui
openwebui_cors_allow_origin: https://openwebui.ouranos.helu.ca
openwebui_port: 22088
# NOTE(review): openwebui is in this host's services list but openwebui_host
# points at puck.incus — confirm which host actually serves Open WebUI
openwebui_host: puck.incus
openwebui_secret_key: "{{ vault_openwebui_secret_key }}"
openwebui_enable_signup: true
openwebui_enable_email_login: false
# OAuth/OIDC Configuration (Casdoor SSO)
openwebui_oauth_client_id: "{{ vault_openwebui_oauth_client_id }}"
openwebui_oauth_client_secret: "{{ vault_openwebui_oauth_client_secret }}"
openwebui_oauth_provider_name: "Casdoor"
openwebui_oauth_provider_url: "https://id.ouranos.helu.ca/.well-known/openid-configuration"
# Database Configuration
openwebui_db_host: portia.incus
openwebui_db_port: 5432
openwebui_db_name: openwebui
openwebui_db_user: openwebui
openwebui_db_password: "{{ vault_openwebui_db_password }}"
# API Keys
openwebui_openai_api_key: "{{ vault_openwebui_openai_api_key }}"
openwebui_anthropic_api_key: "{{ vault_openwebui_anthropic_api_key }}"
openwebui_groq_api_key: "{{ vault_openwebui_groq_api_key }}"
openwebui_mistral_api_key: "{{ vault_openwebui_mistral_api_key }}"
# Ollama Configuration
ollama_api_base_url: ""
openwebui_ollama_api_key: ""
# SSL Configuration
openwebui_enable_https: false
openwebui_ssl_cert_path: ""
openwebui_ssl_key_path: ""
# Logging
openwebui_log_level: info
# RabbitMQ Config
rabbitmq_user: rabbitmq
rabbitmq_group: rabbitmq
rabbitmq_directory: /srv/rabbitmq
rabbitmq_amqp_port: 5672
rabbitmq_management_port: 25582
rabbitmq_password: "{{ vault_rabbitmq_password }}"
# Redis password
redis_password: "{{ vault_redis_password }}"
# SearXNG Configuration
searxng_user: searxng
searxng_group: searxng
searxng_directory: /srv/searxng
searxng_port: 22083
searxng_base_url: http://oberon.incus:22083/
searxng_instance_name: "Agathos Search"
searxng_secret_key: "{{ vault_searxng_secret_key }}"
# SearXNG OAuth2-Proxy Sidecar
# Note: Each host supports at most one OAuth2-Proxy sidecar instance
# (binary shared at /usr/local/bin/oauth2-proxy, unique systemd unit per service)
searxng_oauth2_proxy_dir: /etc/oauth2-proxy-searxng
searxng_oauth2_proxy_version: "7.6.0"
searxng_proxy_port: 22073
searxng_domain: "ouranos.helu.ca"
searxng_oauth2_oidc_issuer_url: "https://id.ouranos.helu.ca"
searxng_oauth2_redirect_url: "https://searxng.ouranos.helu.ca/oauth2/callback"
# OAuth2 Credentials (from vault)
searxng_oauth2_client_id: "{{ vault_searxng_oauth2_client_id }}"
searxng_oauth2_client_secret: "{{ vault_searxng_oauth2_client_secret }}"
searxng_oauth2_cookie_secret: "{{ vault_searxng_oauth2_cookie_secret }}"
# smtp4dev Configuration
smtp4dev_user: smtp4dev
smtp4dev_group: smtp4dev
smtp4dev_directory: /srv/smtp4dev
smtp4dev_port: 22085
smtp4dev_smtp_port: 22025
smtp4dev_imap_port: 22045
smtp4dev_syslog_port: 51405
# Home Assistant Configuration
hass_user: hass
hass_group: hass
hass_directory: /srv/hass
hass_media_directory: /srv/hass/media
hass_port: 8123
hass_version: "2026.2.0"
hass_db_host: portia.incus
hass_db_port: 5432
hass_db_name: hass
hass_db_user: hass
hass_db_password: "{{ vault_hass_db_password }}"
hass_metrics_token: "{{ vault_hass_metrics_token }}"

View File

@@ -0,0 +1,48 @@
---
# Portia Configuration - Relational Database Host
# Services: alloy, postgresql
# Note: PgAdmin moved to Prospero (PPLG stack)
services:
- alloy
- postgresql
# Alloy
alloy_log_level: "warn"
# PostgreSQL Config
postgres_user: postgres
postgres_group: postgres
postgresql_port: 5432
postgresql_data_dir: /var/lib/postgresql
arke_db_name: arke
arke_db_user: arke
arke_db_password: "{{ vault_arke_db_password }}"
anythingllm_db_name: anythingllm
anythingllm_db_user: anythingllm
anythingllm_db_password: "{{ vault_anythingllm_db_password }}"
# Note: Casdoor uses dedicated PostgreSQL on Titania (not Portia)
gitea_db_name: gitea
gitea_db_user: gitea
gitea_db_password: "{{ vault_gitea_db_password }}"
lobechat_db_name: lobechat
lobechat_db_user: lobechat
lobechat_db_password: "{{ vault_lobechat_db_password }}"
nextcloud_db_name: nextcloud
nextcloud_db_user: nextcloud
nextcloud_db_password: "{{ vault_nextcloud_db_password }}"
openwebui_db_name: openwebui
openwebui_db_user: openwebui
openwebui_db_password: "{{ vault_openwebui_db_password }}"
spelunker_db_name: spelunker
spelunker_db_user: spelunker
spelunker_db_password: "{{ vault_spelunker_db_password }}"
hass_db_name: hass
hass_db_user: hass
hass_db_password: "{{ vault_hass_db_password }}"
nike_db_name: nike
nike_db_user: nike
nike_db_password: "{{ vault_nike_db_password }}"
# PostgreSQL admin password
postgres_password: "{{ vault_postgres_password }}"

View File

@@ -0,0 +1,141 @@
---
# Prospero Configuration - PPLG Observability & Admin Stack
# Services: pplg (PgAdmin, Prometheus, Loki, Grafana + HAProxy + OAuth2-Proxy)
services:
- alloy
- pplg
# Alloy
alloy_log_level: "warn"
# ============================================================================
# PPLG HAProxy Configuration
# ============================================================================
pplg_haproxy_user: haproxy
pplg_haproxy_group: haproxy
pplg_haproxy_uid: 800
pplg_haproxy_gid: 800
pplg_haproxy_domain: "ouranos.helu.ca"
pplg_haproxy_cert_path: /etc/haproxy/certs/ouranos.pem
pplg_haproxy_stats_port: 8404
pplg_haproxy_syslog_port: 51405
# ============================================================================
# Grafana
# ============================================================================
# Grafana Datasources
prometheus_datasource_name: Prospero-Prometheus
prometheus_host: prospero.incus
prometheus_port: 9090
prometheus_datasource_uid: prospero-prometheus
loki_datasource_name: Prospero-Loki
loki_host: prospero.incus
loki_port: 3100
loki_datasource_uid: prospero-loki
# Grafana Users
grafana_admin_name: "{{ vault_grafana_admin_name }}"
grafana_admin_login: "{{ vault_grafana_admin_login }}"
grafana_admin_password: "{{ vault_grafana_admin_password }}"
grafana_viewer_name: "{{ vault_grafana_viewer_name }}"
grafana_viewer_login: "{{ vault_grafana_viewer_login }}"
grafana_viewer_password: "{{ vault_grafana_viewer_password }}"
# Grafana OAuth (Casdoor SSO)
grafana_oauth_enabled: true
grafana_oauth_name: "Casdoor"
grafana_oauth_client_id: "{{ vault_grafana_oauth_client_id }}"
grafana_oauth_client_secret: "{{ vault_grafana_oauth_client_secret }}"
grafana_oauth_auth_url: "https://id.ouranos.helu.ca/login/oauth/authorize"
grafana_oauth_token_url: "https://id.ouranos.helu.ca/api/login/oauth/access_token"
grafana_oauth_api_url: "https://id.ouranos.helu.ca/api/userinfo"
grafana_oauth_scopes: "openid profile email"
grafana_root_url: "https://grafana.ouranos.helu.ca"
grafana_oauth_allow_sign_up: true
grafana_oauth_skip_tls_verify: false
# ============================================================================
# Prometheus
# ============================================================================
prometheus_user: prometheus
prometheus_group: prometheus
prometheus_scrape_interval: 15s
prometheus_evaluation_interval: 15s
alertmanager_host: prospero.incus
alertmanager_port: 9093
loki_metrics_port: 3100
prometheus_targets:
- 'oberon.incus:9100'
- 'portia.incus:9100'
- 'ariel.incus:9100'
- 'puck.incus:9100'
- 'puck.incus:25571'
- 'miranda.incus:9100'
- 'sycorax.incus:9100'
- 'prospero.incus:9100'
- 'rosalind.incus:9100'
# Prometheus OAuth2-Proxy Sidecar
prometheus_proxy_port: 9091
prometheus_oauth2_proxy_dir: /etc/oauth2-proxy-prometheus
prometheus_oauth2_proxy_version: "7.6.0"
prometheus_oauth2_oidc_issuer_url: "https://id.ouranos.helu.ca"
prometheus_oauth2_client_id: "{{ vault_prometheus_oauth2_client_id }}"
prometheus_oauth2_client_secret: "{{ vault_prometheus_oauth2_client_secret }}"
prometheus_oauth2_cookie_secret: "{{ vault_prometheus_oauth2_cookie_secret }}"
# ============================================================================
# Alertmanager
# ============================================================================
alertmanager_user: prometheus
alertmanager_group: prometheus
alertmanager_resolve_timeout: 5m
alertmanager_group_wait: 30s
alertmanager_group_interval: 5m
alertmanager_repeat_interval: 4h
pushover_user_key: "{{ vault_pushover_user_key }}"
pushover_api_token: "{{ vault_pushover_api_token }}"
pushover_priority: 1
pushover_retry: 30
pushover_expire: 3600
# ============================================================================
# Loki
# ============================================================================
loki_user: loki
loki_group: loki
loki_data_dir: /var/lib/loki
loki_config_dir: /etc/loki
loki_config_file: config.yml
loki_grpc_port: 9096
# ============================================================================
# PgAdmin (Gunicorn - no Apache)
# ============================================================================
pgadmin_user: pgadmin
pgadmin_group: pgadmin
pgadmin_port: 5050
pgadmin_data_dir: /var/lib/pgadmin
pgadmin_log_dir: /var/log/pgadmin
pgadmin_email: "{{ vault_pgadmin_email }}"
pgadmin_password: "{{ vault_pgadmin_password }}"
# PgAdmin OAuth (Casdoor SSO)
pgadmin_oauth_client_id: "{{ vault_pgadmin_oauth_client_id }}"
pgadmin_oauth_client_secret: "{{ vault_pgadmin_oauth_client_secret }}"
# ============================================================================
# Casdoor Metrics (for Prometheus scraping)
# ============================================================================
casdoor_metrics_host: "titania.incus"
casdoor_metrics_port: 22081
casdoor_prometheus_access_key: "{{ vault_casdoor_prometheus_access_key }}"
casdoor_prometheus_access_secret: "{{ vault_casdoor_prometheus_access_secret }}"

View File

@@ -0,0 +1,46 @@
---
# Puck Configuration - Application Runtime
# Services: alloy, docker, gitea_runner, jupyterlab
services:
- alloy
- docker
- gitea_runner
- jupyterlab
# Gitea Runner
gitea_runner_name: "puck-runner"
# Alloy
alloy_log_level: "warn"
angelia_syslog_port: 51421
sagittarius_syslog_port: 51431
athena_syslog_port: 51441
kairos_syslog_port: 51451
icarlos_syslog_port: 51461
spelunker_syslog_port: 51481
jupyterlab_syslog_port: 51491
# =============================================================================
# JupyterLab Configuration
# =============================================================================
jupyterlab_user: robert
jupyterlab_group: robert
jupyterlab_notebook_dir: /home/robert
jupyterlab_venv_dir: /home/robert/env/jupyter
# Ports
jupyterlab_port: 22081 # JupyterLab (localhost only)
jupyterlab_proxy_port: 22071 # OAuth2-Proxy (exposed to HAProxy)
# OAuth2-Proxy Configuration
jupyterlab_oauth2_proxy_dir: /etc/oauth2-proxy-jupyter
jupyterlab_oauth2_proxy_version: "7.6.0"
jupyterlab_domain: "ouranos.helu.ca"
jupyterlab_oauth2_oidc_issuer_url: "https://id.ouranos.helu.ca"
jupyterlab_oauth2_redirect_url: "https://jupyterlab.ouranos.helu.ca/oauth2/callback"
# OAuth2 Credentials (from vault)
jupyterlab_oauth_client_id: "{{ vault_jupyterlab_oauth_client_id }}"
jupyterlab_oauth_client_secret: "{{ vault_jupyterlab_oauth_client_secret }}"
jupyterlab_oauth2_cookie_secret: "{{ vault_jupyterlab_oauth2_cookie_secret }}"

View File

@@ -0,0 +1,155 @@
---
# Rosalind Configuration - GO, Node.js, PHP Apps
# Services: alloy, anythingllm, docker, gitea, lobechat, memcached, nextcloud
services:
- alloy
- anythingllm
- docker
- gitea
- lobechat
- memcached
- nextcloud
# Alloy
alloy_log_level: "warn"
lobechat_syslog_port: 51461
# AnythingLLM Configuration
anythingllm_user: anythingllm
anythingllm_group: anythingllm
anythingllm_directory: /srv/anythingllm
anythingllm_port: 22084
# AnythingLLM Database (Portia PostgreSQL)
anythingllm_db_host: portia.incus
anythingllm_db_port: 5432
anythingllm_db_name: anythingllm
anythingllm_db_user: anythingllm
anythingllm_db_password: "{{ vault_anythingllm_db_password }}"
# AnythingLLM Security
anythingllm_jwt_secret: "{{ vault_anythingllm_jwt_secret }}"
anythingllm_sig_key: "{{ vault_anythingllm_sig_key }}"
anythingllm_sig_salt: "{{ vault_anythingllm_sig_salt }}"
# AnythingLLM LLM Provider (Generic OpenAI / llama-cpp)
anythingllm_llm_base_url: "http://nyx.helu.ca:25540/v1"
anythingllm_llm_model: "global.anthropic.claude-opus-4-6-v1"
anythingllm_llm_token_limit: 200000
# SECURITY(review): a plaintext API key was committed here — rotate the key
# and add vault_anythingllm_llm_api_key to the encrypted vault before deploy
anythingllm_llm_api_key: "{{ vault_anythingllm_llm_api_key }}"
# AnythingLLM Embedding
anythingllm_embedding_engine: "generic-openai"
anythingllm_embedding_model: "Qwen3-Embedding-0.6B-Q8_0"
# AnythingLLM TTS (FastKokoro)
anythingllm_tts_provider: "openai"
anythingllm_tts_api_key: "not-needed"
anythingllm_tts_endpoint: "http://pan.helu.ca:22070/v1"
anythingllm_tts_model: "kokoro"
anythingllm_tts_voice: "am_echo"
# Gitea User and Directories
gitea_user: git
gitea_group: git
gitea_home_dir: /srv/git
gitea_work_dir: /var/lib/gitea
gitea_data_dir: /var/lib/gitea/data
gitea_lfs_dir: /var/lib/gitea/data/lfs
gitea_repo_root: /mnt/dv
gitea_config_file: /etc/gitea/app.ini
# Ports
gitea_web_port: 22082
gitea_ssh_port: 22022
gitea_metrics_port: 22092
# Network
gitea_domain: ouranos.helu.ca
gitea_root_url: https://gitea.ouranos.helu.ca/
# Database Configuration
gitea_db_type: postgres
gitea_db_host: portia.incus
gitea_db_port: 5432
gitea_db_name: gitea
gitea_db_user: gitea
gitea_db_password: "{{ vault_gitea_db_password }}"
gitea_db_ssl_mode: disable
# Features
gitea_lfs_enabled: true
gitea_metrics_enabled: true
# Service Settings
gitea_disable_registration: true # Use Casdoor SSO instead
gitea_require_signin_view: false
# Security (vault secrets)
gitea_secret_key: "{{ vault_gitea_secret_key }}"
gitea_lfs_jwt_secret: "{{ vault_gitea_lfs_jwt_secret }}"
gitea_metrics_token: "{{ vault_gitea_metrics_token }}"
# OAuth2 (Casdoor SSO)
gitea_oauth_enabled: true
gitea_oauth_name: "casdoor"
gitea_oauth_display_name: "Sign in with Casdoor"
gitea_oauth_client_id: "{{ vault_gitea_oauth_client_id }}"
gitea_oauth_client_secret: "{{ vault_gitea_oauth_client_secret }}"
# Auth URL uses external HAProxy address (user's browser)
gitea_oauth_auth_url: "https://id.ouranos.helu.ca/login/oauth/authorize"
# Token and userinfo URLs use internal Casdoor address (server-to-server)
gitea_oauth_token_url: "https://id.ouranos.helu.ca/api/login/oauth/access_token"
gitea_oauth_userinfo_url: "https://id.ouranos.helu.ca/api/userinfo"
gitea_oauth_scopes: "openid profile email"
# LobeChat Configuration
lobechat_user: lobechat
lobechat_group: lobechat
lobechat_directory: /srv/lobechat
lobechat_port: 22081
# Database Configuration
lobechat_db_host: portia.incus
lobechat_db_port: 5432
lobechat_db_name: lobechat
lobechat_db_user: lobechat
lobechat_db_password: "{{ vault_lobechat_db_password }}"
lobechat_key_vaults_secret: "{{ vault_lobechat_key_vaults_secret }}"
# Authentication
# NEXTAUTH_URL must be the public URL users access (not internal)
lobechat_nextauth_url: https://lobechat.ouranos.helu.ca
lobechat_next_auth_secret: "{{ vault_lobechat_next_auth_secret }}"
lobechat_next_auth_sso_providers: casdoor
# Issuer must match exactly what Casdoor returns in .well-known/openid-configuration
lobechat_auth_casdoor_issuer: http://titania.incus:22081
lobechat_auth_casdoor_id: "{{ vault_lobechat_auth_casdoor_id }}"
lobechat_auth_casdoor_secret: "{{ vault_lobechat_auth_casdoor_secret }}"
# S3 Storage
lobechat_s3_endpoint: https://pan.helu.ca:8555
lobechat_s3_public_domain: https://pan.helu.ca:8555
lobechat_s3_access_key: "{{ vault_lobechat_s3_access_key }}"
lobechat_s3_secret_key: "{{ vault_lobechat_s3_secret_key }}"
lobechat_s3_bucket: lobechat
# Search
# NOTE(review): SearXNG on oberon listens on 22083 (searxng_port) with an
# OAuth2 proxy on 22073 — confirm 25599 is a valid endpoint
lobechat_searxng_url: http://oberon.incus:25599
# AI Models
lobechat_openai_proxy_url: http://sycorax.incus:25540/v1
lobechat_openai_key: "{{ vault_lobechat_openai_api_key }}"
lobechat_ollama_proxy_url: http://perseus.helu.ca:11434
lobechat_anthropic_api_key: "{{ vault_lobechat_anthropic_api_key }}"
lobechat_google_api_key: "{{ vault_lobechat_google_api_key }}"
lobechat_app_url: https://lobechat.ouranos.helu.ca/
# Nextcloud Configuration
nextcloud_web_port: 22083
nextcloud_data_dir: /mnt/nextcloud
# Database Configuration
nextcloud_db_type: pgsql
nextcloud_db_host: portia.incus
nextcloud_db_port: 5432
nextcloud_db_name: nextcloud
nextcloud_db_user: nextcloud
nextcloud_db_password: "{{ vault_nextcloud_db_password }}"
# Admin Configuration
nextcloud_admin_user: admin
nextcloud_admin_password: "{{ vault_nextcloud_admin_password }}"
# Domain Configuration
nextcloud_domain: nextcloud.ouranos.helu.ca
# Instance secrets (generated during install)
nextcloud_instance_id: ""
nextcloud_password_salt: ""
nextcloud_secret: ""

View File

@@ -0,0 +1,71 @@
---
# Sycorax Configuration - Language Models
# Services: alloy, arke
services:
- alloy
- arke
# Alloy
alloy_log_level: "warn"
# Arke Configuration
arke_user: arke
arke_group: arke
arke_directory: /srv/arke
arke_port: 25540
# Server Configuration
arke_reload: false
# Memcached config
arke_memcached_host: localhost
arke_memcached_port: 11211
# Database Configuration
arke_db_host: portia.incus
arke_db_port: 5432
arke_db_name: arke
arke_db_user: arke
arke_db_password: "{{ vault_arke_db_password }}"
# NTTh API Configuration
arke_session_limit: 90
arke_session_ttl: 3600
arke_token_cache_ttl: 82800
ntth_token_1_app_name: "{{ vault_ntth_token_1_app_name }}"
ntth_token_1_app_id: "{{ vault_ntth_token_1_app_id }}"
ntth_token_1_app_secret: "{{ vault_ntth_token_1_app_secret }}"
ntth_token_2_app_name: "{{ vault_ntth_token_2_app_name }}"
ntth_token_2_app_id: "{{ vault_ntth_token_2_app_id }}"
ntth_token_2_app_secret: "{{ vault_ntth_token_2_app_secret }}"
ntth_token_3_app_name: "{{ vault_ntth_token_3_app_name }}"
ntth_token_3_app_id: "{{ vault_ntth_token_3_app_id }}"
ntth_token_3_app_secret: "{{ vault_ntth_token_3_app_secret }}"
ntth_token_4_app_name: "{{ vault_ntth_token_4_app_name }}"
ntth_token_4_app_id: "{{ vault_ntth_token_4_app_id }}"
ntth_token_4_app_secret: "{{ vault_ntth_token_4_app_secret }}"
# Embedding Provider Configuration
arke_embedding_provider: openai
# OpenAI-Compatible Configuration
arke_openai_embedding_base_url: http://pan.helu.ca:22079/v1
arke_openai_embedding_api_key: 0000
arke_openai_embedding_model: Qwen3-Embedding-0.6B-Q8_0
# Common Embedding Configuration
arke_embedding_batch_size: 16
arke_embedding_ubatch_size: 512
arke_embedding_max_context: 8192
arke_embedding_timeout: 30.0
# Memory System Configuration
arke_memory_enabled: true
arke_max_context_tokens: 8000
arke_similarity_threshold: 0.7
arke_min_importance_score: 0.7
# Monitoring Configuration — NOTE(review): arke_metrics_port below equals arke_port (25540); confirm metrics are intentionally served on the application port rather than a dedicated listener
arke_prometheus_enabled: true
arke_metrics_port: 25540

View File

@@ -0,0 +1,217 @@
---
# Titania Configuration - Proxy & SSO Services
# Services: alloy, certbot, docker, haproxy, postgresql_ssl, casdoor
services:
- alloy
- certbot
- docker
- haproxy
- postgresql_ssl
- casdoor
# PostgreSQL SSL Configuration (dedicated database for identity services)
postgresql_ssl_postgres_password: "{{ vault_postgresql_ssl_postgres_password }}"
postgresql_ssl_port: 5432
postgresql_ssl_cert_path: /etc/postgresql/17/main/ssl/server.crt
# Alloy
alloy_log_level: "warn"
casdoor_syslog_port: 51401
haproxy_syslog_port: 51404
# Certbot Configuration (Let's Encrypt DNS-01 with Namecheap)
certbot_user: certbot
certbot_group: certbot
certbot_directory: /srv/certbot
certbot_email: webmaster@helu.ca
certbot_cert_name: ouranos.helu.ca
certbot_domains:
- "*.ouranos.helu.ca"
- "ouranos.helu.ca"
prometheus_node_exporter_text_directory: /var/lib/prometheus/node-exporter
# HAProxy Configuration
haproxy_user: haproxy
haproxy_group: haproxy
haproxy_uid: 800
haproxy_gid: 800
haproxy_directory: /srv/haproxy
haproxy_http_port: 8080
haproxy_https_port: 8443
haproxy_stats_port: 8404
haproxy_domain: "ouranos.helu.ca"
haproxy_cert_path: /etc/haproxy/certs/ouranos.pem
# HAProxy TCP Backend Definitions (mode tcp passthrough)
haproxy_tcp_backends:
- name: gitea_ssh
listen_port: 22022
backend_host: "rosalind.incus"
backend_port: 22022
# HAProxy Backend Definitions
haproxy_backends:
- subdomain: "" # Root domain (ouranos.helu.ca)
backend_host: "puck.incus"
backend_port: 22281
health_path: "/"
# timeout_server: "50s" # Optional override
- subdomain: "id" # Casdoor SSO (id.ouranos.helu.ca)
backend_host: "titania.incus"
backend_port: 22081
health_path: "/api/health"
redirect_root: "/login/heluca" # Redirect root to branded org login page
- subdomain: "openwebui"
backend_host: "oberon.incus"
backend_port: 22088
health_path: "/"
- subdomain: "anythingllm"
backend_host: "rosalind.incus"
backend_port: 22084
health_path: "/api/ping"
- subdomain: "arke"
backend_host: "sycorax.incus"
backend_port: 25540
health_path: "/health"
# SearXNG - routed through OAuth2-Proxy sidecar on Oberon
- subdomain: "searxng"
backend_host: "oberon.incus"
backend_port: 22073
health_path: "/ping"
- subdomain: "pgadmin"
backend_host: "prospero.incus"
backend_port: 443
health_path: "/misc/ping"
ssl_backend: true
- subdomain: "grafana"
backend_host: "prospero.incus"
backend_port: 443
health_path: "/api/health"
ssl_backend: true
- subdomain: "prometheus"
backend_host: "prospero.incus"
backend_port: 443
health_path: "/ping"
ssl_backend: true
- subdomain: "loki"
backend_host: "prospero.incus"
backend_port: 443
health_path: "/ready"
ssl_backend: true
- subdomain: "alertmanager"
backend_host: "prospero.incus"
backend_port: 443
health_path: "/-/healthy"
ssl_backend: true
- subdomain: "gitea"
backend_host: "rosalind.incus"
backend_port: 22082
health_path: "/api/healthz"
timeout_server: 120s
- subdomain: "lobechat"
backend_host: "rosalind.incus"
backend_port: 22081
health_path: "/chat"
- subdomain: "nextcloud"
backend_host: "rosalind.incus"
backend_port: 22083
health_path: "/status.php"
- subdomain: "angelia"
backend_host: "puck.incus"
backend_port: 22281
health_path: "/"
- subdomain: "athena"
backend_host: "puck.incus"
backend_port: 22481
health_path: "/ready/"
- subdomain: "kairos"
backend_host: "puck.incus"
backend_port: 22581
health_path: "/ready/"
- subdomain: "icarlos"
backend_host: "puck.incus"
backend_port: 22681
health_path: "/ready/"
- subdomain: "mcp-switchboard"
backend_host: "puck.incus"
backend_port: 22781
health_path: "/ready/"
- subdomain: "spelunker"
backend_host: "puck.incus"
backend_port: 22881
health_path: "/ready/"
- subdomain: "peitho"
backend_host: "puck.incus"
backend_port: 22981
health_path: "/ready/"
- subdomain: "jupyterlab"
backend_host: "puck.incus"
backend_port: 22071 # OAuth2-Proxy port
health_path: "/ping"
timeout_server: 300s # WebSocket support
- subdomain: "hass"
backend_host: "oberon.incus"
backend_port: 8123
health_path: "/api/"
timeout_server: 300s # WebSocket support for HA frontend
- subdomain: "smtp4dev"
backend_host: "oberon.incus"
backend_port: 22085
health_path: "/"
# Casdoor Configuration
casdoor_user: casdoor
casdoor_group: casdoor
casdoor_directory: /srv/casdoor
# Web Configuration
casdoor_port: 22081
casdoor_runmode: dev
casdoor_copyrequestbody: true
casdoor_drivername: postgres
# Database Configuration
casdoor_db_port: 5432
casdoor_db_name: casdoor
casdoor_db_user: casdoor
casdoor_db_password: "{{ vault_casdoor_db_password }}"
casdoor_db_sslmode: disable
casdoor_showsql: false
# Redis and Storage
casdoor_redis_endpoint: ""
casdoor_default_storage_provider: ""
# Authentication
casdoor_auth_state: "{{ vault_casdoor_auth_state }}"
# Origin must exactly match the public URL clients use to reach Casdoor. NOTE(review): this comment said the port must be included, but the values below carry no explicit port (443 implied for https) — confirm internal OIDC discovery still resolves correctly
casdoor_origin: "https://id.ouranos.helu.ca"
casdoor_origin_frontend: "https://id.ouranos.helu.ca"
# Timeouts and Ports
casdoor_inactive_timeout_minutes: 60
casdoor_ldap_server_port: 0
casdoor_ldaps_cert_id: ""
casdoor_ldaps_server_port: 0
casdoor_radius_server_port: 1812
casdoor_radius_default_organization: "built-in"
casdoor_radius_secret: "{{ vault_casdoor_radius_secret }}"

50
ansible/inventory/hosts Normal file
View File

@@ -0,0 +1,50 @@
---
# Ansible Inventory - Simplified
# Variables moved to:
# - host_vars/{hostname}.yml (host-specific config)
# - group_vars/all/vars.yml (common variables)
# Red Panda Approved Uranian Hosts
ubuntu:
hosts:
ariel.incus:
caliban.incus:
miranda.incus:
oberon.incus:
portia.incus:
prospero.incus:
puck.incus:
rosalind.incus:
sycorax.incus:
titania.incus:
korax.helu.ca:
# Service-specific groups for targeted deployments
agent_s:
hosts:
caliban.incus:
arke:
hosts:
sycorax.incus:
casdoor:
hosts:
titania.incus:
kernos:
hosts:
caliban.incus:
korax.helu.ca:
searxng:
hosts:
oberon.incus:
gitea:
hosts:
rosalind.incus:
mcpo:
hosts:
miranda.incus:

View File

@@ -0,0 +1,221 @@
---
# JupyterLab Deployment with OAuth2-Proxy Sidecar
# Deploys JupyterLab as systemd service with Casdoor SSO via oauth2-proxy
# Red Panda Approved
- name: Deploy JupyterLab
hosts: ubuntu
become: true
tasks:
# Gate the play per-host: only hosts that declare 'jupyterlab' in their
# services list proceed; everyone else ends early via end_host.
# The default([]) guard prevents an undefined-variable failure on hosts
# that define no services var at all (matches the lobechat playbook's style).
- name: Check if host has jupyterlab service
  ansible.builtin.set_fact:
    has_jupyterlab_service: "{{ 'jupyterlab' in (services | default([])) }}"

- name: Skip hosts without jupyterlab service
  ansible.builtin.meta: end_host
  when: not has_jupyterlab_service
# =========================================================================
# System Dependencies
# =========================================================================
- name: Install system dependencies
ansible.builtin.apt:
name:
- python3
- python3-venv
- python3-dev
- python3-pip
- nodejs
- npm
- graphviz
- git
- curl
state: present
update_cache: true
# =========================================================================
# User Setup
# =========================================================================
- name: Ensure jupyterlab user exists
ansible.builtin.user:
name: "{{ jupyterlab_user }}"
group: "{{ jupyterlab_group }}"
shell: /bin/bash
create_home: true
state: present
- name: Create Notebooks directory
ansible.builtin.file:
path: "{{ jupyterlab_notebook_dir }}"
owner: "{{ jupyterlab_user }}"
group: "{{ jupyterlab_group }}"
state: directory
mode: '0755'
- name: Create JupyterLab config directory
ansible.builtin.file:
path: /etc/jupyterlab
owner: root
group: "{{ jupyterlab_group }}"
state: directory
mode: '0755'
- name: Create JupyterLab log directory
ansible.builtin.file:
path: /var/log/jupyterlab
owner: "{{ jupyterlab_user }}"
group: "{{ jupyterlab_group }}"
state: directory
mode: '0755'
# =========================================================================
# Python Virtual Environment
# =========================================================================
- name: Create virtual environment directory
ansible.builtin.file:
path: "{{ jupyterlab_venv_dir }}"
owner: "{{ jupyterlab_user }}"
group: "{{ jupyterlab_group }}"
state: directory
mode: '0755'
- name: Create virtual environment for JupyterLab
become_user: "{{ jupyterlab_user }}"
ansible.builtin.command:
cmd: "python3 -m venv {{ jupyterlab_venv_dir }}"
creates: "{{ jupyterlab_venv_dir }}/bin/activate"
- name: Upgrade pip in virtual environment
become_user: "{{ jupyterlab_user }}"
ansible.builtin.pip:
name:
- pip
- wheel
- setuptools
state: latest
virtualenv: "{{ jupyterlab_venv_dir }}"
- name: Install JupyterLab and core packages
become_user: "{{ jupyterlab_user }}"
ansible.builtin.pip:
name:
- jupyterlab
- jupyter-ai[all]
- langchain-ollama
- matplotlib
- plotly
- jupyter_contrib_nbextensions
- "jsonschema[format-nongpl]"
- python-mermaid
- ipywidgets
state: present
virtualenv: "{{ jupyterlab_venv_dir }}"
notify: restart jupyterlab
# =========================================================================
# Configuration Files
# =========================================================================
- name: Template JupyterLab configuration
ansible.builtin.template:
src: jupyter_lab_config.py.j2
dest: /etc/jupyterlab/jupyter_lab_config.py
owner: root
group: "{{ jupyterlab_group }}"
mode: '0644'
notify: restart jupyterlab
- name: Template JupyterLab systemd service
ansible.builtin.template:
src: jupyterlab.service.j2
dest: /etc/systemd/system/jupyterlab.service
owner: root
group: root
mode: '0644'
notify:
- reload systemd
- restart jupyterlab
# =========================================================================
# OAuth2-Proxy Sidecar
# =========================================================================
- name: Create oauth2-proxy directory
ansible.builtin.file:
path: "{{ jupyterlab_oauth2_proxy_dir }}"
owner: root
group: root
state: directory
mode: '0755'
- name: Download oauth2-proxy binary
ansible.builtin.get_url:
url: "https://github.com/oauth2-proxy/oauth2-proxy/releases/download/v{{ jupyterlab_oauth2_proxy_version }}/oauth2-proxy-v{{ jupyterlab_oauth2_proxy_version }}.linux-amd64.tar.gz"
dest: "/tmp/oauth2-proxy-v{{ jupyterlab_oauth2_proxy_version }}.tar.gz"
mode: '0644'
- name: Extract oauth2-proxy binary
ansible.builtin.unarchive:
src: "/tmp/oauth2-proxy-v{{ jupyterlab_oauth2_proxy_version }}.tar.gz"
dest: /tmp
remote_src: true
creates: "/tmp/oauth2-proxy-v{{ jupyterlab_oauth2_proxy_version }}.linux-amd64/oauth2-proxy"
- name: Install oauth2-proxy binary
ansible.builtin.copy:
src: "/tmp/oauth2-proxy-v{{ jupyterlab_oauth2_proxy_version }}.linux-amd64/oauth2-proxy"
dest: /usr/local/bin/oauth2-proxy
owner: root
group: root
mode: '0755'
remote_src: true
- name: Template oauth2-proxy configuration
ansible.builtin.template:
src: oauth2-proxy-jupyter.cfg.j2
dest: "{{ jupyterlab_oauth2_proxy_dir }}/oauth2-proxy.cfg"
owner: root
group: root
mode: '0600'
notify: restart oauth2-proxy-jupyter
- name: Template oauth2-proxy systemd service
ansible.builtin.template:
src: oauth2-proxy-jupyter.service.j2
dest: /etc/systemd/system/oauth2-proxy-jupyter.service
owner: root
group: root
mode: '0644'
notify:
- reload systemd
- restart oauth2-proxy-jupyter
# =========================================================================
# Service Management
# =========================================================================
- name: Enable and start JupyterLab service
ansible.builtin.systemd:
name: jupyterlab
enabled: true
state: started
daemon_reload: true
- name: Enable and start OAuth2-Proxy service
ansible.builtin.systemd:
name: oauth2-proxy-jupyter
enabled: true
state: started
daemon_reload: true
handlers:
- name: reload systemd
ansible.builtin.systemd:
daemon_reload: true
- name: restart jupyterlab
ansible.builtin.systemd:
name: jupyterlab
state: restarted
- name: restart oauth2-proxy-jupyter
ansible.builtin.systemd:
name: oauth2-proxy-jupyter
state: restarted

View File

@@ -0,0 +1,62 @@
# JupyterLab Configuration
# Deployed via Ansible - Do not edit manually
# Red Panda Approved
# =============================================================================
# Server Settings
# =============================================================================
# Allow connections from reverse proxy
c.ServerApp.allow_remote_access = True
c.ServerApp.local_hostnames = ['localhost', '127.0.0.1', 'jupyter.{{ jupyterlab_domain }}']
# Disable browser launch
c.ServerApp.open_browser = False
# Disable token authentication (OAuth2-Proxy handles auth)
c.ServerApp.token = ''
c.ServerApp.password = ''
# Base URL for reverse proxy
c.ServerApp.base_url = '/'
# Trust X-Forwarded headers from OAuth2-Proxy
c.ServerApp.trust_xheaders = True
# =============================================================================
# WebSocket Configuration (for reverse proxy)
# =============================================================================
# Allow WebSocket connections from any origin (handled by OAuth2-Proxy)
c.ServerApp.allow_origin = '*'
c.ServerApp.allow_credentials = True
# Disable XSRF for API (OAuth2-Proxy handles CSRF)
c.ServerApp.disable_check_xsrf = True
# =============================================================================
# Notebook Settings
# =============================================================================
# Default notebook directory
c.ServerApp.root_dir = '{{ jupyterlab_notebook_dir }}'
# Allow hidden files
c.ContentsManager.allow_hidden = True
# =============================================================================
# Terminal Settings
# =============================================================================
# Enable terminal
c.ServerApp.terminals_enabled = True
# =============================================================================
# Logging
# =============================================================================
# Log level
c.Application.log_level = 'INFO'
# Log format
c.Application.log_format = '[%(levelname)s %(asctime)s %(name)s] %(message)s'

View File

@@ -0,0 +1,34 @@
[Unit]
Description=JupyterLab Server
After=network.target
Wants=oauth2-proxy-jupyter.service
[Service]
Type=simple
User={{ jupyterlab_user }}
Group={{ jupyterlab_group }}
WorkingDirectory={{ jupyterlab_notebook_dir }}
ExecStart={{ jupyterlab_venv_dir }}/bin/jupyter-lab \
--config=/etc/jupyterlab/jupyter_lab_config.py \
--ip=127.0.0.1 \
--port={{ jupyterlab_port }} \
--no-browser \
--notebook-dir={{ jupyterlab_notebook_dir }}
Environment="PATH={{ jupyterlab_venv_dir }}/bin:/usr/local/bin:/usr/bin:/bin"
Restart=on-failure
RestartSec=10
# Security hardening
NoNewPrivileges=true
PrivateTmp=true
# Logging
StandardOutput=journal
StandardError=journal
SyslogIdentifier=jupyterlab
[Install]
WantedBy=multi-user.target

View File

@@ -0,0 +1,68 @@
# OAuth2-Proxy Configuration for JupyterLab
# Authenticates users via Casdoor OIDC before proxying to JupyterLab
# Red Panda Approved
# Provider Configuration (Casdoor OIDC)
provider = "oidc"
provider_display_name = "Casdoor"
oidc_issuer_url = "{{ jupyterlab_oauth2_oidc_issuer_url }}"
client_id = "{{ jupyterlab_oauth_client_id }}"
client_secret = "{{ jupyterlab_oauth_client_secret }}"
# Redirect URL after authentication
redirect_url = "{{ jupyterlab_oauth2_redirect_url }}"
# Upstream service (JupyterLab on localhost)
upstreams = [
"http://127.0.0.1:{{ jupyterlab_port }}"
]
# Session/Cookie Configuration
cookie_secret = "{{ jupyterlab_oauth2_cookie_secret }}"
cookie_name = "_oauth2_proxy_jupyter"
cookie_secure = true
cookie_httponly = true
cookie_samesite = "lax"
cookie_domains = [
".{{ jupyterlab_domain }}"
]
# Authentication settings
email_domains = ["*"]
oidc_email_claim = "email"
oidc_groups_claim = "groups"
# Session settings
session_store_type = "cookie"
cookie_expire = "168h"
cookie_refresh = "1h"
# Request settings - pass user info to JupyterLab
pass_access_token = false
pass_authorization_header = false
set_authorization_header = false
set_xauthrequest = true
# Logging
request_logging = true
auth_logging = true
standard_logging = true
# Network settings
http_address = "0.0.0.0:{{ jupyterlab_proxy_port }}"
reverse_proxy = true
real_client_ip_header = "X-Forwarded-For"
# Skip authentication for health check endpoints
skip_auth_routes = [
"^/api/status$",
"^/healthz$"
]
# OIDC specific settings
skip_provider_button = true
oidc_extra_audiences = []
insecure_oidc_allow_unverified_email = true
# SSL verification (internal Casdoor uses valid certs)
ssl_insecure_skip_verify = false

View File

@@ -0,0 +1,23 @@
[Unit]
Description=OAuth2-Proxy for JupyterLab
After=network.target jupyterlab.service
Requires=jupyterlab.service
[Service]
Type=simple
ExecStart=/usr/local/bin/oauth2-proxy --config={{ jupyterlab_oauth2_proxy_dir }}/oauth2-proxy.cfg
Restart=on-failure
RestartSec=5
# Security hardening
NoNewPrivileges=true
PrivateTmp=true
# Logging
StandardOutput=journal
StandardError=journal
SyslogIdentifier=oauth2-proxy-jupyter
[Install]
WantedBy=multi-user.target

22
ansible/kernos/.env.j2 Normal file
View File

@@ -0,0 +1,22 @@
# Kernos Environment Configuration
# HTTP-enabled MCP shell server using FastMCP
# ============================================================================
# Server Configuration
# ============================================================================
HOST={{ kernos_host | default('0.0.0.0') }}
PORT={{ kernos_port }}
# ============================================================================
# Logging Configuration
# ============================================================================
LOG_FORMAT={{ kernos_log_format | default('json') }}
LOG_LEVEL={{ kernos_log_level | default('INFO') }}
ENVIRONMENT={{ kernos_environment | default('production') }}
# ============================================================================
# Security Configuration
# ============================================================================
# Comma-separated whitelist of allowed commands
# Commands after shell operators (;, &&, ||, |) are also validated
ALLOW_COMMANDS={{ kernos_allow_commands }}

180
ansible/kernos/deploy.yml Normal file
View File

@@ -0,0 +1,180 @@
---
- name: Deploy Kernos MCP Shell Server
hosts: kernos
vars:
ansible_common_remote_group: "{{kernos_group}}"
allow_world_readable_tmpfiles: true
tasks:
- name: Create Kernos group
become: true
ansible.builtin.group:
name: "{{kernos_group}}"
state: present
- name: Create kernos user
become: true
ansible.builtin.user:
name: "{{kernos_user}}"
group: "{{kernos_group}}"
home: "/home/{{kernos_user}}"
shell: /bin/bash
system: false
create_home: true
- name: Add remote_user to kernos group
become: true
ansible.builtin.user:
name: "{{remote_user}}"
groups: "{{kernos_group}}"
append: true
- name: Reset connection to pick up new group membership
ansible.builtin.meta: reset_connection
- name: Create required directories
become: true
ansible.builtin.file:
path: "{{kernos_directory}}"
owner: "{{kernos_user}}"
group: "{{kernos_group}}"
state: directory
mode: '750'
- name: Ensure tar is installed for unarchive task
become: true
ansible.builtin.apt:
name:
- tar
state: present
update_cache: true
- name: Ensure Python, Python Dev, Venv module is installed
become: true
ansible.builtin.apt:
name: [python3, python3-venv, python3-dev]
state: present
update_cache: true
- name: Transfer and unarchive git archive
become: true
ansible.builtin.unarchive:
src: "~/rel/kernos_{{kernos_rel}}.tar"
dest: "{{kernos_directory}}"
owner: "{{kernos_user}}"
group: "{{kernos_group}}"
mode: '550'
notify: restart kernos
# Pre-create the venv directory with the correct ownership so the
# subsequent venv-creation task (run as the kernos user) can write into it,
# and so a venv unpacked from the release tarball ends up kernos-owned.
# The original task carried "when: ansible_facts['file'] is defined or true",
# which is always true — a dead condition, removed.
- name: Ensure venv directory ownership is correct
  become: true
  ansible.builtin.file:
    path: "{{ kernos_directory }}/.venv"
    owner: "{{ kernos_user }}"
    group: "{{ kernos_group }}"
    state: directory
    recurse: true
- name: Create virtual environment for Kernos
become: true
become_user: "{{kernos_user}}"
ansible.builtin.command:
cmd: "python3 -m venv {{kernos_directory}}/.venv/"
creates: "{{kernos_directory}}/.venv/bin/activate"
- name: Install wheel in virtual environment
become: true
become_user: "{{kernos_user}}"
ansible.builtin.pip:
name:
- wheel
state: latest
virtualenv: "{{kernos_directory}}/.venv"
- name: Install pyproject.toml dependencies in virtualenv
become: true
become_user: "{{kernos_user}}"
ansible.builtin.pip:
chdir: "{{kernos_directory}}"
name: .
virtualenv: "{{kernos_directory}}/.venv"
virtualenv_command: python3 -m venv
notify: restart kernos
- name: Template Kernos .env configuration
become: true
ansible.builtin.template:
src: .env.j2
dest: "{{kernos_directory}}/.env"
owner: "{{kernos_user}}"
group: "{{kernos_group}}"
mode: '640'
notify: restart kernos
- name: Template systemd service file
become: true
ansible.builtin.template:
src: kernos.service.j2
dest: /etc/systemd/system/kernos.service
owner: root
group: root
mode: '644'
notify: restart kernos
- name: Enable and start kernos service
become: true
ansible.builtin.systemd:
name: kernos
enabled: true
state: started
daemon_reload: true
- name: Flush handlers to restart service before validation
ansible.builtin.meta: flush_handlers
- name: Validate Kernos liveness endpoint
ansible.builtin.uri:
url: "http://localhost:{{kernos_port}}/live"
status_code: 200
return_content: true
register: live_check
retries: 5
delay: 5
until: live_check.status == 200
- name: Validate Kernos readiness endpoint
ansible.builtin.uri:
url: "http://localhost:{{kernos_port}}/ready"
status_code: 200
return_content: true
register: ready_check
retries: 5
delay: 5
until: ready_check.status == 200
- name: Validate Kernos health endpoint
ansible.builtin.uri:
url: "http://localhost:{{kernos_port}}/health"
status_code: 200
return_content: true
register: health_check
retries: 5
delay: 5
until: health_check.status == 200
- name: Validate Kernos /metrics endpoint
ansible.builtin.uri:
url: "http://localhost:{{kernos_port}}/metrics"
status_code: 200
return_content: false
register: metrics_check
retries: 5
delay: 5
until: metrics_check.status == 200
handlers:
- name: restart kernos
become: true
ansible.builtin.systemd:
name: kernos
state: restarted

View File

@@ -0,0 +1,23 @@
# Static (non-templated) systemd unit for the Kernos MCP shell server.
# NOTE(review): a templated variant (kernos.service.j2) also exists and runs
# the service as {{kernos_user}}; confirm which unit is authoritative.
[Unit]
Description=Kernos MCP Server
After=network.target
[Service]
Type=simple
# NOTE(review): the deploy playbook creates /srv/kernos owned kernos:kernos
# with mode 750 — User=nobody likely cannot read/enter it; confirm.
User=nobody
Group=nogroup
WorkingDirectory=/srv/kernos
ExecStart=/srv/kernos/.venv/bin/kernos
EnvironmentFile=/srv/kernos/.env
Restart=on-failure
RestartSec=5
# Security hardening
NoNewPrivileges=true
# NOTE(review): ReadWritePaths=/ re-grants write access to the entire
# filesystem, negating ProtectSystem=strict below — presumably this should
# be narrowed (e.g. to /srv/kernos); confirm before tightening.
ProtectSystem=strict
ProtectHome=read-only
PrivateTmp=false
ReadWritePaths=/
[Install]
WantedBy=multi-user.target

View File

@@ -0,0 +1,23 @@
[Unit]
Description=Kernos MCP Server
After=network.target
[Service]
Type=simple
User={{kernos_user}}
Group={{kernos_group}}
WorkingDirectory={{kernos_directory}}
ExecStart={{kernos_directory}}/.venv/bin/kernos
EnvironmentFile={{kernos_directory}}/.env
Restart=on-failure
RestartSec=5
# Security hardening — NOTE(review): every hardening option below is disabled
# and ReadWritePaths=/ grants full filesystem write access; confirm this
# relaxation is intentional (the static kernos.service is far stricter)
NoNewPrivileges=false
ProtectSystem=false
ProtectHome=false
PrivateTmp=false
ReadWritePaths=/
[Install]
WantedBy=multi-user.target

47
ansible/kernos/stage.yml Normal file
View File

@@ -0,0 +1,47 @@
---
- name: Stage Kernos release tarball
hosts: localhost
gather_facts: false
vars:
archive_path: "{{rel_dir}}/kernos_{{kernos_rel}}.tar"
kernos_repo_url: "ssh://robert@clio.helu.ca:18677/mnt/dev/kernos"
kernos_repo_dir: "{{repo_dir}}/kernos"
tasks:
# Local staging directories for release tarballs and repository checkouts.
# Uses the fully-qualified collection name (ansible.builtin.file) for
# consistency with every other playbook in this repo, and the unambiguous
# quoted-octal mode form.
- name: Ensure release directory exists
  ansible.builtin.file:
    path: "{{ rel_dir }}"
    state: directory
    mode: '0755'

- name: Ensure repo directory exists
  ansible.builtin.file:
    path: "{{ repo_dir }}"
    state: directory
    mode: '0755'
# Clone-or-update flow: the git module both clones and checks out the
# requested release; if it reports "not changed" the repo already existed,
# so fall back to an explicit fetch + pull.
# NOTE(review): ignore_errors on the clone swallows real failures (SSH auth,
# DNS, bad ref); the fetch/pull tasks would then run against a possibly
# missing checkout. Consider failed_when/rescue instead — confirm intent.
- name: Clone Kernos repository if not present
ansible.builtin.git:
repo: "{{kernos_repo_url}}"
dest: "{{kernos_repo_dir}}"
version: "{{kernos_rel}}"
accept_hostkey: true
register: git_clone
ignore_errors: true
- name: Fetch all remote branches and tags
ansible.builtin.command: git fetch --all
args:
chdir: "{{kernos_repo_dir}}"
when: git_clone is not changed
# NOTE(review): `git pull` assumes the checkout is on a branch with an
# upstream; it will fail on a detached-HEAD tag checkout — confirm.
- name: Pull latest changes
ansible.builtin.command: git pull
args:
chdir: "{{kernos_repo_dir}}"
when: git_clone is not changed
# Produce the tarball consumed by deploy.yml's unarchive task.
- name: Create Kernos archive for specified release
ansible.builtin.command: git archive -o "{{archive_path}}" "{{kernos_rel}}"
args:
chdir: "{{kernos_repo_dir}}"

View File

@@ -0,0 +1,241 @@
# add a access code to lock your lobe-chat application, you can set a long password to avoid leaking. If this value contains a comma, it is a password array.
# ACCESS_CODE=lobe66
# Specify your API Key selection method, currently supporting `random` and `turn`.
# API_KEY_SELECT_MODE=random
########################################
########## AI Provider Service #########
########################################
### OpenAI ###
# your OpenAI API key
OPENAI_API_KEY=sk-xxxxxxxxx
# use a proxy to connect to the OpenAI API
# OPENAI_PROXY_URL=https://api.openai.com/v1
# add your custom model name, multi model separate by comma. for example gpt-3.5-1106,gpt-4-1106
# OPENAI_MODEL_LIST=gpt-3.5-turbo
### Azure OpenAI ###
# you can learn about Azure OpenAI Service at https://learn.microsoft.com/en-us/azure/ai-services/openai/overview
# use Azure OpenAI Service by uncomment the following line
# The API key you applied for on the Azure OpenAI account page, which can be found in the "Keys and Endpoints" section.
# AZURE_API_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# The endpoint you applied for on the Azure OpenAI account page, which can be found in the "Keys and Endpoints" section.
# AZURE_ENDPOINT=https://docs-test-001.openai.azure.com
# Azure's API version, follows the YYYY-MM-DD format
# AZURE_API_VERSION=2024-10-21
### Anthropic Service ####
# ANTHROPIC_API_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# use a proxy to connect to the Anthropic API
# ANTHROPIC_PROXY_URL=https://api.anthropic.com
### Google AI ####
# GOOGLE_API_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxx
### AWS Bedrock ###
# AWS_REGION=us-east-1
# AWS_ACCESS_KEY_ID=xxxxxxxxxxxxxxxxxxx
# AWS_SECRET_ACCESS_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
### Ollama AI ####
# You can use ollama to get and run LLM locally, learn more about it via https://github.com/ollama/ollama
# The local/remote ollama service url
# OLLAMA_PROXY_URL=http://127.0.0.1:11434
# OLLAMA_MODEL_LIST=your_ollama_model_names
### OpenRouter Service ###
# OPENROUTER_API_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# OPENROUTER_MODEL_LIST=model1,model2,model3
### Mistral AI ###
# MISTRAL_API_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxx
### Perplexity Service ###
# PERPLEXITY_API_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxx
### Groq Service ####
# GROQ_API_KEY=gsk_xxxxxxxxxxxxxxxxxxxxxxxxxxxxx
#### 01.AI Service ####
# ZEROONE_API_KEY=sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxx
### TogetherAI Service ###
# TOGETHERAI_API_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxx
### ZhiPu AI ###
# ZHIPU_API_KEY=xxxxxxxxxxxxxxxxxxx.xxxxxxxxxxxxx
### Moonshot AI ####
# MOONSHOT_API_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
### Minimax AI ####
# MINIMAX_API_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxx
### DeepSeek AI ####
# DEEPSEEK_PROXY_URL=https://api.deepseek.com/v1
# DEEPSEEK_API_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxx
### Qiniu AI ####
# QINIU_PROXY_URL=https://api.qnaigc.com/v1
# QINIU_API_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxx
### Qwen AI ####
# QWEN_API_KEY=sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
### Cloudflare Workers AI ####
# CLOUDFLARE_API_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# CLOUDFLARE_BASE_URL_OR_ACCOUNT_ID=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
### SiliconCloud AI ####
# SILICONCLOUD_API_KEY=sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
### TencentCloud AI ####
# TENCENT_CLOUD_API_KEY=sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
### PPIO ####
# PPIO_API_KEY=sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
### INFINI-AI ###
# INFINIAI_API_KEY=sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
########################################
############ Market Service ############
########################################
# The LobeChat agents market index url
# AGENTS_INDEX_URL=https://chat-agents.lobehub.com
########################################
############ Plugin Service ############
########################################
# The LobeChat plugins store index url
# PLUGINS_INDEX_URL=https://chat-plugins.lobehub.com
# set the plugin settings
# the format is `plugin-identifier:key1=value1;key2=value2`, multiple settings fields are separated by semicolons `;`, multiple plugin settings are separated by commas `,`.
# PLUGIN_SETTINGS=search-engine:SERPAPI_API_KEY=xxxxx
########################################
####### Doc / Changelog Service ########
########################################
# Use in Changelog / Document service cdn url prefix
# DOC_S3_PUBLIC_DOMAIN=https://xxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# Use in dev cdn workflow
# DOC_S3_ACCESS_KEY_ID=xxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# DOC_S3_SECRET_ACCESS_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxx
########################################
##### S3 Object Storage Service ########
########################################
# S3 keys
# S3_ACCESS_KEY_ID=xxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# S3_SECRET_ACCESS_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# Bucket name
# S3_BUCKET=lobechat
# Bucket request endpoint
# S3_ENDPOINT=https://xxxxxxxxxxxxxxxxxxxxxxxxxxxxx.r2.cloudflarestorage.com
# Public access domain for the bucket
# S3_PUBLIC_DOMAIN=https://s3-for-lobechat.your-domain.com
# Bucket region, such as us-west-1, generally not needed to add
# but some service providers may require configuration
# S3_REGION=us-west-1
########################################
############ Auth Service ##############
########################################
# Clerk related configurations
# Clerk public key and secret key
#NEXT_PUBLIC_CLERK_PUBLISHABLE_KEY=pk_live_xxxxxxxxxxx
#CLERK_SECRET_KEY=sk_live_xxxxxxxxxxxxxxxxxxxxxx
# you need to config the clerk webhook secret key if you want to use the clerk with database
#CLERK_WEBHOOK_SECRET=whsec_xxxxxxxxxxxxxxxxxxxxxx
# NextAuth related configurations
# NEXT_PUBLIC_ENABLE_NEXT_AUTH=1
# NEXT_AUTH_SECRET=
# Auth0 configurations
# AUTH_AUTH0_ID=
# AUTH_AUTH0_SECRET=
# AUTH_AUTH0_ISSUER=https://your-domain.auth0.com
########################################
########## Server Database #############
########################################
# Specify the service mode as server if you want to use the server database
# NEXT_PUBLIC_SERVICE_MODE=server
# Postgres database URL
# DATABASE_URL=postgres://username:password@host:port/database
# use `openssl rand -base64 32` to generate a key for the encryption of the database
# we use this key to encrypt the user api key and proxy url
#KEY_VAULTS_SECRET=xxxxx/xxxxxxxxxxxxxx=
# Specify the Embedding model and Reranker model (reranker not yet implemented)
# DEFAULT_FILES_CONFIG="embedding_model=openai/text-embedding-3-small,reranker_model=cohere/rerank-english-v3.0,query_mode=full_text"
########################################
########## MCP Service Config ##########
########################################
# MCP tool call timeout (milliseconds)
# MCP_TOOL_TIMEOUT=60000

View File

@@ -0,0 +1,82 @@
---
# Deploy LobeChat to hosts that declare the 'lobechat' service.
#
# Flow: create a dedicated system user/group, template the docker-compose
# file, start the container stack (restarting it when the rendered config
# changed), then poll the web UI until it answers on /chat.
- name: Deploy LobeChat to Dev Environment
  hosts: ubuntu
  tasks:
    - name: Check if host has lobechat service
      ansible.builtin.set_fact:
        has_lobechat_service: "{{ 'lobechat' in services | default([]) }}"

    - name: Skip hosts without lobechat service
      ansible.builtin.meta: end_host
      when: not has_lobechat_service

    # FIX: the group was previously created under the name
    # "{{ lobechat_user }}" while every other task references
    # "{{ lobechat_group }}" — if the two vars ever differ, the user-creation
    # task below would fail on a missing group. Use the group var consistently.
    - name: Create lobechat group
      become: true
      ansible.builtin.group:
        name: "{{ lobechat_group }}"

    - name: Create lobechat user
      become: true
      ansible.builtin.user:
        name: "{{ lobechat_user }}"
        comment: "{{ lobechat_user }}"
        group: "{{ lobechat_group }}"
        system: true

    # ponos needs group membership to read the deployed files (modes 0750/0550).
    - name: Add group lobechat to user ponos
      become: true
      ansible.builtin.user:
        name: ponos
        groups: "{{ lobechat_group }}"
        append: true

    - name: Create lobechat directory
      become: true
      ansible.builtin.file:
        path: "{{ lobechat_directory }}"
        owner: "{{ lobechat_user }}"
        group: "{{ lobechat_group }}"
        state: directory
        mode: "0750"

    - name: Template docker-compose file
      become: true
      ansible.builtin.template:
        src: docker-compose.yml.j2
        dest: "{{ lobechat_directory }}/docker-compose.yml"
        owner: "{{ lobechat_user }}"
        group: "{{ lobechat_group }}"
        mode: "0550"
      register: lobechat_compose

    # Group membership added above only takes effect on a fresh login session.
    - name: Reset SSH connection to apply group changes
      ansible.builtin.meta: reset_connection

    - name: Start LobeChat service
      become: true
      community.docker.docker_compose_v2:
        project_src: "{{ lobechat_directory }}"
        state: present
        pull: always

    - name: Restart LobeChat if configuration changed
      become: true
      community.docker.docker_compose_v2:
        project_src: "{{ lobechat_directory }}"
        state: restarted
      when: lobechat_compose is changed

    # Poll the chat endpoint until the container is serving (max ~150 s).
    # delegate_to inventory_hostname was removed — tasks already run there.
    - name: Wait for LobeChat to be healthy
      ansible.builtin.uri:
        url: "http://localhost:{{ lobechat_port }}/chat"
        method: GET
        status_code: 200
      register: lobechat_health
      until: lobechat_health.status == 200
      retries: 30
      delay: 5

    - name: Display LobeChat status
      ansible.builtin.debug:
        msg: "LobeChat is running at http://{{ inventory_hostname }}:{{ lobechat_port }}"

View File

@@ -0,0 +1,36 @@
# docker-compose template for the LobeChat server-database deployment.
# All {{ ... }} expressions are rendered by Ansible's template module;
# delimiter spacing normalized to match the other templates in this commit.
services:
  lobe-chat:
    image: lobehub/lobe-chat-database:latest
    pull_policy: always
    environment:
      # Server-side Postgres database and the secret used to encrypt
      # user-supplied API keys / proxy URLs stored in it.
      - DATABASE_URL=postgresql://{{ lobechat_db_user }}:{{ lobechat_db_password }}@{{ lobechat_db_host }}:{{ lobechat_db_port }}/{{ lobechat_db_name }}
      - KEY_VAULTS_SECRET={{ lobechat_key_vaults_secret }}
      # NextAuth + Casdoor SSO configuration.
      - NEXTAUTH_URL={{ lobechat_nextauth_url }}
      - NEXT_AUTH_SECRET={{ lobechat_next_auth_secret }}
      - NEXT_AUTH_SSO_PROVIDERS={{ lobechat_next_auth_sso_providers }}
      - AUTH_CASDOOR_ISSUER={{ lobechat_auth_casdoor_issuer }}
      - AUTH_CASDOOR_ID={{ lobechat_auth_casdoor_id }}
      - AUTH_CASDOOR_SECRET={{ lobechat_auth_casdoor_secret }}
      # SECURITY: disables TLS certificate validation for the whole Node
      # process, not just the internal OIDC endpoint. Acceptable only with
      # self-signed certs on a trusted network; prefer NODE_EXTRA_CA_CERTS
      # with the internal CA if possible.
      - NODE_TLS_REJECT_UNAUTHORIZED=0
      # S3-compatible object storage for file uploads.
      - S3_ENDPOINT={{ lobechat_s3_endpoint }}
      - S3_PUBLIC_DOMAIN={{ lobechat_s3_public_domain }}
      - S3_ACCESS_KEY_ID={{ lobechat_s3_access_key }}
      - S3_SECRET_ACCESS_KEY={{ lobechat_s3_secret_key }}
      - S3_BUCKET={{ lobechat_s3_bucket }}
      # Web search and model-provider endpoints/keys.
      - SEARXNG_URL={{ lobechat_searxng_url }}
      - OPENAI_PROXY_URL={{ lobechat_openai_proxy_url }}
      - OPENAI_API_KEY={{ lobechat_openai_key }}
      - OLLAMA_PROXY_URL={{ lobechat_ollama_proxy_url }}
      - ANTHROPIC_API_KEY={{ lobechat_anthropic_api_key }}
      - GOOGLE_API_KEY={{ lobechat_google_api_key }}
      - APP_URL={{ lobechat_app_url }}
    # Ship container logs to the local syslog collector.
    logging:
      driver: syslog
      options:
        syslog-address: "tcp://127.0.0.1:{{ lobechat_syslog_port }}"
        syslog-format: "{{ syslog_format }}"
        tag: "lobechat"
    ports:
      - "{{ lobechat_port }}:3210"
    restart: unless-stopped

View File

@@ -0,0 +1,41 @@
# Grafana Loki single-instance configuration (Ansible Jinja2 template).
# No multi-tenancy: auth is disabled and replication_factor is 1.
auth_enabled: false
# HTTP and gRPC listen ports, supplied by the inventory.
server:
  http_listen_port: {{ loki_port }}
  grpc_listen_port: {{ loki_grpc_port }}
# Local-filesystem storage with an in-memory ring — single-node setup,
# no external KV store required.
common:
  path_prefix: {{ loki_data_dir }}
  storage:
    filesystem:
      chunks_directory: {{ loki_data_dir }}/chunks
      rules_directory: {{ loki_data_dir }}/rules
  replication_factor: 1
  ring:
    instance_addr: 127.0.0.1
    kvstore:
      store: inmemory
# Cache query-range results in process memory (capped at 100 MB).
query_range:
  results_cache:
    cache:
      embedded_cache:
        enabled: true
        max_size_mb: 100
# TSDB index schema (v13), daily index files, effective from 2024-04-01.
schema_config:
  configs:
    - from: 2024-04-01
      object_store: filesystem
      store: tsdb
      schema: v13
      index:
        prefix: index_
        period: 24h
# Ruler forwards firing alerts to the Alertmanager from the inventory.
ruler:
  alertmanager_url: http://{{ alertmanager_host }}:{{ alertmanager_port }}
# Red Panda Approved Configuration
# Opt out of anonymous usage reporting to Grafana Labs.
analytics:
  reporting_enabled: false

Some files were not shown because too many files have changed in this diff Show More