docs: rewrite README with structured overview and quick start guide

Replaces the minimal project description with a comprehensive README
including a component overview table, quick start instructions, common
Ansible operations, and links to detailed documentation. Aligns with
Red Panda Approval™ standards.
This commit is contained in:
2026-03-03 12:49:06 +00:00
parent c7be03a743
commit b4d60f2f38
219 changed files with 34586 additions and 2 deletions

View File

@@ -0,0 +1,57 @@
// Alloy agent configuration (Jinja2 template).
// Collects syslog/auth files, the systemd journal, and a local TCP syslog
// stream (Neo4j), forwarding all logs to Loki; node metrics are scraped and
// pushed via Prometheus remote_write.
logging {
	level = "{{alloy_log_level}}"
}
// Tail the classic syslog and auth log files, one job label per file.
loki.source.file "system_logs" {
	targets = [
		{__path__ = "/var/log/syslog", job = "syslog"},
		{__path__ = "/var/log/auth.log", job = "auth"},
	]
	forward_to = [loki.write.default.receiver]
}
// Read the systemd journal, tagging entries with host/environment labels.
loki.source.journal "systemd_logs" {
	forward_to = [loki.write.default.receiver]
	labels = {
		job = "systemd",
		hostname = "{{inventory_hostname}}",
		environment = "{{deployment_environment}}",
	}
}
// Local TCP syslog listener; presumably the Neo4j service logs to this
// port via a syslog driver — confirm against the service definition.
loki.source.syslog "neo4j_logs" {
	listener {
		address = "127.0.0.1:{{neo4j_syslog_port}}"
		protocol = "tcp"
		syslog_format = "{{ syslog_format }}"
		labels = {
			job = "neo4j",
			hostname = "{{inventory_hostname}}",
			environment = "{{deployment_environment}}",
		}
	}
	forward_to = [loki.write.default.receiver]
}
// Node/system metrics exporter (only the mdadm collector is disabled here).
prometheus.exporter.unix "default" {
	include_exporter_metrics = true
	disable_collectors = ["mdadm"]
}
// Scrape the local node exporter and forward to remote_write.
prometheus.scrape "default" {
	targets = prometheus.exporter.unix.default.targets
	forward_to = [prometheus.remote_write.default.receiver]
	job_name = "containers"
}
// Central Prometheus remote-write endpoint.
prometheus.remote_write "default" {
	endpoint {
		url = "{{prometheus_remote_write_url}}"
	}
}
// Central Loki push endpoint.
loki.write "default" {
	endpoint {
		url = "{{loki_url}}"
	}
}

View File

@@ -0,0 +1,24 @@
// Default Alloy Configuration
// Standard system monitoring and log collection.
// Rendered for hosts that have no host-specific config.alloy.j2 template
// (see the fallback template task in the deploy playbook).
logging {
	level = "{{alloy_log_level}}"
	format = "logfmt"
}
// Loki log forwarding — single push endpoint shared by all sources.
loki.write "default" {
	endpoint {
		url = "{{ loki_url }}"
	}
}
// System log collection: systemd journal with host/environment labels.
loki.source.journal "systemd_logs" {
	forward_to = [loki.write.default.receiver]
	labels = {
		job = "systemd",
		hostname = "{{ inventory_hostname }}",
		environment = "{{ deployment_environment }}",
	}
}

116
ansible/alloy/deploy.yml Normal file
View File

@@ -0,0 +1,116 @@
---
# Installs and configures Grafana Alloy on every Ubuntu host whose inventory
# `services` list contains "alloy"; other hosts are skipped early via
# end_host. A host-specific template at
# {{ playbook_dir }}/<short hostname>/config.alloy.j2 takes precedence over
# the shared default config.alloy.j2. Group memberships (docker/git) are
# granted conditionally so Alloy can read container and Gitea logs.
- name: Deploy Alloy to All Ubuntu Hosts
  hosts: ubuntu
  tasks:
    # Gate the whole play on the per-host `services` inventory variable.
    - name: Check if host has alloy service
      ansible.builtin.set_fact:
        has_alloy_service: "{{'alloy' in services}}"
    - name: Skip hosts without alloy service
      ansible.builtin.meta: end_host
      when: not has_alloy_service
    # Grafana's official APT repository, declared deb822-style.
    - name: Add Grafana repository
      become: true
      ansible.builtin.deb822_repository:
        name: grafana
        types: [deb]
        uris: https://apt.grafana.com
        suites: [stable]
        components: [main]
        signed_by: https://apt.grafana.com/gpg.key
        state: present
    - name: Install Alloy
      become: true
      ansible.builtin.apt:
        name: alloy
        state: present
        update_cache: true
    # The stat runs on the controller (delegate_to: localhost) because the
    # templates live alongside the playbook, not on the managed host.
    - name: Check for host-specific Alloy config
      ansible.builtin.stat:
        path: "{{playbook_dir}}/{{inventory_hostname_short}}/config.alloy.j2"
      register: host_specific_config
      delegate_to: localhost
      connection: local
    # Exactly one of the next two template tasks runs, keyed on the stat
    # result above; both notify the restart handler on change.
    - name: Create Alloy configuration (host-specific)
      become: true
      ansible.builtin.template:
        src: "{{ inventory_hostname_short }}/config.alloy.j2"
        dest: /etc/alloy/config.alloy
        owner: alloy
        group: alloy
        mode: '644'
      when: host_specific_config.stat.exists
      notify: restart alloy
    - name: Create Alloy configuration (default)
      become: true
      ansible.builtin.template:
        src: config.alloy.j2
        dest: /etc/alloy/config.alloy
        owner: alloy
        group: alloy
        mode: '644'
      when: not host_specific_config.stat.exists
      notify: restart alloy
    # Docker hosts: alloy needs the docker group to reach the Docker socket
    # for cAdvisor metrics.
    - name: Check if host has docker service
      ansible.builtin.set_fact:
        has_docker_service: "{{'docker' in services}}"
    - name: Add alloy user to docker group for cAdvisor
      become: true
      ansible.builtin.user:
        name: alloy
        groups: docker
        append: true
      when: has_docker_service
      notify: restart alloy
    # Gitea hosts: Gitea's files are owned by the `git` group, so alloy
    # joins it to tail the application log.
    - name: Check if host has gitea service
      ansible.builtin.set_fact:
        has_gitea_service: "{{'gitea' in services}}"
    - name: Add alloy user to gitea group for log collection
      become: true
      ansible.builtin.user:
        name: alloy
        groups: git
        append: true
      when: has_gitea_service
      notify: restart alloy
    - name: Enable and start Alloy service
      become: true
      ansible.builtin.systemd:
        name: alloy
        enabled: true
        state: started
        daemon_reload: true
    # Run pending restart handlers now so the verification below observes
    # the final service state.
    - name: Flush handlers to ensure Alloy is restarted if needed
      ansible.builtin.meta: flush_handlers
    # systemd module without a `state` only queries; the result is asserted
    # against ActiveState below.
    - name: Verify Alloy service is running
      become: true
      ansible.builtin.systemd:
        name: alloy
      register: alloy_service_status
    - name: Confirm Alloy service is active
      ansible.builtin.assert:
        that:
          - alloy_service_status.status.ActiveState == "active"
        fail_msg: "Alloy service is not running (state: {{ alloy_service_status.status.ActiveState }})"
        success_msg: "Alloy service is running"
  handlers:
    - name: restart alloy
      become: true
      ansible.builtin.systemd:
        name: alloy
        state: restarted

View File

@@ -0,0 +1,131 @@
// Alloy configuration (Jinja2 template) for the MCP Docker host.
// Collects system logs, journal entries (with per-unit relabeling for
// mcpo), and five local TCP syslog listeners, forwarding to Loki;
// node metrics go to Prometheus remote_write.
logging {
	level = "{{alloy_log_level}}"
}
// Tail syslog and auth log files.
loki.source.file "system_logs" {
	targets = [
		{__path__ = "/var/log/syslog", job = "syslog"},
		{__path__ = "/var/log/auth.log", job = "auth"},
	]
	forward_to = [loki.write.default.receiver]
}
// Journal entries are routed through the relabel stage below so mcpo
// units get their own job/app labels.
loki.source.journal "systemd_logs" {
	forward_to = [loki.relabel.journal_apps.receiver]
	labels = {
		job = "systemd",
		hostname = "{{inventory_hostname}}",
		environment = "{{deployment_environment}}",
	}
}
// Rewrite labels for journal entries from mcpo.service: sets both
// job="mcpo" and app="mcpo" (two rules, same regex, different targets).
loki.relabel "journal_apps" {
	forward_to = [loki.write.default.receiver]
	rule {
		source_labels = ["__journal__systemd_unit"]
		regex = "mcpo\\.service"
		target_label = "job"
		replacement = "mcpo"
	}
	rule {
		source_labels = ["__journal__systemd_unit"]
		regex = "mcpo\\.service"
		target_label = "app"
		replacement = "mcpo"
	}
}
// Per-service TCP syslog listeners, one port per service; ports come from
// inventory variables. All forward straight to Loki.
loki.source.syslog "argos_logs" {
	listener {
		address = "127.0.0.1:{{argos_syslog_port}}"
		protocol = "tcp"
		syslog_format = "{{ syslog_format }}"
		labels = {
			job = "argos",
			hostname = "{{inventory_hostname}}",
			environment = "{{deployment_environment}}",
		}
	}
	forward_to = [loki.write.default.receiver]
}
loki.source.syslog "grafana_mcp_logs" {
	listener {
		address = "127.0.0.1:{{grafana_mcp_syslog_port}}"
		protocol = "tcp"
		syslog_format = "{{ syslog_format }}"
		labels = {
			job = "grafana_mcp",
			hostname = "{{inventory_hostname}}",
			environment = "{{deployment_environment}}",
		}
	}
	forward_to = [loki.write.default.receiver]
}
loki.source.syslog "neo4j_cypher_logs" {
	listener {
		address = "127.0.0.1:{{neo4j_cypher_syslog_port}}"
		protocol = "tcp"
		syslog_format = "{{ syslog_format }}"
		labels = {
			job = "neo4j-cypher",
			hostname = "{{inventory_hostname}}",
			environment = "{{deployment_environment}}",
		}
	}
	forward_to = [loki.write.default.receiver]
}
loki.source.syslog "neo4j_memory_logs" {
	listener {
		address = "127.0.0.1:{{neo4j_memory_syslog_port}}"
		protocol = "tcp"
		syslog_format = "{{ syslog_format }}"
		labels = {
			job = "neo4j-memory",
			hostname = "{{inventory_hostname}}",
			environment = "{{deployment_environment}}",
		}
	}
	forward_to = [loki.write.default.receiver]
}
loki.source.syslog "gitea_mcp_logs" {
	listener {
		address = "127.0.0.1:{{gitea_mcp_syslog_port}}"
		protocol = "tcp"
		syslog_format = "{{ syslog_format }}"
		labels = {
			job = "gitea-mcp",
			hostname = "{{inventory_hostname}}",
			environment = "{{deployment_environment}}",
		}
	}
	forward_to = [loki.write.default.receiver]
}
// Node/system metrics exporter (mdadm collector disabled).
prometheus.exporter.unix "default" {
	include_exporter_metrics = true
	disable_collectors = ["mdadm"]
}
// Scrape local node metrics under the mcp_docker_host job.
prometheus.scrape "default" {
	targets = prometheus.exporter.unix.default.targets
	forward_to = [prometheus.remote_write.default.receiver]
	job_name = "mcp_docker_host"
}
// Central Prometheus remote-write endpoint.
prometheus.remote_write "default" {
	endpoint {
		url = "{{prometheus_remote_write_url}}"
	}
}
// Central Loki push endpoint.
loki.write "default" {
	endpoint {
		url = "{{loki_url}}"
	}
}

View File

@@ -0,0 +1,98 @@
// Alloy configuration (Jinja2 template) for a host running RabbitMQ,
// SearXNG, smtp4dev, and Home Assistant. Logs go to Loki; node metrics
// and Home Assistant metrics go to Prometheus remote_write.
logging {
	level = "{{alloy_log_level}}"
}
// Tail syslog and auth log files.
loki.source.file "system_logs" {
	targets = [
		{__path__ = "/var/log/syslog", job = "syslog"},
		{__path__ = "/var/log/auth.log", job = "auth"},
	]
	forward_to = [loki.write.default.receiver]
}
// systemd journal with host/environment labels.
loki.source.journal "systemd_logs" {
	forward_to = [loki.write.default.receiver]
	labels = {
		job = "systemd",
		hostname = "{{inventory_hostname}}",
		environment = "{{deployment_environment}}",
	}
}
// Per-service TCP syslog listeners (ports from inventory variables).
loki.source.syslog "rabbitmq_logs" {
	listener {
		address = "127.0.0.1:{{rabbitmq_syslog_port}}"
		protocol = "tcp"
		syslog_format = "{{ syslog_format }}"
		labels = {
			job = "rabbitmq",
			hostname = "{{inventory_hostname}}",
			environment = "{{deployment_environment}}",
		}
	}
	forward_to = [loki.write.default.receiver]
}
loki.source.syslog "searxng_logs" {
	listener {
		address = "127.0.0.1:{{searxng_syslog_port}}"
		protocol = "tcp"
		syslog_format = "{{ syslog_format }}"
		labels = {
			job = "searxng",
			hostname = "{{inventory_hostname}}",
			environment = "{{deployment_environment}}",
		}
	}
	forward_to = [loki.write.default.receiver]
}
loki.source.syslog "smtp4dev_logs" {
	listener {
		address = "127.0.0.1:{{smtp4dev_syslog_port}}"
		protocol = "tcp"
		syslog_format = "{{ syslog_format }}"
		labels = {
			job = "smtp4dev",
			hostname = "{{inventory_hostname}}",
			environment = "{{deployment_environment}}",
		}
	}
	forward_to = [loki.write.default.receiver]
}
// Node/system metrics exporter (mdadm collector disabled).
prometheus.exporter.unix "default" {
	include_exporter_metrics = true
	disable_collectors = ["mdadm"]
}
// Scrape local node metrics.
prometheus.scrape "default" {
	targets = prometheus.exporter.unix.default.targets
	forward_to = [prometheus.remote_write.default.receiver]
	job_name = "containers"
}
// Home Assistant metrics: its Prometheus endpoint lives at
// /api/prometheus and requires a long-lived bearer token.
prometheus.scrape "hass" {
	targets = [{
		__address__ = "127.0.0.1:{{hass_port}}",
		job = "hass",
		hostname = "{{inventory_hostname}}",
		environment = "{{deployment_environment}}",
	}]
	forward_to = [prometheus.remote_write.default.receiver]
	scrape_interval = "60s"
	metrics_path = "/api/prometheus"
	bearer_token = "{{hass_metrics_token}}"
}
// Central Prometheus remote-write endpoint.
prometheus.remote_write "default" {
	endpoint {
		url = "{{prometheus_remote_write_url}}"
	}
}
// Central Loki push endpoint.
loki.write "default" {
	endpoint {
		url = "{{loki_url}}"
	}
}

View File

@@ -0,0 +1,195 @@
// Prospero Alloy Configuration
// Red Panda Approved 🐼
// Services: PPLG stack (Grafana, Prometheus, Loki, Alertmanager, PgAdmin, HAProxy, OAuth2-Proxy)
logging {
	level = "{{alloy_log_level}}"
}
// ============================================================================
// LOG COLLECTION - Loki Forwarding
// ============================================================================
// System log files
loki.source.file "system_logs" {
	targets = [
		{__path__ = "/var/log/syslog", job = "syslog"},
		{__path__ = "/var/log/auth.log", job = "auth"},
	]
	forward_to = [loki.write.default.receiver]
}
// PPLG HAProxy syslog receiver (HAProxy syslog → Alloy → Loki)
loki.source.syslog "pplg_haproxy" {
	listener {
		address = "127.0.0.1:{{pplg_haproxy_syslog_port}}"
		protocol = "tcp"
		labels = {
			job = "pplg-haproxy",
			hostname = "{{inventory_hostname}}",
			environment = "{{deployment_environment}}",
		}
	}
	forward_to = [loki.write.default.receiver]
}
// Journal relabeling - assign dedicated job labels per systemd unit.
// forward_to is empty on purpose: this component is only consumed via its
// exported rules (relabel_rules below), not as a log receiver. Rules apply
// top to bottom; the catch-all "systemd" rule at the end must stay last.
loki.relabel "journal" {
	forward_to = []
	// Expose the systemd unit as a label
	rule {
		source_labels = ["__journal__systemd_unit"]
		target_label = "unit"
	}
	// Grafana
	rule {
		source_labels = ["__journal__systemd_unit"]
		regex = "grafana-server\\.service"
		target_label = "job"
		replacement = "grafana"
	}
	// Prometheus
	rule {
		source_labels = ["__journal__systemd_unit"]
		regex = "prometheus\\.service"
		target_label = "job"
		replacement = "prometheus"
	}
	// Loki
	rule {
		source_labels = ["__journal__systemd_unit"]
		regex = "loki\\.service"
		target_label = "job"
		replacement = "loki"
	}
	// Alertmanager
	rule {
		source_labels = ["__journal__systemd_unit"]
		regex = "alertmanager\\.service"
		target_label = "job"
		replacement = "alertmanager"
	}
	// PgAdmin
	rule {
		source_labels = ["__journal__systemd_unit"]
		regex = "pgadmin\\.service"
		target_label = "job"
		replacement = "pgadmin"
	}
	// OAuth2-Proxy (Prometheus UI)
	rule {
		source_labels = ["__journal__systemd_unit"]
		regex = "oauth2-proxy-prometheus\\.service"
		target_label = "job"
		replacement = "oauth2-proxy-prometheus"
	}
	// Alloy
	rule {
		source_labels = ["__journal__systemd_unit"]
		regex = "alloy\\.service"
		target_label = "job"
		replacement = "alloy"
	}
	// Default job for unmatched units
	rule {
		source_labels = ["__journal__systemd_unit"]
		regex = ".+"
		target_label = "job"
		replacement = "systemd"
	}
}
// Systemd journal logs with per-service job labels
loki.source.journal "systemd_logs" {
	forward_to = [loki.write.default.receiver]
	relabel_rules = loki.relabel.journal.rules
	labels = {
		hostname = "{{inventory_hostname}}",
		environment = "{{deployment_environment}}",
	}
}
// Loki endpoint
loki.write "default" {
	endpoint {
		url = "{{loki_url}}"
	}
}
// ============================================================================
// METRICS COLLECTION - Prometheus Remote Write
// ============================================================================
// Unix/Node metrics - Incus-safe collectors only
// Disabled collectors that don't work in containers: hwmon, thermal, mdadm, powersupplyclass, nvme
prometheus.exporter.unix "default" {
	include_exporter_metrics = true
	disable_collectors = [
		"arp",
		"bcache",
		"bonding",
		"btrfs",
		"hwmon",
		"infiniband",
		"ipvs",
		"mdadm",
		"nfs",
		"nfsd",
		"nvme",
		"powersupplyclass",
		"rapl",
		"thermal_zone",
		"zfs",
	]
}
// Process exporter - Track all processes by command name
// Provides: namedprocess_namegroup_* metrics
prometheus.exporter.process "default" {
	track_children = true
	track_threads = true
	gather_smaps = false
	recheck_on_scrape = true
	matcher {
		// {% raw %} stops Jinja2 from eating the Go-template {{.Comm}}
		// placeholder that process-exporter itself interprets at runtime.
		name = "{% raw %}{{.Comm}}{% endraw %}"
		cmdline = [".+"]
	}
}
// Scrape local exporters
prometheus.scrape "local_exporters" {
	targets = concat(
		prometheus.exporter.unix.default.targets,
		prometheus.exporter.process.default.targets,
	)
	forward_to = [prometheus.relabel.add_instance.receiver]
	scrape_interval = "15s"
	job_name = "prospero"
}
// Add instance label for Prometheus compatibility
prometheus.relabel "add_instance" {
	forward_to = [prometheus.remote_write.default.receiver]
	rule {
		target_label = "instance"
		replacement = "{{inventory_hostname}}"
	}
}
// Remote write to Prospero Prometheus
prometheus.remote_write "default" {
	endpoint {
		url = "{{prometheus_remote_write_url}}"
	}
}

View File

@@ -0,0 +1,196 @@
// Puck Alloy Configuration
// Red Panda Approved 🐼
// Services: Log collection, Process metrics, Docker/cAdvisor metrics
logging {
	level = "{{alloy_log_level}}"
}
// ============================================================================
// LOG COLLECTION - Loki Forwarding
// ============================================================================
// Classic syslog and auth log files.
loki.source.file "system_logs" {
	targets = [
		{__path__ = "/var/log/syslog", job = "syslog"},
		{__path__ = "/var/log/auth.log", job = "auth"},
	]
	forward_to = [loki.write.default.receiver]
}
// systemd journal with host/environment labels.
loki.source.journal "systemd_logs" {
	forward_to = [loki.write.default.receiver]
	labels = {
		job = "systemd",
		hostname = "{{inventory_hostname}}",
		environment = "{{deployment_environment}}",
	}
}
// Per-service TCP syslog listeners, one port per service; ports come from
// inventory variables. All forward straight to Loki.
loki.source.syslog "angelia_logs" {
	listener {
		address = "127.0.0.1:{{angelia_syslog_port}}"
		protocol = "tcp"
		syslog_format = "{{ syslog_format }}"
		labels = {
			job = "angelia",
			hostname = "{{inventory_hostname}}",
			environment = "{{deployment_environment}}",
		}
	}
	forward_to = [loki.write.default.receiver]
}
loki.source.syslog "athena_logs" {
	listener {
		address = "127.0.0.1:{{athena_syslog_port}}"
		protocol = "tcp"
		syslog_format = "{{ syslog_format }}"
		labels = {
			job = "athena",
			hostname = "{{inventory_hostname}}",
			environment = "{{deployment_environment}}",
		}
	}
	forward_to = [loki.write.default.receiver]
}
loki.source.syslog "kairos_logs" {
	listener {
		address = "127.0.0.1:{{kairos_syslog_port}}"
		protocol = "tcp"
		syslog_format = "{{ syslog_format }}"
		labels = {
			job = "kairos",
			hostname = "{{inventory_hostname}}",
			environment = "{{deployment_environment}}",
		}
	}
	forward_to = [loki.write.default.receiver]
}
loki.source.syslog "sagittarius_logs" {
	listener {
		address = "127.0.0.1:{{sagittarius_syslog_port}}"
		protocol = "tcp"
		syslog_format = "{{ syslog_format }}"
		labels = {
			job = "sagittarius",
			hostname = "{{inventory_hostname}}",
			environment = "{{deployment_environment}}",
		}
	}
	forward_to = [loki.write.default.receiver]
}
loki.source.syslog "spelunker_logs" {
	listener {
		address = "127.0.0.1:{{spelunker_syslog_port}}"
		protocol = "tcp"
		syslog_format = "{{ syslog_format }}"
		labels = {
			job = "spelunker",
			hostname = "{{inventory_hostname}}",
			environment = "{{deployment_environment}}",
		}
	}
	forward_to = [loki.write.default.receiver]
}
loki.source.syslog "jupyterlab_logs" {
	listener {
		address = "127.0.0.1:{{jupyterlab_syslog_port}}"
		protocol = "tcp"
		syslog_format = "{{ syslog_format }}"
		labels = {
			job = "jupyterlab",
			hostname = "{{inventory_hostname}}",
			environment = "{{deployment_environment}}",
		}
	}
	forward_to = [loki.write.default.receiver]
}
// Central Loki push endpoint.
loki.write "default" {
	endpoint {
		url = "{{loki_url}}"
	}
}
// ============================================================================
// METRICS COLLECTION - Prometheus Remote Write
// ============================================================================
// Unix/Node metrics - Incus-safe collectors only
// Disabled collectors that don't work in containers: hwmon, thermal, mdadm, powersupplyclass, nvme
prometheus.exporter.unix "default" {
	include_exporter_metrics = true
	disable_collectors = [
		"arp",
		"bcache",
		"bonding",
		"btrfs",
		"hwmon",
		"infiniband",
		"ipvs",
		"mdadm",
		"nfs",
		"nfsd",
		"nvme",
		"powersupplyclass",
		"rapl",
		"thermal_zone",
		"zfs",
	]
}
// Process exporter - Track all processes by command name
// Provides: namedprocess_namegroup_* metrics
prometheus.exporter.process "default" {
	track_children = true
	track_threads = true
	gather_smaps = false
	recheck_on_scrape = true
	matcher {
		// {% raw %} stops Jinja2 from eating the Go-template {{.Comm}}
		// placeholder that process-exporter itself interprets at runtime.
		name = "{% raw %}{{.Comm}}{% endraw %}"
		cmdline = [".+"]
	}
}
// cAdvisor - Docker container metrics
// Provides: container_* metrics for CPU, memory, network, disk
// Requires the alloy user to be in the docker group (see deploy playbook).
prometheus.exporter.cadvisor "default" {
	docker_host = "unix:///var/run/docker.sock"
	storage_duration = "5m"
	docker_only = true
}
// Scrape all local exporters
prometheus.scrape "local_exporters" {
	targets = concat(
		prometheus.exporter.unix.default.targets,
		prometheus.exporter.process.default.targets,
		prometheus.exporter.cadvisor.default.targets,
	)
	forward_to = [prometheus.relabel.add_instance.receiver]
	scrape_interval = "15s"
	job_name = "puck"
}
// Add instance label for Prometheus compatibility
prometheus.relabel "add_instance" {
	forward_to = [prometheus.remote_write.default.receiver]
	rule {
		target_label = "instance"
		replacement = "{{inventory_hostname}}"
	}
}
// Remote write to Prospero Prometheus
prometheus.remote_write "default" {
	endpoint {
		url = "{{prometheus_remote_write_url}}"
	}
}

View File

@@ -0,0 +1,155 @@
// Rosalind Alloy Configuration
// Services: Gitea, Lobechat, Nextcloud monitoring
logging {
	level = "{{alloy_log_level}}"
	format = "logfmt"
}
// ============================================================================
// LOG COLLECTION - Loki Forwarding
// ============================================================================
// System log files
loki.source.file "system_logs" {
	targets = [
		{__path__ = "/var/log/syslog", job = "syslog"},
		{__path__ = "/var/log/auth.log", job = "auth"},
	]
	forward_to = [loki.write.default.receiver]
}
// Systemd journal logs (includes AnythingLLM server/collector)
loki.source.journal "systemd_logs" {
	forward_to = [loki.write.default.receiver]
	labels = {
		job = "systemd",
		hostname = "{{inventory_hostname}}",
		environment = "{{deployment_environment}}",
	}
}
// Gitea application logs — readable because the alloy user is added to
// the git group by the deploy playbook.
loki.source.file "gitea_logs" {
	targets = [
		{__path__ = "/var/log/gitea/gitea.log", job = "gitea"},
	]
	forward_to = [loki.write.default.receiver]
}
// Apache access and error logs (Nextcloud)
loki.source.file "apache_logs" {
	targets = [
		{__path__ = "/var/log/apache2/access.log", job = "apache_access"},
		{__path__ = "/var/log/apache2/error.log", job = "apache_error"},
	]
	forward_to = [loki.write.default.receiver]
}
// Lobechat Docker syslog — local TCP listener, port from inventory.
loki.source.syslog "lobechat_logs" {
	listener {
		address = "127.0.0.1:{{lobechat_syslog_port}}"
		protocol = "tcp"
		syslog_format = "{{ syslog_format }}"
		labels = {
			job = "lobechat",
			hostname = "{{inventory_hostname}}",
			environment = "{{deployment_environment}}",
		}
	}
	forward_to = [loki.write.default.receiver]
}
// Loki endpoint
loki.write "default" {
	endpoint {
		url = "{{loki_url}}"
	}
}
// ============================================================================
// METRICS COLLECTION - Prometheus Remote Write
// ============================================================================
// Unix/Node metrics - Incus-safe collectors only
prometheus.exporter.unix "default" {
	include_exporter_metrics = true
	disable_collectors = [
		"arp",
		"bcache",
		"bonding",
		"btrfs",
		"hwmon",
		"infiniband",
		"ipvs",
		"mdadm",
		"nfs",
		"nfsd",
		"nvme",
		"powersupplyclass",
		"rapl",
		"thermal_zone",
		"zfs",
	]
}
// Process exporter - Track all processes by command name
prometheus.exporter.process "default" {
	track_children = true
	track_threads = true
	gather_smaps = false
	recheck_on_scrape = true
	matcher {
		// {% raw %} stops Jinja2 from eating the Go-template {{.Comm}}
		// placeholder that process-exporter itself interprets at runtime.
		name = "{% raw %}{{.Comm}}{% endraw %}"
		cmdline = [".+"]
	}
}
// cAdvisor - Docker container metrics (for Lobechat)
prometheus.exporter.cadvisor "default" {
	docker_host = "unix:///var/run/docker.sock"
	store_container_labels = true
	docker_only = true
}
// Prometheus scrape configurations — one scrape component per exporter.
prometheus.scrape "unix" {
	targets = prometheus.exporter.unix.default.targets
	forward_to = [prometheus.remote_write.default.receiver]
	scrape_interval = "15s"
}
prometheus.scrape "process" {
	targets = prometheus.exporter.process.default.targets
	forward_to = [prometheus.remote_write.default.receiver]
	scrape_interval = "15s"
}
prometheus.scrape "cadvisor" {
	targets = prometheus.exporter.cadvisor.default.targets
	forward_to = [prometheus.remote_write.default.receiver]
	scrape_interval = "15s"
}
// Gitea application metrics — /metrics endpoint guarded by a bearer token.
prometheus.scrape "gitea" {
	targets = [{
		__address__ = "127.0.0.1:{{gitea_web_port}}",
		job = "gitea",
		hostname = "{{inventory_hostname}}",
		environment = "{{deployment_environment}}",
	}]
	forward_to = [prometheus.remote_write.default.receiver]
	scrape_interval = "30s"
	metrics_path = "/metrics"
	bearer_token = "{{gitea_metrics_token}}"
}
// Prometheus remote write endpoint
prometheus.remote_write "default" {
	endpoint {
		url = "{{prometheus_remote_write_url}}"
	}
}

View File

@@ -0,0 +1,80 @@
// Alloy configuration (Jinja2 template) for a host running HAProxy and
// Casdoor. Logs go to Loki; node and HAProxy metrics go to Prometheus
// remote_write.
logging {
	level = "{{alloy_log_level}}"
}
// Tail syslog and auth log files.
loki.source.file "system_logs" {
	targets = [
		{__path__ = "/var/log/syslog", job = "syslog"},
		{__path__ = "/var/log/auth.log", job = "auth"},
	]
	forward_to = [loki.write.default.receiver]
}
// systemd journal with host/environment labels.
loki.source.journal "systemd_logs" {
	forward_to = [loki.write.default.receiver]
	labels = {
		job = "systemd",
		hostname = "{{inventory_hostname}}",
		environment = "{{deployment_environment}}",
	}
}
// HAProxy syslog stream over local TCP (port from inventory).
loki.source.syslog "haproxy_logs" {
	listener {
		address = "127.0.0.1:{{haproxy_syslog_port}}"
		protocol = "tcp"
		syslog_format = "{{ syslog_format }}"
		labels = {
			job = "haproxy",
			hostname = "{{inventory_hostname}}",
			environment = "{{deployment_environment}}",
		}
	}
	forward_to = [loki.write.default.receiver]
}
// Casdoor syslog stream over local TCP (port from inventory).
loki.source.syslog "casdoor_logs" {
	listener {
		address = "127.0.0.1:{{casdoor_syslog_port}}"
		protocol = "tcp"
		syslog_format = "{{ syslog_format }}"
		labels = {
			job = "casdoor",
			hostname = "{{inventory_hostname}}",
			environment = "{{deployment_environment}}",
		}
	}
	forward_to = [loki.write.default.receiver]
}
// Node/system metrics exporter (mdadm collector disabled).
prometheus.exporter.unix "default" {
	include_exporter_metrics = true
	disable_collectors = ["mdadm"]
}
// Scrape local node metrics.
prometheus.scrape "default" {
	targets = prometheus.exporter.unix.default.targets
	forward_to = [prometheus.remote_write.default.receiver]
	job_name = "containers"
}
// HAProxy's built-in Prometheus endpoint, exposed on the stats port.
prometheus.scrape "haproxy" {
	targets = [
		{"__address__" = "localhost:{{haproxy_stats_port}}", "__metrics_path__" = "/metrics"},
	]
	scrape_interval = "15s"
	forward_to = [prometheus.remote_write.default.receiver]
	job_name = "haproxy"
}
// Central Prometheus remote-write endpoint.
prometheus.remote_write "default" {
	endpoint {
		url = "{{prometheus_remote_write_url}}"
	}
}
// Central Loki push endpoint.
loki.write "default" {
	endpoint {
		url = "{{loki_url}}"
	}
}