From 067b3256783098f29e517f5beab37ed19abc020f Mon Sep 17 00:00:00 2001
From: Vincent Finance
Date: Thu, 9 Jan 2025 21:06:28 +0100
Subject: [PATCH] Add new roles for monitoring

---
 ansible/playbooks/monitoring.yml              |  35 ++
 .../roles/blackbox-exporter/handlers/main.yml |   5 +
 .../roles/blackbox-exporter/tasks/main.yml    |  25 ++
 .../templates/blackbox.config.j2              |  60 +++
 .../roles/blackbox-exporter/vars/debian.yml   |   5 +
 .../roles/blackbox-exporter/vars/openbsd.yml  |   5 +
 ansible/roles/grafana/tasks/main.yml          |  14 +
 ansible/roles/node-exporter/tasks/main.yml    |  17 +
 ansible/roles/node-exporter/vars/alpine.yml   |   6 +
 ansible/roles/node-exporter/vars/debian.yml   |   6 +
 ansible/roles/node-exporter/vars/openbsd.yml  |   6 +
 ansible/roles/node-exporter/vars/redhat.yml   |   6 +
 ansible/roles/node-exporter/vars/suse.yml     |   6 +
 ansible/roles/prometheus/handlers/main.yml    |  11 +
 ansible/roles/prometheus/tasks/main.yml       |  56 +++
 .../roles/prometheus/templates/blackbox.rules |  95 +++++
 .../roles/prometheus/templates/hosts.rules    | 347 ++++++++++++++++++
 .../prometheus/templates/prometheus.config.j2 |  70 ++++
 .../prometheus/templates/prometheus.rules     | 246 +++++++++++++
 .../prometheus/templates/scrape-blackbox.yml  |  18 +
 .../prometheus/templates/scrape-main.yml      |  15 +
 ansible/roles/prometheus/vars/alpine.yml      |  10 +
 ansible/roles/prometheus/vars/debian.yml      |  10 +
 ansible/roles/prometheus/vars/openbsd.yml     |  10 +
 24 files changed, 1084 insertions(+)
 create mode 100644 ansible/playbooks/monitoring.yml
 create mode 100644 ansible/roles/blackbox-exporter/handlers/main.yml
 create mode 100644 ansible/roles/blackbox-exporter/tasks/main.yml
 create mode 100644 ansible/roles/blackbox-exporter/templates/blackbox.config.j2
 create mode 100644 ansible/roles/blackbox-exporter/vars/debian.yml
 create mode 100644 ansible/roles/blackbox-exporter/vars/openbsd.yml
 create mode 100644 ansible/roles/grafana/tasks/main.yml
 create mode 100644 ansible/roles/node-exporter/tasks/main.yml
 create mode 100644 ansible/roles/node-exporter/vars/alpine.yml
 create mode 100644 ansible/roles/node-exporter/vars/debian.yml
 create mode 100644 ansible/roles/node-exporter/vars/openbsd.yml
 create mode 100644 ansible/roles/node-exporter/vars/redhat.yml
 create mode 100644 ansible/roles/node-exporter/vars/suse.yml
 create mode 100644 ansible/roles/prometheus/handlers/main.yml
 create mode 100644 ansible/roles/prometheus/tasks/main.yml
 create mode 100644 ansible/roles/prometheus/templates/blackbox.rules
 create mode 100644 ansible/roles/prometheus/templates/hosts.rules
 create mode 100644 ansible/roles/prometheus/templates/prometheus.config.j2
 create mode 100644 ansible/roles/prometheus/templates/prometheus.rules
 create mode 100644 ansible/roles/prometheus/templates/scrape-blackbox.yml
 create mode 100644 ansible/roles/prometheus/templates/scrape-main.yml
 create mode 100644 ansible/roles/prometheus/vars/alpine.yml
 create mode 100644 ansible/roles/prometheus/vars/debian.yml
 create mode 100644 ansible/roles/prometheus/vars/openbsd.yml

diff --git a/ansible/playbooks/monitoring.yml b/ansible/playbooks/monitoring.yml
new file mode 100644
index 0000000..867782b
--- /dev/null
+++ b/ansible/playbooks/monitoring.yml
@@ -0,0 +1,35 @@
+---
+# Playbook to set up monitoring modules
+
+- name: Install and configure the control node
+  hosts: monitoring
+  become: yes
+  roles:
+    - grafana
+
+- name: Install and configure Prometheus node - Main node
+  hosts: monitoring
+  become: yes
+  roles:
+    - prometheus
+  vars:
+    blackbox: false
+    scrape_files:
+      - /etc/prometheus/scrape-main.yml
+
+- name: Install and configure Prometheus node - Blackbox nodes
+  hosts: blackbox
+  become: yes
+  roles:
+    - prometheus
+    - blackbox-exporter
+  vars:
+    blackbox: true
+    scrape_files:
+      - /etc/prometheus/scrape-blackbox.yml
+
+- name: Install and configure nodes
+  hosts: all
+  become: yes
+  roles:
+    - node-exporter
diff --git a/ansible/roles/blackbox-exporter/handlers/main.yml b/ansible/roles/blackbox-exporter/handlers/main.yml
new file mode 100644
index 0000000..0f89371
--- /dev/null
+++ b/ansible/roles/blackbox-exporter/handlers/main.yml
@@ -0,0 +1,5 @@
+## Restart Blackbox-exporter on changes
+- name: restart blackbox
+  ansible.builtin.service:
+    name: '{{ blackbox_service }}'
+    state: restarted
diff --git a/ansible/roles/blackbox-exporter/tasks/main.yml b/ansible/roles/blackbox-exporter/tasks/main.yml
new file mode 100644
index 0000000..2c945e0
--- /dev/null
+++ b/ansible/roles/blackbox-exporter/tasks/main.yml
@@ -0,0 +1,25 @@
+---
+
+- name: Load variables
+  include_vars: "{{ ansible_os_family|lower }}.yml"
+
+- name: Install Blackbox exporter
+  tags: blackbox-install
+  ansible.builtin.package:
+    name: '{{ blackbox_package }}'
+    state: latest
+
+- name: Define Blackbox configuration
+  tags: blackbox-configure
+  ansible.builtin.template:
+    src: blackbox.config.j2
+    dest: '{{ blackbox_config }}'
+    backup: yes
+  notify: restart blackbox
+
+- name: Enable Blackbox service
+  tags: blackbox-enable
+  ansible.builtin.service:
+    name: '{{ blackbox_service }}'
+    state: started
+    enabled: yes
diff --git a/ansible/roles/blackbox-exporter/templates/blackbox.config.j2 b/ansible/roles/blackbox-exporter/templates/blackbox.config.j2
new file mode 100644
index 0000000..907c6d1
--- /dev/null
+++ b/ansible/roles/blackbox-exporter/templates/blackbox.config.j2
@@ -0,0 +1,60 @@
+{{ ansible_managed | comment }}
+
+modules:
+  http_2xx:
+    prober: http
+{% if inventory_hostname == 'check.home.arpa' %}
+    http:
+      preferred_ip_protocol: "ip4"
+      ip_protocol_fallback: true
+{% endif %}
+  http_post_2xx:
+    prober: http
+{% if inventory_hostname == 'check.home.arpa' %}
+    http:
+      method: POST
+      preferred_ip_protocol: "ip4"
+      ip_protocol_fallback: true
+{% endif %}
+  tcp_connect:
+    prober: tcp
+  smtp_check:
+    prober: tcp
+    timeout: 5s
+    tcp:
+{% if inventory_hostname == 'check.home.arpa' %}
+      preferred_ip_protocol: "ip4"
+      ip_protocol_fallback: true
+{% endif %}
+      query_response:
+        - expect: "^220(.*)ESMTP(.*)$"
+        - send: "EHLO prober\r"
+        - expect: "^250-STARTTLS"
+        - send: "STARTTLS\r"
+        - expect: "^220"
+        - starttls: true
+        - send: "EHLO prober\r"
+        - expect: "^250-AUTH"
+        - send: "QUIT\r"
+  imap_check:
+    prober: tcp
+    timeout: 5s
+    tcp:
+{% if inventory_hostname == 'check.home.arpa' %}
+      preferred_ip_protocol: "ip4"
+      ip_protocol_fallback: true
+{% endif %}
+      query_response:
+        - expect: "OK.*STARTTLS"
+        - send: ". STARTTLS"
+        - expect: "OK"
+        - starttls: true
+        - send: ". capability"
+        - expect: "CAPABILITY IMAP4rev1"
+  icmp:
+    prober: icmp
+{% if inventory_hostname == 'check.home.arpa' %}
+    icmp:
+      preferred_ip_protocol: "ip4"
+      ip_protocol_fallback: true
+{% endif %}
diff --git a/ansible/roles/blackbox-exporter/vars/debian.yml b/ansible/roles/blackbox-exporter/vars/debian.yml
new file mode 100644
index 0000000..9bee0e1
--- /dev/null
+++ b/ansible/roles/blackbox-exporter/vars/debian.yml
@@ -0,0 +1,5 @@
+---
+# vars file for blackbox role
+blackbox_package: prometheus-blackbox-exporter
+blackbox_config: '/etc/prometheus/blackbox.yml'
+blackbox_service: prometheus-blackbox-exporter
\ No newline at end of file
diff --git a/ansible/roles/blackbox-exporter/vars/openbsd.yml b/ansible/roles/blackbox-exporter/vars/openbsd.yml
new file mode 100644
index 0000000..b98c001
--- /dev/null
+++ b/ansible/roles/blackbox-exporter/vars/openbsd.yml
@@ -0,0 +1,5 @@
+---
+# vars file for blackbox role
+blackbox_package: blackbox_exporter
+blackbox_config: '/etc/blackbox_exporter/blackbox.yml'
+blackbox_service: blackbox_exporter
\ No newline at end of file
diff --git a/ansible/roles/grafana/tasks/main.yml b/ansible/roles/grafana/tasks/main.yml
new file mode 100644
index 0000000..61a2b94
--- /dev/null
+++ b/ansible/roles/grafana/tasks/main.yml
@@ -0,0 +1,14 @@
+---
+
+- name: Install Grafana
+  tags: grafana-install
+  ansible.builtin.package:
+    name: 'grafana'
+    state: latest
+
+- name: Enable Grafana service
+  tags: grafana-enable
+  ansible.builtin.service:
+    name: 'grafana'
+    state: started
+    enabled: yes
diff --git a/ansible/roles/node-exporter/tasks/main.yml b/ansible/roles/node-exporter/tasks/main.yml
new file mode 100644
index 0000000..e77e558
--- /dev/null
+++ b/ansible/roles/node-exporter/tasks/main.yml
@@ -0,0 +1,17 @@
+---
+
+- name: Load package and service names
+  include_vars: "{{ ansible_os_family|lower }}.yml"
+
+- name: Install Node-exporter
+  tags: node-install
+  ansible.builtin.package:
+    name: '{{ node_package }}'
+    state: latest
+
+- name: Enable Node-exporter service
+  tags: node-enable
+  ansible.builtin.service:
+    name: '{{ node_service }}'
+    state: started
+    enabled: yes
diff --git a/ansible/roles/node-exporter/vars/alpine.yml b/ansible/roles/node-exporter/vars/alpine.yml
new file mode 100644
index 0000000..6c5f645
--- /dev/null
+++ b/ansible/roles/node-exporter/vars/alpine.yml
@@ -0,0 +1,6 @@
+---
+# vars file for node-exporter role
+node_service: node-exporter
+
+node_package:
+  - prometheus-node-exporter
diff --git a/ansible/roles/node-exporter/vars/debian.yml b/ansible/roles/node-exporter/vars/debian.yml
new file mode 100644
index 0000000..6fa4dda
--- /dev/null
+++ b/ansible/roles/node-exporter/vars/debian.yml
@@ -0,0 +1,6 @@
+---
+# vars file for node-exporter role
+node_service: prometheus-node-exporter
+
+node_package:
+  - prometheus-node-exporter
diff --git a/ansible/roles/node-exporter/vars/openbsd.yml b/ansible/roles/node-exporter/vars/openbsd.yml
new file mode 100644
index 0000000..f90f399
--- /dev/null
+++ b/ansible/roles/node-exporter/vars/openbsd.yml
@@ -0,0 +1,6 @@
+---
+# vars file for node-exporter role
+node_service: node_exporter
+
+node_package:
+  - node_exporter
diff --git a/ansible/roles/node-exporter/vars/redhat.yml b/ansible/roles/node-exporter/vars/redhat.yml
new file mode 100644
index 0000000..4932336
--- /dev/null
+++ b/ansible/roles/node-exporter/vars/redhat.yml
@@ -0,0 +1,6 @@
+---
+# vars file for node-exporter role
+node_service: prometheus-node-exporter
+
+node_package:
+  - golang-github-prometheus-node-exporter
diff --git a/ansible/roles/node-exporter/vars/suse.yml b/ansible/roles/node-exporter/vars/suse.yml
new file mode 100644
index 0000000..76facca
--- /dev/null
+++ b/ansible/roles/node-exporter/vars/suse.yml
@@ -0,0 +1,6 @@
+---
+# vars file for node-exporter role
+node_service: prometheus-node_exporter
+
+node_package:
+  - golang-github-prometheus-node_exporter
diff --git a/ansible/roles/prometheus/handlers/main.yml b/ansible/roles/prometheus/handlers/main.yml
new file mode 100644
index 0000000..980cd17
--- /dev/null
+++ b/ansible/roles/prometheus/handlers/main.yml
@@ -0,0 +1,11 @@
+## Restart Prometheus on changes
+- name: restart prometheus
+  ansible.builtin.service:
+    name: '{{ prometheus_service }}'
+    state: restarted
+
+## Restart Alertmanager on changes
+- name: restart alertmanager
+  ansible.builtin.service:
+    name: '{{ alertmanager_service }}'
+    state: restarted
diff --git a/ansible/roles/prometheus/tasks/main.yml b/ansible/roles/prometheus/tasks/main.yml
new file mode 100644
index 0000000..8c5334b
--- /dev/null
+++ b/ansible/roles/prometheus/tasks/main.yml
@@ -0,0 +1,56 @@
+---
+
+- name: Load variables
+  include_vars: "{{ ansible_os_family|lower }}.yml"
+
+- name: Install Prometheus
+  tags: prometheus-install
+  ansible.builtin.package:
+    name: '{{ prometheus_package }}'
+    state: latest
+
+- name: Add scrape configuration
+  tags: prometheus-scrape-configure
+  ansible.builtin.copy:
+    src: "{{ item.src }}"
+    dest: "/etc/prometheus/{{ item.dest }}"
+  loop:
+    - { src: '../templates/scrape-main.yml', dest: 'scrape-main.yml' }
+    - { src: '../templates/scrape-blackbox.yml', dest: 'scrape-blackbox.yml' }
+  notify:
+    - restart prometheus
+
+- name: Add rules configuration
+  tags: alertmanager-rules-configure
+  ansible.builtin.copy:
+    src: "{{ item.src }}"
+    dest: "{{ alertmanager_rules }}/{{ item.dest }}"
+  loop:
+    - { src: '../templates/hosts.rules', dest: 'hosts.rules.yml' }
+    - { src: '../templates/prometheus.rules', dest: 'prometheus.rules.yml' }
+    - { src: '../templates/blackbox.rules', dest: 'blackbox.rules.yml' }
+  notify:
+    - restart prometheus
+    - restart alertmanager
+
+- name: Enable Prometheus configuration
+  tags: prometheus-configure
+  ansible.builtin.template:
+    src: prometheus.config.j2
+    dest: '{{ prometheus_config }}'
+    validate: promtool check config %s
+  notify: restart prometheus
+
+- name: Enable Prometheus service
+  tags: prometheus-enable
+  ansible.builtin.service:
+    name: '{{ prometheus_service }}'
+    state: started
+    enabled: yes
+
+- name: Enable Alertmanager service
+  tags: alertmanager-enable
+  ansible.builtin.service:
+    name: '{{ alertmanager_service }}'
+    state: started
+    enabled: yes
diff --git a/ansible/roles/prometheus/templates/blackbox.rules b/ansible/roles/prometheus/templates/blackbox.rules
new file mode 100644
index 0000000..74bf25f
--- /dev/null
+++ b/ansible/roles/prometheus/templates/blackbox.rules
@@ -0,0 +1,95 @@
+groups:
+
+- name: BlackboxExporter
+
+  rules:
+
+  - alert: BlackboxProbeFailed
+    expr: 'probe_success == 0'
+    for: 0m
+    labels:
+      severity: critical
+    annotations:
+      summary: Blackbox probe failed (instance {{ $labels.instance }})
+      message: Blackbox probe failed for {{ $labels.target }} (instance {{ $labels.instance }})
+      description: "Probe failed\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+  - alert: BlackboxConfigurationReloadFailure
+    expr: 'blackbox_exporter_config_last_reload_successful != 1'
+    for: 0m
+    labels:
+      severity: warning
+    annotations:
+      summary: Blackbox configuration reload failure (instance {{ $labels.instance }})
+      message: Blackbox configuration reload failure (instance {{ $labels.instance }})
+      description: "Blackbox configuration reload failure\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+  - alert: BlackboxSlowProbe
+    expr: 'avg_over_time(probe_duration_seconds[1m]) > 1'
+    for: 10m
+    labels:
+      severity: warning
+    annotations:
+      summary: Blackbox slow probe (instance {{ $labels.instance }})
+      message: Blackbox slow probe for {{ $labels.target }} (instance {{ $labels.instance }})
+      description: "Blackbox probe took more than 1s to complete\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+  - alert: BlackboxProbeHttpFailure
+    expr: 'probe_http_status_code <= 199 OR probe_http_status_code >= 400'
+    for: 0m
+    labels:
+      severity: critical
+    annotations:
+      summary: Blackbox probe HTTP failure (instance {{ $labels.instance }})
+      message: Blackbox probe HTTP failure for {{ $labels.target }} (instance {{ $labels.instance }})
+      description: "HTTP status code is not 200-399\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+  - alert: BlackboxSslCertificateWillExpireSoon
+    expr: '3 <= round((last_over_time(probe_ssl_earliest_cert_expiry[10m]) - time()) / 86400, 0.1) < 20'
+    for: 0m
+    labels:
+      severity: warning
+    annotations:
+      summary: Blackbox SSL certificate will expire soon (instance {{ $labels.instance }})
+      message: Blackbox SSL certificate will expire soon for {{ $labels.target }} (instance {{ $labels.instance }})
+      description: "SSL certificate expires in less than 20 days\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+  - alert: BlackboxSslCertificateWillExpireSoon
+    expr: '0 <= round((last_over_time(probe_ssl_earliest_cert_expiry[10m]) - time()) / 86400, 0.1) < 3'
+    for: 0m
+    labels:
+      severity: critical
+    annotations:
+      summary: Blackbox SSL certificate will expire soon (instance {{ $labels.instance }})
+      message: Blackbox SSL certificate will expire soon for {{ $labels.target }} (instance {{ $labels.instance }})
+      description: "SSL certificate expires in less than 3 days\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+  - alert: BlackboxSslCertificateExpired
+    expr: 'round((last_over_time(probe_ssl_earliest_cert_expiry[10m]) - time()) / 86400, 0.1) < 0'
+    for: 0m
+    labels:
+      severity: critical
+    annotations:
+      summary: Blackbox SSL certificate expired (instance {{ $labels.instance }})
+      message: Blackbox SSL certificate expired for {{ $labels.target }} (instance {{ $labels.instance }})
+      description: "SSL certificate has expired already\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+  - alert: BlackboxProbeSlowHttp
+    expr: 'avg_over_time(probe_http_duration_seconds[1m]) > 1'
+    for: 1m
+    labels:
+      severity: warning
+    annotations:
+      summary: Blackbox probe slow HTTP (instance {{ $labels.instance }})
+      message: Blackbox probe slow HTTP for {{ $labels.target }} (instance {{ $labels.instance }})
+      description: "HTTP request took more than 1s\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+  - alert: BlackboxProbeSlowPing
+    expr: 'avg_over_time(probe_icmp_duration_seconds[1m]) > 1'
+    for: 1m
+    labels:
+      severity: warning
+    annotations:
+      summary: Blackbox probe slow ping (instance {{ $labels.instance }})
+      message: Blackbox probe slow ping for {{ $labels.target }} (instance {{ $labels.instance }})
+      description: "Blackbox ping took more than 1s\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
diff --git a/ansible/roles/prometheus/templates/hosts.rules b/ansible/roles/prometheus/templates/hosts.rules
new file mode 100644
index 0000000..efd1fc4
--- /dev/null
+++ b/ansible/roles/prometheus/templates/hosts.rules
@@ -0,0 +1,347 @@
+groups:
+- name: NodeExporter
+  rules:
+
+  - alert: HostOutOfMemory
+    expr: '(node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes * 100 < 10) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
+    for: 2m
+    labels:
+      severity: warning
+    annotations:
+      summary: Host out of memory (instance {{ $labels.instance }})
+      message: Host out of memory (node {{ $labels.nodename }})
+      description: "Node memory is filling up (< 10% left)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+  - alert: HostMemoryUnderMemoryPressure
+    expr: '(rate(node_vmstat_pgmajfault[1m]) > 1000) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
+    for: 2m
+    labels:
+      severity: warning
+    annotations:
+      summary: Host memory under memory pressure (instance {{ $labels.instance }})
+      message: Host memory under memory pressure (node {{ $labels.nodename }})
+      description: "The node is under heavy memory pressure. High rate of major page faults\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+  - alert: HostMemoryIsUnderutilized
+    expr: '(100 - (rate(node_memory_MemAvailable_bytes[30m]) / node_memory_MemTotal_bytes * 100) < 20) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
+    for: 1w
+    labels:
+      severity: info
+    annotations:
+      summary: Host Memory is underutilized (instance {{ $labels.instance }})
+      message: Host Memory is underutilized (node {{ $labels.nodename }})
+      description: "Node memory usage is < 20% for 1 week. Consider reducing the allocated memory. (instance {{ $labels.instance }})\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+  - alert: HostUnusualNetworkThroughputIn
+    expr: '(sum by (instance) (rate(node_network_receive_bytes_total[2m])) / 1024 / 1024 > 100) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
+    for: 5m
+    labels:
+      severity: warning
+    annotations:
+      summary: Host unusual network throughput in (instance {{ $labels.instance }})
+      message: Host unusual network throughput in (node {{ $labels.nodename }})
+      description: "Host network interfaces are probably receiving too much data (> 100 MB/s)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+  - alert: HostUnusualNetworkThroughputOut
+    expr: '(sum by (instance) (rate(node_network_transmit_bytes_total[2m])) / 1024 / 1024 > 100) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
+    for: 5m
+    labels:
+      severity: warning
+    annotations:
+      summary: Host unusual network throughput out (instance {{ $labels.instance }})
+      message: Host unusual network throughput out (node {{ $labels.nodename }})
+      description: "Host network interfaces are probably sending too much data (> 100 MB/s)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+  - alert: HostUnusualDiskReadRate
+    expr: '(sum by (instance) (rate(node_disk_read_bytes_total[2m])) / 1024 / 1024 > 50) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
+    for: 20m
+    labels:
+      severity: warning
+    annotations:
+      summary: Host unusual disk read rate (instance {{ $labels.instance }})
+      message: Host unusual disk read rate (node {{ $labels.nodename }})
+      description: "Disk is probably reading too much data (> 50 MB/s)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+  - alert: HostUnusualDiskWriteRate
+    expr: '(sum by (instance) (rate(node_disk_written_bytes_total[2m])) / 1024 / 1024 > 50) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
+    for: 20m
+    labels:
+      severity: warning
+    annotations:
+      summary: Host unusual disk write rate (instance {{ $labels.instance }})
+      message: Host unusual disk write rate (node {{ $labels.nodename }})
+      description: "Disk is probably writing too much data (> 50 MB/s)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+  - alert: HostOutOfDiskSpace
+    expr: '((node_filesystem_avail_bytes * 100) / node_filesystem_size_bytes < 10 and ON (instance, device, mountpoint) node_filesystem_readonly == 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
+    for: 2m
+    labels:
+      severity: warning
+    annotations:
+      summary: Host out of disk space (instance {{ $labels.instance }})
+      message: Host out of disk space (node {{ $labels.nodename }})
+      description: "Disk is almost full (< 10% left)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+  - alert: HostDiskWillFillIn24Hours
+    expr: '((node_filesystem_avail_bytes * 100) / node_filesystem_size_bytes < 10 and ON (instance, device, mountpoint) predict_linear(node_filesystem_avail_bytes{fstype!~"tmpfs"}[1h], 24 * 3600) < 0 and ON (instance, device, mountpoint) node_filesystem_readonly == 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
+    for: 2m
+    labels:
+      severity: warning
+    annotations:
+      summary: Host disk will fill in 24 hours (instance {{ $labels.instance }})
+      message: Host disk will fill in 24 hours (node {{ $labels.nodename }})
+      description: "Filesystem is predicted to run out of space within the next 24 hours at current write rate\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+  - alert: HostOutOfInodes
+    expr: '(node_filesystem_files_free / node_filesystem_files * 100 < 10 and ON (instance, device, mountpoint) node_filesystem_readonly == 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
+    for: 2m
+    labels:
+      severity: warning
+    annotations:
+      summary: Host out of inodes (instance {{ $labels.instance }})
+      message: Host out of inodes (node {{ $labels.nodename }})
+      description: "Disk is almost running out of available inodes (< 10% left)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+  - alert: HostFilesystemDeviceError
+    expr: 'node_filesystem_device_error == 1'
+    for: 0m
+    labels:
+      severity: critical
+    annotations:
+      summary: Host filesystem device error (instance {{ $labels.instance }})
+      message: Host filesystem device error (node {{ $labels.nodename }})
+      description: "{{ $labels.instance }}: Device error with the {{ $labels.mountpoint }} filesystem\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+  - alert: HostInodesWillFillIn24Hours
+    expr: '(node_filesystem_files_free / node_filesystem_files * 100 < 10 and predict_linear(node_filesystem_files_free[1h], 24 * 3600) < 0 and ON (instance, device, mountpoint) node_filesystem_readonly == 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
+    for: 2m
+    labels:
+      severity: warning
+    annotations:
+      summary: Host inodes will fill in 24 hours (instance {{ $labels.instance }})
+      description: "Filesystem is predicted to run out of inodes within the next 24 hours at current write rate\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+  - alert: HostUnusualDiskReadLatency
+    expr: '(rate(node_disk_read_time_seconds_total[1m]) / rate(node_disk_reads_completed_total[1m]) > 0.1 and rate(node_disk_reads_completed_total[1m]) > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
+    for: 15m
+    labels:
+      severity: warning
+    annotations:
+      summary: Host unusual disk read latency (instance {{ $labels.instance }})
+      description: "Disk latency is growing (read operations > 100ms)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+  - alert: HostUnusualDiskWriteLatency
+    expr: '(rate(node_disk_write_time_seconds_total[1m]) / rate(node_disk_writes_completed_total[1m]) > 0.1 and rate(node_disk_writes_completed_total[1m]) > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
+    for: 15m
+    labels:
+      severity: warning
+    annotations:
+      summary: Host unusual disk write latency (instance {{ $labels.instance }})
+      description: "Disk latency is growing (write operations > 100ms)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+  - alert: HostHighCpuLoad
+    expr: '(sum by (instance) (avg by (mode, instance) (rate(node_cpu_seconds_total{mode!="idle"}[2m]))) > 0.8) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
+    for: 10m
+    labels:
+      severity: warning
+    annotations:
+      summary: Host high CPU load (instance {{ $labels.instance }})
+      description: "CPU load is > 80%\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+  - alert: HostCpuStealNoisyNeighbor
+    expr: '(avg by(instance) (rate(node_cpu_seconds_total{mode="steal"}[5m])) * 100 > 10) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
+    for: 0m
+    labels:
+      severity: warning
+    annotations:
+      summary: Host CPU steal noisy neighbor (instance {{ $labels.instance }})
+      description: "CPU steal is > 10%. A noisy neighbor is killing VM performance, or a spot instance may be out of credit.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+  - alert: HostCpuHighIowait
+    expr: '(avg by (instance) (rate(node_cpu_seconds_total{mode="iowait"}[5m])) * 100 > 10) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
+    for: 15m
+    labels:
+      severity: warning
+    annotations:
+      summary: Host CPU high iowait (instance {{ $labels.instance }})
+      description: "CPU iowait > 10%. A high iowait means that you are disk or network bound.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+  - alert: HostUnusualDiskIo
+    expr: '(rate(node_disk_io_time_seconds_total[1m]) > 0.5) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
+    for: 15m
+    labels:
+      severity: warning
+    annotations:
+      summary: Host unusual disk IO (instance {{ $labels.instance }})
+      description: "Time spent in IO is too high on {{ $labels.instance }}. Check storage for issues.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+  - alert: HostContextSwitching
+    expr: '((rate(node_context_switches_total[5m])) / (count without(cpu, mode) (node_cpu_seconds_total{mode="idle"})) > 10000) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
+    for: 0m
+    labels:
+      severity: warning
+    annotations:
+      summary: Host context switching (instance {{ $labels.instance }})
+      description: "Context switching is growing on the node (> 10000 / s)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+  - alert: HostSwapIsFillingUp
+    expr: '((1 - (node_memory_SwapFree_bytes / node_memory_SwapTotal_bytes)) * 100 > 80) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
+    for: 2m
+    labels:
+      severity: warning
+    annotations:
+      summary: Host swap is filling up (instance {{ $labels.instance }})
+      description: "Swap is filling up (>80%)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+  - alert: HostSystemdServiceCrashed
+    expr: '(node_systemd_unit_state{state="failed"} == 1) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
+    for: 0m
+    labels:
+      severity: warning
+    annotations:
+      summary: Host systemd service crashed (instance {{ $labels.instance }})
+      description: "systemd service crashed\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+  - alert: HostPhysicalComponentTooHot
+    expr: '((node_hwmon_temp_celsius * ignoring(label) group_left(instance, job, node, sensor) node_hwmon_sensor_label{label!="tctl"} > 75)) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
+    for: 5m
+    labels:
+      severity: warning
+    annotations:
+      summary: Host physical component too hot (instance {{ $labels.instance }})
+      description: "Physical hardware component too hot\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+  - alert: HostNodeOvertemperatureAlarm
+    expr: '(node_hwmon_temp_crit_alarm_celsius == 1) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
+    for: 0m
+    labels:
+      severity: critical
+    annotations:
+      summary: Host node overtemperature alarm (instance {{ $labels.instance }})
+      description: "Physical node temperature alarm triggered\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+  - alert: HostRaidArrayGotInactive
+    expr: '(node_md_state{state="inactive"} > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
+    for: 0m
+    labels:
+      severity: critical
+    annotations:
+      summary: Host RAID array got inactive (instance {{ $labels.instance }})
+      description: "RAID array {{ $labels.device }} is in a degraded state due to one or more disk failures. The number of spare drives is insufficient to fix the issue automatically.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+  - alert: HostRaidDiskFailure
+    expr: '(node_md_disks{state="failed"} > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
+    for: 2m
+    labels:
+      severity: warning
+    annotations:
+      summary: Host RAID disk failure (instance {{ $labels.instance }})
+      description: "At least one device in RAID array on {{ $labels.instance }} failed. Array {{ $labels.md_device }} needs attention and possibly a disk swap\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+  - alert: HostKernelVersionDeviations
+    expr: '(count(sum(label_replace(node_uname_info, "kernel", "$1", "release", "([0-9]+.[0-9]+.[0-9]+).*")) by (kernel)) > 1) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
+    for: 6h
+    labels:
+      severity: warning
+    annotations:
+      summary: Host kernel version deviations (instance {{ $labels.instance }})
+      description: "Different kernel versions are running\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+  - alert: HostOomKillDetected
+    expr: '(increase(node_vmstat_oom_kill[1m]) > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
+    for: 0m
+    labels:
+      severity: warning
+    annotations:
+      summary: Host OOM kill detected (instance {{ $labels.instance }})
+      description: "OOM kill detected\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+  - alert: HostEdacCorrectableErrorsDetected
+    expr: '(increase(node_edac_correctable_errors_total[1m]) > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
+    for: 0m
+    labels:
+      severity: info
+    annotations:
+      summary: Host EDAC Correctable Errors detected (instance {{ $labels.instance }})
+      description: "Host {{ $labels.instance }} has had {{ printf \"%.0f\" $value }} correctable memory errors reported by EDAC in the last minute.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+  - alert: HostEdacUncorrectableErrorsDetected
+    expr: '(node_edac_uncorrectable_errors_total > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
+    for: 0m
+    labels:
+      severity: warning
+    annotations:
+      summary: Host EDAC Uncorrectable Errors detected (instance {{ $labels.instance }})
+      description: "Host {{ $labels.instance }} has {{ printf \"%.0f\" $value }} uncorrectable memory errors reported by EDAC.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+  - alert: HostNetworkReceiveErrors
+    expr: '(rate(node_network_receive_errs_total[2m]) / rate(node_network_receive_packets_total[2m]) > 0.01) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
+    for: 2m
+    labels:
+      severity: warning
+    annotations:
+      summary: Host Network Receive Errors (instance {{ $labels.instance }})
+      description: "Host {{ $labels.instance }} interface {{ $labels.device }} has encountered {{ printf \"%.0f\" $value }} receive errors in the last two minutes.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+  - alert: HostNetworkTransmitErrors
+    expr: '(rate(node_network_transmit_errs_total[2m]) / rate(node_network_transmit_packets_total[2m]) > 0.01) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
+    for: 2m
+    labels:
+      severity: warning
+    annotations:
+      summary: Host Network Transmit Errors (instance {{ $labels.instance }})
+      description: "Host {{ $labels.instance }} interface {{ $labels.device }} has encountered {{ printf \"%.0f\" $value }} transmit errors in the last two minutes.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+  - alert: HostNetworkInterfaceSaturated
+    expr: '((rate(node_network_receive_bytes_total{device!~"^tap.*|^vnet.*|^veth.*|^tun.*"}[1m]) + rate(node_network_transmit_bytes_total{device!~"^tap.*|^vnet.*|^veth.*|^tun.*"}[1m])) / node_network_speed_bytes{device!~"^tap.*|^vnet.*|^veth.*|^tun.*"} > 0.8 < 10000) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
+    for: 1m
+    labels:
+      severity: warning
+    annotations:
+      summary: Host Network Interface Saturated (instance {{ $labels.instance }})
+      description: "The network interface \"{{ $labels.device }}\" on \"{{ $labels.instance }}\" is getting overloaded.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+  - alert: HostNetworkBondDegraded
+    expr: '((node_bonding_active - node_bonding_slaves) != 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
+    for: 2m
+    labels:
+      severity: warning
+    annotations:
+      summary: Host Network Bond Degraded (instance {{ $labels.instance }})
+      description: "Bond \"{{ $labels.device }}\" degraded on \"{{ $labels.instance }}\".\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+  - alert: HostConntrackLimit
+    expr: '(node_nf_conntrack_entries / node_nf_conntrack_entries_limit > 0.8) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
+    for: 5m
+    labels:
+      severity: warning
+    annotations:
+      summary: Host conntrack limit (instance {{ $labels.instance }})
+      description: "The number of conntrack entries is approaching the limit\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+  - alert: HostClockSkew
+    expr: '((node_timex_offset_seconds > 0.05 and deriv(node_timex_offset_seconds[5m]) >= 0) or (node_timex_offset_seconds < -0.05 and deriv(node_timex_offset_seconds[5m]) <= 0)) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
+    for: 10m
+    labels:
+      severity: warning
+    annotations:
+      summary: Host clock skew (instance {{ $labels.instance }})
+      description: "Clock skew detected. Clock is out of sync. Ensure NTP is configured correctly on this host.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+  - alert: HostClockNotSynchronising
+    expr: '(min_over_time(node_timex_sync_status[1m]) == 0 and node_timex_maxerror_seconds >= 16) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
+    for: 2m
+    labels:
+      severity: warning
+    annotations:
+      summary: Host clock not synchronising (instance {{ $labels.instance }})
+      description: "Clock not synchronising. Ensure NTP is configured on this host.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+  - alert: HostRequiresReboot
+    expr: '(node_reboot_required > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
+    for: 4h
+    labels:
+      severity: info
+    annotations:
+      summary: Host requires reboot (instance {{ $labels.instance }})
+      description: "{{ $labels.instance }} requires a reboot.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
\ No newline at end of file
diff --git a/ansible/roles/prometheus/templates/prometheus.config.j2 b/ansible/roles/prometheus/templates/prometheus.config.j2
new file mode 100644
index 0000000..14e4b2b
--- /dev/null
+++ b/ansible/roles/prometheus/templates/prometheus.config.j2
@@ -0,0 +1,70 @@
+{{ ansible_managed | comment }}
+
+# Global configuration
+global:
+  scrape_interval: 15s # Set the scrape interval to every 15 seconds. Default is every 1 minute.
+  evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
+  # scrape_timeout is set to the global default (10s).
+
+# Alertmanager configuration
+alerting:
+  alertmanagers:
+    - static_configs:
+        - targets:
+          - localhost:9093
+
+# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
+rule_files:
+  - "{{ alertmanager_rules }}/prometheus.rules.yml"
+  - "{{ alertmanager_rules }}/blackbox.rules.yml"
+  - "{{ alertmanager_rules }}/hosts.rules.yml"
+
+# A scrape configuration containing exactly one endpoint to scrape:
+
+#scrape_config_files:
+#{% for item in scrape_files %}
+#  - "{{ item }}"
+#{% endfor %}
+
+scrape_configs:
+  - job_name: "prometheus"
+    static_configs:
+      - targets: ["localhost:9090"]
+
+{% if blackbox %}
+  - job_name: "blackbox"
+    metrics_path: /probe
+    params:
+      module: [http_2xx]
+    scrape_interval: 5s
+    static_configs:
+      - targets:
+        - https://labolyon.fr
+        - https://mail.labolyon.fr
+        - https://wiki.labolyon.fr
+        - https://git.labolyon.fr
+    relabel_configs:
+      - source_labels: [__address__]
+        target_label: __param_target
+      - source_labels: [__param_target]
+        target_label: target
+      - target_label: __address__
+        replacement: 127.0.0.1:9115 # The blackbox exporter's real hostname:port.
+{% else %}
+
+  - job_name: "linux-nodes"
+    static_configs:
+      - targets:
+        - blogs.labolyon.dn42:9100
+        - dn42-router.labolyon.dn42:9100
+        - git.labolyon.dn42:9100
+        - labolyon-fr.labolyon.dn42:9100
+        - lolix-rs1.labolyon.dn42:9100
+        - mail.labolyon.dn42:9100
+        - matrix.labolyon.dn42:9100
+        - monitoring.labolyon.dn42:9100
+        - mosquitto.labolyon.dn42:9100
+        - radius.labolyon.dn42:9100
+        - reverse-proxy.labolyon.dn42:9100
+        - wikilabolyon.dn42:9100
+{% endif %}
diff --git a/ansible/roles/prometheus/templates/prometheus.rules b/ansible/roles/prometheus/templates/prometheus.rules
new file mode 100644
index 0000000..38ed990
--- /dev/null
+++ b/ansible/roles/prometheus/templates/prometheus.rules
@@ -0,0 +1,246 @@
+groups:
+- name: EmbeddedExporter
+  rules:
+
+  - alert: PrometheusJobMissing
+    expr: 'absent(up{job="prometheus"})'
+    for: 0m
+    labels:
+      severity: warning
+    annotations:
+      summary: Prometheus job missing (instance {{ $labels.instance }})
+      description: "A Prometheus job has disappeared\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+  - alert: PrometheusTargetMissing
+    expr: 'up == 0'
+    for: 0m
+    labels:
+      severity: critical
+    annotations:
+      summary: Prometheus target missing (instance {{ $labels.instance }})
+      description: "A Prometheus target has disappeared. An exporter might have crashed.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+  - alert: PrometheusAllTargetsMissing
+    expr: 'sum by (job) (up) == 0'
+    for: 0m
+    labels:
+      severity: critical
+    annotations:
+      summary: Prometheus all targets missing (instance {{ $labels.instance }})
+      description: "A Prometheus job no longer has any living targets.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+  - alert: PrometheusTargetMissingWithWarmupTime
+    expr: 'sum by (instance, job) ((up == 0) * on (instance) group_right(job) (node_time_seconds - node_boot_time_seconds > 600))'
+    for: 0m
+    labels:
+      severity: critical
+    annotations:
+      summary: Prometheus target missing with warmup time (instance {{ $labels.instance }})
+      description: "Allow a job time to start up (10 minutes) before alerting that it's down.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+  - alert: PrometheusConfigurationReloadFailure
+    expr: 'prometheus_config_last_reload_successful != 1'
+    for: 0m
+    labels:
+      severity: warning
+    annotations:
+      summary: Prometheus configuration reload failure (instance {{ $labels.instance }})
+      description: "Prometheus configuration reload error\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+  - alert: PrometheusTooManyRestarts
+    expr: 'changes(process_start_time_seconds{job=~"prometheus|pushgateway|alertmanager"}[15m]) > 2'
+    for: 0m
+    labels:
+      severity: warning
+    annotations:
+      summary: Prometheus too many restarts (instance {{ $labels.instance }})
+      description: "Prometheus has restarted more than twice in the last 15 minutes. It might be crashlooping.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+# - alert: PrometheusAlertmanagerJobMissing
+#   expr: 'absent(up{job="alertmanager"})'
+#   for: 0m
+#   labels:
+#     severity: warning
+#   annotations:
+#     summary: Prometheus AlertManager job missing (instance {{ $labels.instance }})
+#     description: "A Prometheus AlertManager job has disappeared\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+  - alert: PrometheusAlertmanagerConfigurationReloadFailure
+    expr: 'alertmanager_config_last_reload_successful != 1'
+    for: 0m
+    labels:
+      severity: warning
+    annotations:
+      summary: Prometheus AlertManager configuration reload failure (instance {{ $labels.instance }})
+      description: "AlertManager configuration reload error\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+  - alert: PrometheusAlertmanagerConfigNotSynced
+    expr: 'count(count_values("config_hash", alertmanager_config_hash)) > 1'
+    for: 0m
+    labels:
+      severity: warning
+    annotations:
+      summary: Prometheus AlertManager config not synced (instance {{ $labels.instance }})
+      description: "Configurations of AlertManager cluster instances are out of sync\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+  - alert: PrometheusNotConnectedToAlertmanager
+    expr: 'prometheus_notifications_alertmanagers_discovered < 1'
+    for: 0m
+    labels:
+      severity: critical
+    annotations:
+      summary: Prometheus not connected to alertmanager (instance {{ $labels.instance }})
+      description: "Prometheus cannot connect to the alertmanager\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+  - alert: PrometheusRuleEvaluationFailures
+    expr: 'increase(prometheus_rule_evaluation_failures_total[3m]) > 0'
+    for: 0m
+    labels:
+      severity: critical
+    annotations:
+      summary: Prometheus rule evaluation failures (instance {{ $labels.instance }})
+      description: "Prometheus encountered {{ $value }} rule evaluation failures, leading to potentially ignored alerts.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+  - alert: PrometheusTemplateTextExpansionFailures
+    expr: 'increase(prometheus_template_text_expansion_failures_total[3m]) > 0'
+    for: 0m
+    labels:
+      severity: critical
+    annotations:
+      summary: Prometheus template text expansion failures (instance {{ $labels.instance }})
+      description: "Prometheus encountered {{ $value }} template text expansion failures\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+  - alert: PrometheusRuleEvaluationSlow
+    expr: 'prometheus_rule_group_last_duration_seconds > prometheus_rule_group_interval_seconds'
+    for: 5m
+    labels:
+      severity: warning
+    annotations:
+      summary: Prometheus rule evaluation slow (instance {{ $labels.instance }})
+      description: "Prometheus rule evaluation took more time than the scheduled interval. This indicates slower storage backend access or an overly complex query.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+  - alert: PrometheusNotificationsBacklog
+    expr: 'min_over_time(prometheus_notifications_queue_length[10m]) > 0'
+    for: 0m
+    labels:
+      severity: warning
+    annotations:
+      summary: Prometheus notifications backlog (instance {{ $labels.instance }})
+      description: "The Prometheus notification queue has not been empty for 10 minutes\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+  - alert: PrometheusAlertmanagerNotificationFailing
+    expr: 'rate(alertmanager_notifications_failed_total[1m]) > 0'
+    for: 0m
+    labels:
+      severity: critical
+    annotations:
+      summary: Prometheus AlertManager notification failing (instance {{ $labels.instance }})
+      description: "Alertmanager is failing to send notifications\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+  - alert: PrometheusTargetEmpty
+    expr: 'prometheus_sd_discovered_targets == 0'
+    for: 0m
+    labels:
+      severity: critical
+    annotations:
+      summary: Prometheus target empty (instance {{ $labels.instance }})
+      description: "Prometheus has no target in service discovery\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+  - alert: PrometheusTargetScrapingSlow
+    expr: 'prometheus_target_interval_length_seconds{quantile="0.9"} / on (interval, instance, job) prometheus_target_interval_length_seconds{quantile="0.5"} > 1.05'
+    for: 5m
+    labels:
+      severity: warning
+    annotations:
+      summary: Prometheus target scraping slow (instance {{ $labels.instance }})
+      description: "Prometheus is scraping exporters slowly since it exceeded the requested interval time. Your Prometheus server is under-provisioned.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+  - alert: PrometheusLargeScrape
+    expr: 'increase(prometheus_target_scrapes_exceeded_sample_limit_total[10m]) > 10'
+    for: 5m
+    labels:
+      severity: warning
+    annotations:
+      summary: Prometheus large scrape (instance {{ $labels.instance }})
+      description: "Prometheus has many scrapes that exceed the sample limit\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+  - alert: PrometheusTargetScrapeDuplicate
+    expr: 'increase(prometheus_target_scrapes_sample_duplicate_timestamp_total[5m]) > 0'
+    for: 0m
+    labels:
+      severity: warning
+    annotations:
+      summary: Prometheus target scrape duplicate (instance {{ $labels.instance }})
+      description: "Prometheus has many samples rejected due to duplicate timestamps but different values\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+  - alert: PrometheusTsdbCheckpointCreationFailures
+    expr: 'increase(prometheus_tsdb_checkpoint_creations_failed_total[1m]) > 0'
+    for: 0m
+    labels:
+      severity: critical
+    annotations:
+      summary: Prometheus TSDB checkpoint creation failures (instance {{ $labels.instance }})
+      description: "Prometheus encountered {{ $value }} checkpoint creation failures\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+  - alert: PrometheusTsdbCheckpointDeletionFailures
+    expr: 'increase(prometheus_tsdb_checkpoint_deletions_failed_total[1m]) > 0'
+    for: 0m
+    labels:
+      severity: critical
+    annotations:
+      summary: Prometheus TSDB checkpoint deletion failures (instance {{ $labels.instance }})
+      description: "Prometheus encountered {{ $value }} checkpoint deletion failures\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+  - alert: PrometheusTsdbCompactionsFailed
+    expr: 'increase(prometheus_tsdb_compactions_failed_total[1m]) > 0'
+    for: 0m
+    labels:
+      severity: critical
+    annotations:
+      summary: Prometheus TSDB compactions failed (instance {{ $labels.instance }})
+      description: "Prometheus encountered {{ $value }} TSDB compactions failures\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+  - alert: PrometheusTsdbHeadTruncationsFailed
+    expr: 'increase(prometheus_tsdb_head_truncations_failed_total[1m]) > 0'
+    for: 0m
+    labels:
+      severity: critical
+    annotations:
+      summary: Prometheus TSDB head truncations failed (instance {{ $labels.instance }})
+      description: "Prometheus encountered {{ $value }} TSDB head truncation failures\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+  - alert: PrometheusTsdbReloadFailures
+    expr: 'increase(prometheus_tsdb_reloads_failures_total[1m]) > 0'
+    for: 0m
+    labels:
+      severity: critical
+    annotations:
+      summary: Prometheus TSDB reload failures (instance {{ $labels.instance }})
+      description: "Prometheus encountered {{ $value }} TSDB reload failures\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+  - alert: PrometheusTsdbWalCorruptions
+    expr: 'increase(prometheus_tsdb_wal_corruptions_total[1m]) > 0'
+    for: 0m
+    labels:
+      severity: critical
+    annotations:
+      summary: Prometheus TSDB WAL corruptions (instance {{ $labels.instance }})
+      description: "Prometheus encountered {{ $value }} TSDB WAL corruptions\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+  - alert: PrometheusTsdbWalTruncationsFailed
+    expr: 'increase(prometheus_tsdb_wal_truncations_failed_total[1m]) > 0'
+    for: 0m
+    labels:
+      severity: critical
+    annotations:
+      summary: Prometheus TSDB WAL truncations failed (instance {{ $labels.instance }})
+      description: "Prometheus encountered {{ $value }} TSDB WAL truncation failures\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
+
+  - alert: PrometheusTimeseriesCardinality
+    expr: 'label_replace(count by(__name__) ({__name__=~".+"}), "name", "$1", "__name__", "(.+)") > 10000'
+    for: 0m
+    labels:
+      severity: warning
+    annotations:
+      summary: Prometheus timeseries cardinality (instance {{ $labels.instance }})
+      description: "The \"{{ $labels.name }}\" timeseries cardinality is getting very high: {{ $value }}\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
diff --git a/ansible/roles/prometheus/templates/scrape-blackbox.yml b/ansible/roles/prometheus/templates/scrape-blackbox.yml
new file mode 100644
index 0000000..91445e4
--- /dev/null
+++ b/ansible/roles/prometheus/templates/scrape-blackbox.yml
@@ -0,0 +1,18 @@
+  - job_name: "blackbox"
+    metrics_path: /probe
+    params:
+      module: [http_2xx]
+    scrape_interval: 5s
+    static_configs:
+      - targets:
+        - https://labolyon.fr
+        - https://mail.labolyon.fr
+        - https://wiki.labolyon.fr
+        - https://git.labolyon.fr
+    relabel_configs:
+      - source_labels: [__address__]
+        target_label: __param_target
+      - source_labels: [__param_target]
+        target_label: target
+      - target_label: __address__
+        replacement: 127.0.0.1:9115 # The blackbox exporter's real hostname:port.
diff --git a/ansible/roles/prometheus/templates/scrape-main.yml b/ansible/roles/prometheus/templates/scrape-main.yml
new file mode 100644
index 0000000..247bb86
--- /dev/null
+++ b/ansible/roles/prometheus/templates/scrape-main.yml
@@ -0,0 +1,15 @@
+  - job_name: "linux-nodes"
+    static_configs:
+      - targets:
+        - blogs.labolyon.dn42:9100
+        - dn42-router.labolyon.dn42:9100
+        - git.labolyon.dn42:9100
+        - labolyon-fr.labolyon.dn42:9100
+        - lolix-rs1.labolyon.dn42:9100
+        - mail.labolyon.dn42:9100
+        - matrix.labolyon.dn42:9100
+        - monitoring.labolyon.dn42:9100
+        - mosquitto.labolyon.dn42:9100
+        - radius.labolyon.dn42:9100
+        - reverse-proxy.labolyon.dn42:9100
+        - wikilabolyon.dn42:9100
\ No newline at end of file
diff --git a/ansible/roles/prometheus/vars/alpine.yml b/ansible/roles/prometheus/vars/alpine.yml
new file mode 100644
index 0000000..c317a34
--- /dev/null
+++ b/ansible/roles/prometheus/vars/alpine.yml
@@ -0,0 +1,10 @@
+---
+# vars file for prometheus role
+prometheus_package:
+  - prometheus
+  - alertmanager
+prometheus_service: prometheus
+alertmanager_service: alertmanager
+prometheus_config: '/etc/prometheus/prometheus.yml'
+alertmanager_config: '/etc/alertmanager/alertmanager.yml'
+alertmanager_rules: '/etc/alertmanager/rules'
diff --git a/ansible/roles/prometheus/vars/debian.yml b/ansible/roles/prometheus/vars/debian.yml
new file mode 100644
index 0000000..eed4501
--- /dev/null
+++ b/ansible/roles/prometheus/vars/debian.yml
@@ -0,0 +1,10 @@
+---
+# vars file for prometheus role
+prometheus_package:
+  - prometheus
+  - prometheus-alertmanager
+prometheus_service: prometheus
+alertmanager_service: prometheus-alertmanager
+prometheus_config: '/etc/prometheus/prometheus.yml'
+alertmanager_config: '/etc/prometheus/alertmanager.yml'
+alertmanager_rules: '/etc/prometheus/rules'
diff --git a/ansible/roles/prometheus/vars/openbsd.yml b/ansible/roles/prometheus/vars/openbsd.yml
new file mode 100644
index 0000000..c317a34
--- /dev/null
+++ b/ansible/roles/prometheus/vars/openbsd.yml
@@ -0,0 +1,10 @@
+---
+# vars file for prometheus role
+prometheus_package:
+  - prometheus
+  - alertmanager
+prometheus_service: prometheus
+alertmanager_service: alertmanager
+prometheus_config: '/etc/prometheus/prometheus.yml'
+alertmanager_config: '/etc/alertmanager/alertmanager.yml'
+alertmanager_rules: '/etc/alertmanager/rules'
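
Usage sketch: the playbook expects an inventory that defines the "monitoring"
and "blackbox" host groups (the node-exporter play targets hosts: all). With
such an inventory in place (the inventory path below is only an example, not
part of this patch), the whole stack can be rolled out with:

    ansible-playbook -i inventory/hosts.ini ansible/playbooks/monitoring.yml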