Add new roles for monitoring
parent: 98ef8d9896
commit: 067b325678
ansible/playbooks/monitoring.yml (new file, 35 lines)
@@ -0,0 +1,35 @@
---
# Playbook to set up monitoring modules

- name: Install and configure the control node
  hosts: monitoring
  become: yes
  roles:
    - grafana

- name: Install and configure Prometheus node - Main node
  hosts: monitoring
  become: yes
  roles:
    - prometheus
  vars:
    blackbox: false
    scrape_files:
      - /etc/prometheus/scrape-main.yml

- name: Install and configure Prometheus node - Blackbox nodes
  hosts: blackbox
  become: yes
  roles:
    - prometheus
    - blackbox-exporter
  vars:
    blackbox: true
    scrape_files:
      - /etc/prometheus/scrape-blackbox.yml

- name: Install and configure nodes
  hosts: all
  become: yes
  roles:
    - node-exporter
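The plays above target three host groups: monitoring (Grafana and the main Prometheus node), blackbox (Prometheus nodes that also run the blackbox exporter), and all (node-exporter everywhere). A minimal inventory sketch for those groups might look like the following; the group names come from the playbook, while the hostnames are placeholders borrowed from the scrape targets and the blackbox template and are not part of this commit:

# inventory.yml - illustrative sketch only, not part of this commit
all:
  children:
    monitoring:
      hosts:
        monitoring.labolyon.dn42:
    blackbox:
      hosts:
        check.home.arpa: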
ansible/roles/blackbox-exporter/handlers/main.yml (new file, 5 lines)
@@ -0,0 +1,5 @@
## Restart Blackbox-exporter on changes
- name: restart blackbox
  ansible.builtin.service:
    name: '{{ blackbox_service }}'
    state: restarted
ansible/roles/blackbox-exporter/tasks/main.yml (new file, 25 lines)
@@ -0,0 +1,25 @@
---

- name: Load variables
  include_vars: "{{ ansible_os_family|lower }}.yml"

- name: Install Blackbox exporter
  tags: blackbox-install
  ansible.builtin.package:
    name: '{{ blackbox_package }}'
    state: latest

- name: Define Blackbox configuration
  tags: blackbox-configure
  ansible.builtin.template:
    src: blackbox.config.j2
    dest: '{{ blackbox_config }}'
    backup: yes
  notify: restart blackbox

- name: Enable Blackbox service
  tags: blackbox-enable
  ansible.builtin.service:
    name: '{{ blackbox_service }}'
    state: started
    enabled: yes
ansible/roles/blackbox-exporter/templates/blackbox.config.j2 (new file, 60 lines)
@@ -0,0 +1,60 @@
{{ ansible_managed | comment }}

modules:
  http_2xx:
    prober: http
{% if inventory_hostname == 'check.home.arpa' %}
    http:
      preferred_ip_protocol: "ip4"
      ip_protocol_fallback: true
{% endif %}
  http_post_2xx:
    prober: http
{% if inventory_hostname == 'check.home.arpa' %}
    http:
      method: POST
      preferred_ip_protocol: "ip4"
      ip_protocol_fallback: true
{% endif %}
  tcp_connect:
    prober: tcp
  smtp_check:
    prober: tcp
    timeout: 5s
    tcp:
{% if inventory_hostname == 'check.home.arpa' %}
      preferred_ip_protocol: "ip4"
      ip_protocol_fallback: true
{% endif %}
      query_response:
        - expect: "^220(.*)ESMTP(.*)$"
        - send: "EHLO prober\r"
        - expect: "^250-STARTTLS"
        - send: "STARTTLS\r"
        - expect: "^220"
        - starttls: true
        - send: "EHLO prober\r"
        - expect: "^250-AUTH"
        - send: "QUIT\r"
  imap_check:
    prober: tcp
    timeout: 5s
    tcp:
{% if inventory_hostname == 'check.home.arpa' %}
      preferred_ip_protocol: "ip4"
      ip_protocol_fallback: true
{% endif %}
      query_response:
        - expect: "OK.*STARTTLS"
        - send: ". STARTTLS"
        - expect: "OK"
        - starttls: true
        - send: ". capability"
        - expect: "CAPABILITY IMAP4rev1"
  icmp:
    prober: icmp
{% if inventory_hostname == 'check.home.arpa' %}
    icmp:
      preferred_ip_protocol: "ip4"
      ip_protocol_fallback: true
{% endif %}
ansible/roles/blackbox-exporter/vars/debian.yml (new file, 5 lines)
@@ -0,0 +1,5 @@
---
# vars file for blackbox role
blackbox_package: prometheus-blackbox-exporter
blackbox_config: '/etc/prometheus/blackbox.yml'
blackbox_service: prometheus-blackbox-exporter
ansible/roles/blackbox-exporter/vars/openbsd.yml (new file, 5 lines)
@@ -0,0 +1,5 @@
---
# vars file for blackbox role
blackbox_package: blackbox_exporter
blackbox_config: '/etc/blackbox_exporter/blackbox.yml'
blackbox_service: blackbox_exporter
ansible/roles/grafana/tasks/main.yml (new file, 14 lines)
@@ -0,0 +1,14 @@
---

- name: Install Grafana
  tags: grafana-install
  ansible.builtin.package:
    name: 'grafana'
    state: latest

- name: Enable Grafana service
  tags: grafana-enable
  ansible.builtin.service:
    name: 'grafana'
    state: started
    enabled: yes
ansible/roles/node-exporter/tasks/main.yml (new file, 17 lines)
@@ -0,0 +1,17 @@
---

- name: Load package and service names
  include_vars: "{{ ansible_os_family|lower }}.yml"

- name: Install Node-exporter
  tags: node-install
  ansible.builtin.package:
    name: '{{ node_package }}'
    state: latest

- name: Enable Node-exporter service
  tags: node-enable
  ansible.builtin.service:
    name: '{{ node_service }}'
    state: started
    enabled: yes
ansible/roles/node-exporter/vars/alpine.yml (new file, 6 lines)
@@ -0,0 +1,6 @@
---
# vars file for node-exporter role
node_service: node-exporter

node_package:
  - prometheus-node-exporter
ansible/roles/node-exporter/vars/debian.yml (new file, 6 lines)
@@ -0,0 +1,6 @@
---
# vars file for node-exporter role
node_service: prometheus-node-exporter

node_package:
  - prometheus-node-exporter
ansible/roles/node-exporter/vars/openbsd.yml (new file, 6 lines)
@@ -0,0 +1,6 @@
---
# vars file for node-exporter role
node_service: node_exporter

node_package:
  - node_exporter
ansible/roles/node-exporter/vars/redhat.yml (new file, 6 lines)
@@ -0,0 +1,6 @@
---
# vars file for node-exporter role
node_service: prometheus-node-exporter

node_package:
  - golang-github-prometheus-node-exporter
ansible/roles/node-exporter/vars/suse.yml (new file, 6 lines)
@@ -0,0 +1,6 @@
---
# vars file for node-exporter role
node_service: prometheus-node_exporter

node_package:
  - golang-github-prometheus-node_exporter
ansible/roles/prometheus/handlers/main.yml (new file, 11 lines)
@@ -0,0 +1,11 @@
## Restart Prometheus on changes
- name: restart prometheus
  ansible.builtin.service:
    name: '{{ prometheus_service }}'
    state: restarted

## Restart Alertmanager on changes
- name: restart alertmanager
  ansible.builtin.service:
    name: '{{ alertmanager_service }}'
    state: restarted
ansible/roles/prometheus/tasks/main.yml (new file, 56 lines)
@@ -0,0 +1,56 @@
---

- name: Load variables
  include_vars: "{{ ansible_os_family|lower }}.yml"

- name: Install Prometheus
  tags: prometheus-install
  ansible.builtin.package:
    name: '{{ prometheus_package }}'
    state: latest

- name: Add scrape configuration
  tags: prometheus-scrape-configure
  ansible.builtin.copy:
    src: "{{ item.src }}"
    dest: "/etc/prometheus/{{ item.dest }}"
  loop:
    - { src: '../templates/scrape-main.yml', dest: 'scrape-main.yml' }
    - { src: '../templates/scrape-blackbox.yml', dest: 'scrape-blackbox.yml' }
  notify:
    - restart prometheus

- name: Add rules configuration
  tags: alertmanager-rules-configure
  ansible.builtin.copy:
    src: "{{ item.src }}"
    dest: "{{ alertmanager_rules }}/{{ item.dest }}"
  loop:
    - { src: '../templates/hosts.rules', dest: 'hosts.rules.yml' }
    - { src: '../templates/prometheus.rules', dest: 'prometheus.rules.yml' }
    - { src: '../templates/blackbox.rules', dest: 'blackbox.rules.yml' }
  notify:
    - restart prometheus
    - restart alertmanager

- name: Enable Prometheus configuration
  tags: prometheus-configure
  ansible.builtin.template:
    src: prometheus.config.j2
    dest: '{{ prometheus_config }}'
    validate: promtool check config %s
  notify: restart prometheus

- name: Enable Prometheus service
  tags: prometheus-enable
  ansible.builtin.service:
    name: '{{ prometheus_service }}'
    state: started
    enabled: yes

- name: Enable Alertmanager service
  tags: alertmanager-enable
  ansible.builtin.service:
    name: '{{ alertmanager_service }}'
    state: started
    enabled: yes
ansible/roles/prometheus/templates/blackbox.rules (new file, 95 lines)
@@ -0,0 +1,95 @@
groups:

- name: BlackboxExporter

  rules:

    - alert: BlackboxProbeFailed
      expr: 'probe_success == 0'
      for: 0m
      labels:
        severity: critical
      annotations:
        summary: Blackbox probe failed (instance {{ $labels.instance }})
        message: Blackbox probe failed for {{ $labels.target }} (instance {{ $labels.instance }})
        description: "Probe failed\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

    - alert: BlackboxConfigurationReloadFailure
      expr: 'blackbox_exporter_config_last_reload_successful != 1'
      for: 0m
      labels:
        severity: warning
      annotations:
        summary: Blackbox configuration reload failure (instance {{ $labels.instance }})
        message: Blackbox configuration reload failure (instance {{ $labels.instance }})
        description: "Blackbox configuration reload failure\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

    - alert: BlackboxSlowProbe
      expr: 'avg_over_time(probe_duration_seconds[1m]) > 1'
      for: 10m
      labels:
        severity: warning
      annotations:
        summary: Blackbox slow probe (instance {{ $labels.instance }})
        message: Blackbox slow probe for {{ $labels.target }} (instance {{ $labels.instance }})
        description: "Blackbox probe took more than 1s to complete\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

    - alert: BlackboxProbeHttpFailure
      expr: 'probe_http_status_code <= 199 OR probe_http_status_code >= 400'
      for: 0m
      labels:
        severity: critical
      annotations:
        summary: Blackbox probe HTTP failure (instance {{ $labels.instance }})
        message: Blackbox probe HTTP failure for {{ $labels.target }} (instance {{ $labels.instance }})
        description: "HTTP status code is not 200-399\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

    - alert: BlackboxSslCertificateWillExpireSoon
      expr: '3 <= round((last_over_time(probe_ssl_earliest_cert_expiry[10m]) - time()) / 86400, 0.1) < 20'
      for: 0m
      labels:
        severity: warning
      annotations:
        summary: Blackbox SSL certificate will expire soon (instance {{ $labels.instance }})
        message: Blackbox SSL certificate will expire soon for {{ $labels.target }} (instance {{ $labels.instance }})
        description: "SSL certificate expires in less than 20 days\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

    - alert: BlackboxSslCertificateWillExpireSoon
      expr: '0 <= round((last_over_time(probe_ssl_earliest_cert_expiry[10m]) - time()) / 86400, 0.1) < 3'
      for: 0m
      labels:
        severity: critical
      annotations:
        summary: Blackbox SSL certificate will expire soon (instance {{ $labels.instance }})
        message: Blackbox SSL certificate will expire soon for {{ $labels.target }} (instance {{ $labels.instance }})
        description: "SSL certificate expires in less than 3 days\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

    - alert: BlackboxSslCertificateExpired
      expr: 'round((last_over_time(probe_ssl_earliest_cert_expiry[10m]) - time()) / 86400, 0.1) < 0'
      for: 0m
      labels:
        severity: critical
      annotations:
        summary: Blackbox SSL certificate expired (instance {{ $labels.instance }})
        message: Blackbox SSL certificate expired for {{ $labels.target }} (instance {{ $labels.instance }})
        description: "SSL certificate has expired already\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

    - alert: BlackboxProbeSlowHttp
      expr: 'avg_over_time(probe_http_duration_seconds[1m]) > 1'
      for: 1m
      labels:
        severity: warning
      annotations:
        summary: Blackbox probe slow HTTP (instance {{ $labels.instance }})
        message: Blackbox probe slow HTTP for {{ $labels.target }} (instance {{ $labels.instance }})
        description: "HTTP request took more than 1s\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

    - alert: BlackboxProbeSlowPing
      expr: 'avg_over_time(probe_icmp_duration_seconds[1m]) > 1'
      for: 1m
      labels:
        severity: warning
      annotations:
        summary: Blackbox probe slow ping (instance {{ $labels.instance }})
        message: Blackbox probe slow ping for {{ $labels.target }} (instance {{ $labels.instance }})
        description: "Blackbox ping took more than 1s\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
ansible/roles/prometheus/templates/hosts.rules (new file, 347 lines)
@@ -0,0 +1,347 @@
groups:
- name: NodeExporter
  rules:

    - alert: HostOutOfMemory
      expr: '(node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes * 100 < 10) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
      for: 2m
      labels:
        severity: warning
      annotations:
        summary: Host out of memory (instance {{ $labels.instance }})
        message: Host out of memory (node {{ $labels.nodename }})
        description: "Node memory is filling up (< 10% left)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

    - alert: HostMemoryUnderMemoryPressure
      expr: '(rate(node_vmstat_pgmajfault[1m]) > 1000) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
      for: 2m
      labels:
        severity: warning
      annotations:
        summary: Host memory under memory pressure (instance {{ $labels.instance }})
        message: Host memory under memory pressure (node {{ $labels.nodename }})
        description: "The node is under heavy memory pressure. High rate of major page faults\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

    - alert: HostMemoryIsUnderutilized
      expr: '(100 - (rate(node_memory_MemAvailable_bytes[30m]) / node_memory_MemTotal_bytes * 100) < 20) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
      for: 1w
      labels:
        severity: info
      annotations:
        summary: Host Memory is underutilized (instance {{ $labels.instance }})
        message: Host Memory is underutilized (node {{ $labels.nodename }})
        description: "Node memory is < 20% for 1 week. Consider reducing memory space. (instance {{ $labels.instance }})\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

    - alert: HostUnusualNetworkThroughputIn
      expr: '(sum by (instance) (rate(node_network_receive_bytes_total[2m])) / 1024 / 1024 > 100) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
      for: 5m
      labels:
        severity: warning
      annotations:
        summary: Host unusual network throughput in (instance {{ $labels.instance }})
        message: Host unusual network throughput in (node {{ $labels.nodename }})
        description: "Host network interfaces are probably receiving too much data (> 100 MB/s)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

    - alert: HostUnusualNetworkThroughputOut
      expr: '(sum by (instance) (rate(node_network_transmit_bytes_total[2m])) / 1024 / 1024 > 100) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
      for: 5m
      labels:
        severity: warning
      annotations:
        summary: Host unusual network throughput out (instance {{ $labels.instance }})
        message: Host unusual network throughput out (node {{ $labels.nodename }})
        description: "Host network interfaces are probably sending too much data (> 100 MB/s)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

    - alert: HostUnusualDiskReadRate
      expr: '(sum by (instance) (rate(node_disk_read_bytes_total[2m])) / 1024 / 1024 > 50) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
      for: 20m
      labels:
        severity: warning
      annotations:
        summary: Host unusual disk read rate (instance {{ $labels.instance }})
        message: Host unusual disk read rate (node {{ $labels.nodename }})
        description: "Disk is probably reading too much data (> 50 MB/s)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

    - alert: HostUnusualDiskWriteRate
      expr: '(sum by (instance) (rate(node_disk_written_bytes_total[2m])) / 1024 / 1024 > 50) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
      for: 20m
      labels:
        severity: warning
      annotations:
        summary: Host unusual disk write rate (instance {{ $labels.instance }})
        message: Host unusual disk write rate (node {{ $labels.nodename }})
        description: "Disk is probably writing too much data (> 50 MB/s)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

    - alert: HostOutOfDiskSpace
      expr: '((node_filesystem_avail_bytes * 100) / node_filesystem_size_bytes < 10 and ON (instance, device, mountpoint) node_filesystem_readonly == 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
      for: 2m
      labels:
        severity: warning
      annotations:
        summary: Host out of disk space (instance {{ $labels.instance }})
        message: Host out of disk space (node {{ $labels.nodename }})
        description: "Disk is almost full (< 10% left)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

    - alert: HostDiskWillFillIn24Hours
      expr: '((node_filesystem_avail_bytes * 100) / node_filesystem_size_bytes < 10 and ON (instance, device, mountpoint) predict_linear(node_filesystem_avail_bytes{fstype!~"tmpfs"}[1h], 24 * 3600) < 0 and ON (instance, device, mountpoint) node_filesystem_readonly == 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
      for: 2m
      labels:
        severity: warning
      annotations:
        summary: Host disk will fill in 24 hours (instance {{ $labels.instance }})
        message: Host disk will fill in 24 hours (node {{ $labels.nodename }})
        description: "Filesystem is predicted to run out of space within the next 24 hours at current write rate\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

    - alert: HostOutOfInodes
      expr: '(node_filesystem_files_free / node_filesystem_files * 100 < 10 and ON (instance, device, mountpoint) node_filesystem_readonly == 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
      for: 2m
      labels:
        severity: warning
      annotations:
        summary: Host out of inodes (instance {{ $labels.instance }})
        message: Host out of inodes (node {{ $labels.nodename }})
        description: "Disk is almost running out of available inodes (< 10% left)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

    - alert: HostFilesystemDeviceError
      expr: 'node_filesystem_device_error == 1'
      for: 0m
      labels:
        severity: critical
      annotations:
        summary: Host filesystem device error (instance {{ $labels.instance }})
        message: Host filesystem device error (node {{ $labels.nodename }})
        description: "{{ $labels.instance }}: Device error with the {{ $labels.mountpoint }} filesystem\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

    - alert: HostInodesWillFillIn24Hours
      expr: '(node_filesystem_files_free / node_filesystem_files * 100 < 10 and predict_linear(node_filesystem_files_free[1h], 24 * 3600) < 0 and ON (instance, device, mountpoint) node_filesystem_readonly == 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
      for: 2m
      labels:
        severity: warning
      annotations:
        summary: Host inodes will fill in 24 hours (instance {{ $labels.instance }})
        description: "Filesystem is predicted to run out of inodes within the next 24 hours at current write rate\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

    - alert: HostUnusualDiskReadLatency
      expr: '(rate(node_disk_read_time_seconds_total[1m]) / rate(node_disk_reads_completed_total[1m]) > 0.1 and rate(node_disk_reads_completed_total[1m]) > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
      for: 15m
      labels:
        severity: warning
      annotations:
        summary: Host unusual disk read latency (instance {{ $labels.instance }})
        description: "Disk latency is growing (read operations > 100ms)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

    - alert: HostUnusualDiskWriteLatency
      expr: '(rate(node_disk_write_time_seconds_total[1m]) / rate(node_disk_writes_completed_total[1m]) > 0.1 and rate(node_disk_writes_completed_total[1m]) > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
      for: 15m
      labels:
        severity: warning
      annotations:
        summary: Host unusual disk write latency (instance {{ $labels.instance }})
        description: "Disk latency is growing (write operations > 100ms)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

    - alert: HostHighCpuLoad
      expr: '(sum by (instance) (avg by (mode, instance) (rate(node_cpu_seconds_total{mode!="idle"}[2m]))) > 0.8) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
      for: 10m
      labels:
        severity: warning
      annotations:
        summary: Host high CPU load (instance {{ $labels.instance }})
        description: "CPU load is > 80%\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

    - alert: HostCpuStealNoisyNeighbor
      expr: '(avg by(instance) (rate(node_cpu_seconds_total{mode="steal"}[5m])) * 100 > 10) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
      for: 0m
      labels:
        severity: warning
      annotations:
        summary: Host CPU steal noisy neighbor (instance {{ $labels.instance }})
        description: "CPU steal is > 10%. A noisy neighbor is killing VM performances or a spot instance may be out of credit.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

    - alert: HostCpuHighIowait
      expr: '(avg by (instance) (rate(node_cpu_seconds_total{mode="iowait"}[5m])) * 100 > 10) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
      for: 15m
      labels:
        severity: warning
      annotations:
        summary: Host CPU high iowait (instance {{ $labels.instance }})
        description: "CPU iowait > 10%. A high iowait means that you are disk or network bound.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

    - alert: HostUnusualDiskIo
      expr: '(rate(node_disk_io_time_seconds_total[1m]) > 0.5) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
      for: 15m
      labels:
        severity: warning
      annotations:
        summary: Host unusual disk IO (instance {{ $labels.instance }})
        description: "Time spent in IO is too high on {{ $labels.instance }}. Check storage for issues.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

    - alert: HostContextSwitching
      expr: '((rate(node_context_switches_total[5m])) / (count without(cpu, mode) (node_cpu_seconds_total{mode="idle"})) > 10000) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
      for: 0m
      labels:
        severity: warning
      annotations:
        summary: Host context switching (instance {{ $labels.instance }})
        description: "Context switching is growing on the node (> 10000 / s)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

    - alert: HostSwapIsFillingUp
      expr: '((1 - (node_memory_SwapFree_bytes / node_memory_SwapTotal_bytes)) * 100 > 80) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
      for: 2m
      labels:
        severity: warning
      annotations:
        summary: Host swap is filling up (instance {{ $labels.instance }})
        description: "Swap is filling up (>80%)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

    - alert: HostSystemdServiceCrashed
      expr: '(node_systemd_unit_state{state="failed"} == 1) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
      for: 0m
      labels:
        severity: warning
      annotations:
        summary: Host systemd service crashed (instance {{ $labels.instance }})
        description: "systemd service crashed\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

    - alert: HostPhysicalComponentTooHot
      expr: '((node_hwmon_temp_celsius * ignoring(label) group_left(instance, job, node, sensor) node_hwmon_sensor_label{label!="tctl"} > 75)) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
      for: 5m
      labels:
        severity: warning
      annotations:
        summary: Host physical component too hot (instance {{ $labels.instance }})
        description: "Physical hardware component too hot\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

    - alert: HostNodeOvertemperatureAlarm
      expr: '(node_hwmon_temp_crit_alarm_celsius == 1) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
      for: 0m
      labels:
        severity: critical
      annotations:
        summary: Host node overtemperature alarm (instance {{ $labels.instance }})
        description: "Physical node temperature alarm triggered\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

    - alert: HostRaidArrayGotInactive
      expr: '(node_md_state{state="inactive"} > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
      for: 0m
      labels:
        severity: critical
      annotations:
        summary: Host RAID array got inactive (instance {{ $labels.instance }})
        description: "RAID array {{ $labels.device }} is in a degraded state due to one or more disk failures. The number of spare drives is insufficient to fix the issue automatically.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

    - alert: HostRaidDiskFailure
      expr: '(node_md_disks{state="failed"} > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
      for: 2m
      labels:
        severity: warning
      annotations:
        summary: Host RAID disk failure (instance {{ $labels.instance }})
        description: "At least one device in RAID array on {{ $labels.instance }} failed. Array {{ $labels.md_device }} needs attention and possibly a disk swap\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

    - alert: HostKernelVersionDeviations
      expr: '(count(sum(label_replace(node_uname_info, "kernel", "$1", "release", "([0-9]+.[0-9]+.[0-9]+).*")) by (kernel)) > 1) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
      for: 6h
      labels:
        severity: warning
      annotations:
        summary: Host kernel version deviations (instance {{ $labels.instance }})
        description: "Different kernel versions are running\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

    - alert: HostOomKillDetected
      expr: '(increase(node_vmstat_oom_kill[1m]) > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
      for: 0m
      labels:
        severity: warning
      annotations:
        summary: Host OOM kill detected (instance {{ $labels.instance }})
        description: "OOM kill detected\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

    - alert: HostEdacCorrectableErrorsDetected
      expr: '(increase(node_edac_correctable_errors_total[1m]) > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
      for: 0m
      labels:
        severity: info
      annotations:
        summary: Host EDAC Correctable Errors detected (instance {{ $labels.instance }})
        description: "Host {{ $labels.instance }} has had {{ printf \"%.0f\" $value }} correctable memory errors reported by EDAC in the last 5 minutes.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

    - alert: HostEdacUncorrectableErrorsDetected
      expr: '(node_edac_uncorrectable_errors_total > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
      for: 0m
      labels:
        severity: warning
      annotations:
        summary: Host EDAC Uncorrectable Errors detected (instance {{ $labels.instance }})
        description: "Host {{ $labels.instance }} has had {{ printf \"%.0f\" $value }} uncorrectable memory errors reported by EDAC in the last 5 minutes.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

    - alert: HostNetworkReceiveErrors
      expr: '(rate(node_network_receive_errs_total[2m]) / rate(node_network_receive_packets_total[2m]) > 0.01) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
      for: 2m
      labels:
        severity: warning
      annotations:
        summary: Host Network Receive Errors (instance {{ $labels.instance }})
        description: "Host {{ $labels.instance }} interface {{ $labels.device }} has encountered {{ printf \"%.0f\" $value }} receive errors in the last two minutes.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

    - alert: HostNetworkTransmitErrors
      expr: '(rate(node_network_transmit_errs_total[2m]) / rate(node_network_transmit_packets_total[2m]) > 0.01) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
      for: 2m
      labels:
        severity: warning
      annotations:
        summary: Host Network Transmit Errors (instance {{ $labels.instance }})
        description: "Host {{ $labels.instance }} interface {{ $labels.device }} has encountered {{ printf \"%.0f\" $value }} transmit errors in the last two minutes.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

    - alert: HostNetworkInterfaceSaturated
      expr: '((rate(node_network_receive_bytes_total{device!~"^tap.*|^vnet.*|^veth.*|^tun.*"}[1m]) + rate(node_network_transmit_bytes_total{device!~"^tap.*|^vnet.*|^veth.*|^tun.*"}[1m])) / node_network_speed_bytes{device!~"^tap.*|^vnet.*|^veth.*|^tun.*"} > 0.8 < 10000) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
      for: 1m
      labels:
        severity: warning
      annotations:
        summary: Host Network Interface Saturated (instance {{ $labels.instance }})
        description: "The network interface \"{{ $labels.device }}\" on \"{{ $labels.instance }}\" is getting overloaded.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

    - alert: HostNetworkBondDegraded
      expr: '((node_bonding_active - node_bonding_slaves) != 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
      for: 2m
      labels:
        severity: warning
      annotations:
        summary: Host Network Bond Degraded (instance {{ $labels.instance }})
        description: "Bond \"{{ $labels.device }}\" degraded on \"{{ $labels.instance }}\".\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

    - alert: HostConntrackLimit
      expr: '(node_nf_conntrack_entries / node_nf_conntrack_entries_limit > 0.8) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
      for: 5m
      labels:
        severity: warning
      annotations:
        summary: Host conntrack limit (instance {{ $labels.instance }})
        description: "The number of conntrack is approaching limit\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

    - alert: HostClockSkew
      expr: '((node_timex_offset_seconds > 0.05 and deriv(node_timex_offset_seconds[5m]) >= 0) or (node_timex_offset_seconds < -0.05 and deriv(node_timex_offset_seconds[5m]) <= 0)) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
      for: 10m
      labels:
        severity: warning
      annotations:
        summary: Host clock skew (instance {{ $labels.instance }})
        description: "Clock skew detected. Clock is out of sync. Ensure NTP is configured correctly on this host.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

    - alert: HostClockNotSynchronising
      expr: '(min_over_time(node_timex_sync_status[1m]) == 0 and node_timex_maxerror_seconds >= 16) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
      for: 2m
      labels:
        severity: warning
      annotations:
        summary: Host clock not synchronising (instance {{ $labels.instance }})
        description: "Clock not synchronising. Ensure NTP is configured on this host.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

    - alert: HostRequiresReboot
      expr: '(node_reboot_required > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}'
      for: 4h
      labels:
        severity: info
      annotations:
        summary: Host requires reboot (instance {{ $labels.instance }})
        description: "{{ $labels.instance }} requires a reboot.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
ansible/roles/prometheus/templates/prometheus.config.j2 (new file, 70 lines)
@@ -0,0 +1,70 @@
{{ ansible_managed | comment }}

# Global configuration
global:
  scrape_interval: 15s # Set the scrape interval to every 15 seconds. Default is every 1 minute.
  evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
  # scrape_timeout is set to the global default (10s).

# Alertmanager configuration
alerting:
  alertmanagers:
    - static_configs:
        - targets:
            - localhost:9093

# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
  - "{{ alertmanager_rules }}/prometheus.rules.yml"
  - "{{ alertmanager_rules }}/blackbox.rules.yml"
  - "{{ alertmanager_rules }}/hosts.rules.yml"

# A scrape configuration containing exactly one endpoint to scrape:

#scrape_config_files:
#{% for item in scrape_files %}
#  - "{{ item }}"
#{% endfor %}

scrape_configs:
  - job_name: "prometheus"
    static_configs:
      - targets: ["localhost:9090"]

{% if blackbox %}
  - job_name: "blackbox"
    metrics_path: /probe
    params:
      module: [http_2xx]
    scrape_interval: 5s
    static_configs:
      - targets:
          - https://labolyon.fr
          - https://mail.labolyon.fr
          - https://wiki.labolyon.fr
          - https://git.labolyon.fr
    relabel_configs:
      - source_labels: [__address__]
        target_label: __param_target
      - source_labels: [__param_target]
        target_label: target
      - target_label: __address__
        replacement: 127.0.0.1:9115 # The blackbox exporter's real hostname:port.
{% else %}

  - job_name: "linux-nodes"
    static_configs:
      - targets:
          - blogs.labolyon.dn42:9100
          - dn42-router.labolyon.dn42:9100
          - git.labolyon.dn42:9100
          - labolyon-fr.labolyon.dn42:9100
          - lolix-rs1.labolyon.dn42:9100
          - mail.labolyon.dn42:9100
          - matrix.labolyon.dn42:9100
          - monitoring.labolyon.dn42:9100
          - mosquitto.labolyon.dn42:9100
          - radius.labolyon.dn42:9100
          - reverse-proxy.labolyon.dn42:9100
          - wikilabolyon.dn42:9100
{% endif %}
ansible/roles/prometheus/templates/prometheus.rules (new file, 246 lines)
@@ -0,0 +1,246 @@
groups:
- name: EmbeddedExporter
  rules:

    - alert: PrometheusJobMissing
      expr: 'absent(up{job="prometheus"})'
      for: 0m
      labels:
        severity: warning
      annotations:
        summary: Prometheus job missing (instance {{ $labels.instance }})
        description: "A Prometheus job has disappeared\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

    - alert: PrometheusTargetMissing
      expr: 'up == 0'
      for: 0m
      labels:
        severity: critical
      annotations:
        summary: Prometheus target missing (instance {{ $labels.instance }})
        description: "A Prometheus target has disappeared. An exporter might be crashed.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

    - alert: PrometheusAllTargetsMissing
      expr: 'sum by (job) (up) == 0'
      for: 0m
      labels:
        severity: critical
      annotations:
        summary: Prometheus all targets missing (instance {{ $labels.instance }})
        description: "A Prometheus job does not have living target anymore.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

    - alert: PrometheusTargetMissingWithWarmupTime
      expr: 'sum by (instance, job) ((up == 0) * on (instance) group_right(job) (node_time_seconds - node_boot_time_seconds > 600))'
      for: 0m
      labels:
        severity: critical
      annotations:
        summary: Prometheus target missing with warmup time (instance {{ $labels.instance }})
        description: "Allow a job time to start up (10 minutes) before alerting that it's down.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

    - alert: PrometheusConfigurationReloadFailure
      expr: 'prometheus_config_last_reload_successful != 1'
      for: 0m
      labels:
        severity: warning
      annotations:
        summary: Prometheus configuration reload failure (instance {{ $labels.instance }})
        description: "Prometheus configuration reload error\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

    - alert: PrometheusTooManyRestarts
      expr: 'changes(process_start_time_seconds{job=~"prometheus|pushgateway|alertmanager"}[15m]) > 2'
      for: 0m
      labels:
        severity: warning
      annotations:
        summary: Prometheus too many restarts (instance {{ $labels.instance }})
        description: "Prometheus has restarted more than twice in the last 15 minutes. It might be crashlooping.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

    # - alert: PrometheusAlertmanagerJobMissing
    #   expr: 'absent(up{job="alertmanager"})'
    #   for: 0m
    #   labels:
    #     severity: warning
    #   annotations:
    #     summary: Prometheus AlertManager job missing (instance {{ $labels.instance }})
    #     description: "A Prometheus AlertManager job has disappeared\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

    - alert: PrometheusAlertmanagerConfigurationReloadFailure
      expr: 'alertmanager_config_last_reload_successful != 1'
      for: 0m
      labels:
        severity: warning
      annotations:
        summary: Prometheus AlertManager configuration reload failure (instance {{ $labels.instance }})
        description: "AlertManager configuration reload error\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

    - alert: PrometheusAlertmanagerConfigNotSynced
      expr: 'count(count_values("config_hash", alertmanager_config_hash)) > 1'
      for: 0m
      labels:
        severity: warning
      annotations:
        summary: Prometheus AlertManager config not synced (instance {{ $labels.instance }})
        description: "Configurations of AlertManager cluster instances are out of sync\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

    - alert: PrometheusNotConnectedToAlertmanager
      expr: 'prometheus_notifications_alertmanagers_discovered < 1'
      for: 0m
      labels:
        severity: critical
      annotations:
        summary: Prometheus not connected to alertmanager (instance {{ $labels.instance }})
        description: "Prometheus cannot connect the alertmanager\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

    - alert: PrometheusRuleEvaluationFailures
      expr: 'increase(prometheus_rule_evaluation_failures_total[3m]) > 0'
      for: 0m
      labels:
        severity: critical
      annotations:
        summary: Prometheus rule evaluation failures (instance {{ $labels.instance }})
        description: "Prometheus encountered {{ $value }} rule evaluation failures, leading to potentially ignored alerts.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

    - alert: PrometheusTemplateTextExpansionFailures
      expr: 'increase(prometheus_template_text_expansion_failures_total[3m]) > 0'
      for: 0m
      labels:
        severity: critical
      annotations:
        summary: Prometheus template text expansion failures (instance {{ $labels.instance }})
        description: "Prometheus encountered {{ $value }} template text expansion failures\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

    - alert: PrometheusRuleEvaluationSlow
      expr: 'prometheus_rule_group_last_duration_seconds > prometheus_rule_group_interval_seconds'
      for: 5m
      labels:
        severity: warning
      annotations:
        summary: Prometheus rule evaluation slow (instance {{ $labels.instance }})
        description: "Prometheus rule evaluation took more time than the scheduled interval. It indicates a slower storage backend access or too complex query.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

    - alert: PrometheusNotificationsBacklog
      expr: 'min_over_time(prometheus_notifications_queue_length[10m]) > 0'
      for: 0m
      labels:
        severity: warning
      annotations:
        summary: Prometheus notifications backlog (instance {{ $labels.instance }})
        description: "The Prometheus notification queue has not been empty for 10 minutes\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

    - alert: PrometheusAlertmanagerNotificationFailing
      expr: 'rate(alertmanager_notifications_failed_total[1m]) > 0'
      for: 0m
      labels:
        severity: critical
      annotations:
        summary: Prometheus AlertManager notification failing (instance {{ $labels.instance }})
        description: "Alertmanager is failing sending notifications\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

    - alert: PrometheusTargetEmpty
      expr: 'prometheus_sd_discovered_targets == 0'
      for: 0m
      labels:
        severity: critical
      annotations:
        summary: Prometheus target empty (instance {{ $labels.instance }})
        description: "Prometheus has no target in service discovery\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

    - alert: PrometheusTargetScrapingSlow
      expr: 'prometheus_target_interval_length_seconds{quantile="0.9"} / on (interval, instance, job) prometheus_target_interval_length_seconds{quantile="0.5"} > 1.05'
      for: 5m
      labels:
        severity: warning
      annotations:
        summary: Prometheus target scraping slow (instance {{ $labels.instance }})
        description: "Prometheus is scraping exporters slowly since it exceeded the requested interval time. Your Prometheus server is under-provisioned.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

    - alert: PrometheusLargeScrape
      expr: 'increase(prometheus_target_scrapes_exceeded_sample_limit_total[10m]) > 10'
      for: 5m
      labels:
        severity: warning
      annotations:
        summary: Prometheus large scrape (instance {{ $labels.instance }})
        description: "Prometheus has many scrapes that exceed the sample limit\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

    - alert: PrometheusTargetScrapeDuplicate
      expr: 'increase(prometheus_target_scrapes_sample_duplicate_timestamp_total[5m]) > 0'
      for: 0m
      labels:
        severity: warning
      annotations:
        summary: Prometheus target scrape duplicate (instance {{ $labels.instance }})
        description: "Prometheus has many samples rejected due to duplicate timestamps but different values\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

    - alert: PrometheusTsdbCheckpointCreationFailures
      expr: 'increase(prometheus_tsdb_checkpoint_creations_failed_total[1m]) > 0'
      for: 0m
      labels:
        severity: critical
      annotations:
        summary: Prometheus TSDB checkpoint creation failures (instance {{ $labels.instance }})
        description: "Prometheus encountered {{ $value }} checkpoint creation failures\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

    - alert: PrometheusTsdbCheckpointDeletionFailures
      expr: 'increase(prometheus_tsdb_checkpoint_deletions_failed_total[1m]) > 0'
      for: 0m
      labels:
        severity: critical
      annotations:
        summary: Prometheus TSDB checkpoint deletion failures (instance {{ $labels.instance }})
        description: "Prometheus encountered {{ $value }} checkpoint deletion failures\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

    - alert: PrometheusTsdbCompactionsFailed
      expr: 'increase(prometheus_tsdb_compactions_failed_total[1m]) > 0'
      for: 0m
      labels:
        severity: critical
      annotations:
        summary: Prometheus TSDB compactions failed (instance {{ $labels.instance }})
        description: "Prometheus encountered {{ $value }} TSDB compactions failures\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

    - alert: PrometheusTsdbHeadTruncationsFailed
      expr: 'increase(prometheus_tsdb_head_truncations_failed_total[1m]) > 0'
      for: 0m
      labels:
        severity: critical
      annotations:
        summary: Prometheus TSDB head truncations failed (instance {{ $labels.instance }})
        description: "Prometheus encountered {{ $value }} TSDB head truncation failures\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

    - alert: PrometheusTsdbReloadFailures
      expr: 'increase(prometheus_tsdb_reloads_failures_total[1m]) > 0'
      for: 0m
      labels:
        severity: critical
      annotations:
        summary: Prometheus TSDB reload failures (instance {{ $labels.instance }})
        description: "Prometheus encountered {{ $value }} TSDB reload failures\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

    - alert: PrometheusTsdbWalCorruptions
      expr: 'increase(prometheus_tsdb_wal_corruptions_total[1m]) > 0'
      for: 0m
      labels:
        severity: critical
      annotations:
        summary: Prometheus TSDB WAL corruptions (instance {{ $labels.instance }})
        description: "Prometheus encountered {{ $value }} TSDB WAL corruptions\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

    - alert: PrometheusTsdbWalTruncationsFailed
      expr: 'increase(prometheus_tsdb_wal_truncations_failed_total[1m]) > 0'
      for: 0m
      labels:
        severity: critical
      annotations:
        summary: Prometheus TSDB WAL truncations failed (instance {{ $labels.instance }})
        description: "Prometheus encountered {{ $value }} TSDB WAL truncation failures\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"

    - alert: PrometheusTimeseriesCardinality
      expr: 'label_replace(count by(__name__) ({__name__=~".+"}), "name", "$1", "__name__", "(.+)") > 10000'
      for: 0m
      labels:
        severity: warning
      annotations:
        summary: Prometheus timeseries cardinality (instance {{ $labels.instance }})
        description: "The \"{{ $labels.name }}\" timeseries cardinality is getting very high: {{ $value }}\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
ansible/roles/prometheus/templates/scrape-blackbox.yml (new file, 18 lines)
@@ -0,0 +1,18 @@
- job_name: "blackbox"
  metrics_path: /probe
  params:
    module: [http_2xx]
  scrape_interval: 5s
  static_configs:
    - targets:
        - https://labolyon.fr
        - https://mail.labolyon.fr
        - https://wiki.labolyon.fr
        - https://git.labolyon.fr
  relabel_configs:
    - source_labels: [__address__]
      target_label: __param_target
    - source_labels: [__param_target]
      target_label: target
    - target_label: __address__
      replacement: 127.0.0.1:9115 # The blackbox exporter's real hostname:port.
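This scrape file only wires the http_2xx module into Prometheus; the other modules declared in blackbox.config.j2 (tcp_connect, smtp_check, imap_check, icmp) would be wired the same way, changing only the module parameter and the target list. A hypothetical sketch for ICMP probing follows; the job name and target are illustrative and not part of this commit:

# Illustrative sketch only, not part of this commit
- job_name: "blackbox-icmp"
  metrics_path: /probe
  params:
    module: [icmp]
  static_configs:
    - targets:
        - labolyon.fr # placeholder; the icmp prober takes a bare hostname, not a URL
  relabel_configs:
    - source_labels: [__address__]
      target_label: __param_target
    - source_labels: [__param_target]
      target_label: target
    - target_label: __address__
      replacement: 127.0.0.1:9115 # The blackbox exporter's real hostname:port.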
ansible/roles/prometheus/templates/scrape-main.yml (new file, 15 lines)
@@ -0,0 +1,15 @@
- job_name: "linux-nodes"
  static_configs:
    - targets:
        - blogs.labolyon.dn42:9100
        - dn42-router.labolyon.dn42:9100
        - git.labolyon.dn42:9100
        - labolyon-fr.labolyon.dn42:9100
        - lolix-rs1.labolyon.dn42:9100
        - mail.labolyon.dn42:9100
        - matrix.labolyon.dn42:9100
        - monitoring.labolyon.dn42:9100
        - mosquitto.labolyon.dn42:9100
        - radius.labolyon.dn42:9100
        - reverse-proxy.labolyon.dn42:9100
        - wikilabolyon.dn42:9100
ansible/roles/prometheus/vars/alpine.yml (new file, 10 lines)
@@ -0,0 +1,10 @@
---
# vars file for prometheus role
prometheus_package:
  - prometheus
  - alertmanager
prometheus_service: prometheus
alertmanager_service: alertmanager
prometheus_config: '/etc/prometheus/prometheus.yml'
alertmanager_config: '/etc/alertmanager/alertmanager.yml'
alertmanager_rules: '/etc/alertmanager/rules'
ansible/roles/prometheus/vars/debian.yml (new file, 10 lines)
@@ -0,0 +1,10 @@
---
# vars file for prometheus role
prometheus_package:
  - prometheus
  - prometheus-alertmanager
prometheus_service: prometheus
alertmanager_service: prometheus-alertmanager
prometheus_config: '/etc/prometheus/prometheus.yml'
alertmanager_config: '/etc/prometheus/alertmanager.yml'
alertmanager_rules: '/etc/prometheus/rules'
ansible/roles/prometheus/vars/openbsd.yml (new file, 10 lines)
@@ -0,0 +1,10 @@
---
# vars file for prometheus role
prometheus_package:
  - prometheus
  - alertmanager
prometheus_service: prometheus
alertmanager_service: alertmanager
prometheus_config: '/etc/prometheus/prometheus.yml'
alertmanager_config: '/etc/alertmanager/alertmanager.yml'
alertmanager_rules: '/etc/alertmanager/rules'