# Links & Resources:
# - https://samber.github.io/awesome-prometheus-alerts/rules
groups:
  - name: node-exporter
    rules:
      - alert: HostOutOfMemory
        expr: (node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes * 100 < 10) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
        for: 2m
        labels:
          severity: warning
        annotations:
          summary: Host out of memory (instance {{ $labels.instance }})
          description: "Node memory is filling up (< 10% left)\n VALUE = {{ $value }}"

      - alert: HostMemoryUnderMemoryPressure
        expr: (rate(node_vmstat_pgmajfault[1m]) > 1000) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
        for: 2m
        labels:
          severity: warning
        annotations:
          summary: Host memory under memory pressure (instance {{ $labels.instance }})
          description: "The node is under heavy memory pressure. High rate of major page faults\n VALUE = {{ $value }}"

      # You may want to increase the alert manager 'repeat_interval' for this type of alert to daily or weekly
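      # A minimal sketch of what that could look like on the Alertmanager side (this belongs in
      # alertmanager.yml, not in this rules file; route placement and the exact matcher are
      # assumptions, not our actual config):
      #
      #   route:
      #     routes:
      #       - match:
      #           severity: info
      #         repeat_interval: 1w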
      - alert: HostMemoryIsUnderutilized
        expr: (100 - (avg_over_time(node_memory_MemAvailable_bytes[30m]) / node_memory_MemTotal_bytes * 100) < 10) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
        for: 1w
        labels:
          severity: info
        annotations:
          summary: Host Memory is underutilized (instance {{ $labels.instance }})
          description: "Node memory usage has been < 10% for 1 week. Consider reducing the allocated memory. (instance {{ $labels.instance }})\n VALUE = {{ $value }}"

      - alert: HostUnusualNetworkThroughputIn
        expr: (sum by (instance) (rate(node_network_receive_bytes_total[2m])) / 1024 / 1024 > 100) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
        for: 5m
        labels:
          severity: warning
        annotations:
          summary: Host unusual network throughput in (instance {{ $labels.instance }})
          description: "Host network interfaces are probably receiving too much data (> 100 MB/s)\n VALUE = {{ $value }}"

      - alert: HostUnusualNetworkThroughputOut
        expr: (sum by (instance) (rate(node_network_transmit_bytes_total[2m])) / 1024 / 1024 > 100) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
        for: 5m
        labels:
          severity: warning
        annotations:
          summary: Host unusual network throughput out (instance {{ $labels.instance }})
          description: "Host network interfaces are probably sending too much data (> 100 MB/s)\n VALUE = {{ $value }}"

      # Have different disk read and write rate alerts for VMs and physical machines.
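      # The VM/physical split below relies on a `type` label ("virtual_machine" or "physical_machine")
      # being attached to node_uname_info. A minimal sketch of how such a label could be set via
      # static target labels in prometheus.yml (job name and target address are placeholders, not
      # our actual scrape config):
      #
      #   scrape_configs:
      #     - job_name: node
      #       static_configs:
      #         - targets: ["vm01.example.org:9100"]
      #           labels:
      #             type: virtual_machine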
      - alert: VirtualHostUnusualDiskReadRate
        expr: (sum by (instance) (rate(node_disk_read_bytes_total[2m])) / 1024 / 1024 > 50) * on(instance) group_left (nodename) node_uname_info{type="virtual_machine", nodename=~".+", nodename!="forgejo-actions-runner", nodename!="woodpecker"}
        for: 5m
        labels:
          severity: warning
        annotations:
          summary: Virtual host unusual disk read rate (instance {{ $labels.instance }})
          description: "Disk is probably reading too much data (> 50 MB/s)\n VALUE = {{ $value }}"

      - alert: VirtualHostUnusualDiskWriteRate
        expr: (sum by (instance) (rate(node_disk_written_bytes_total[2m])) / 1024 / 1024 > 50) * on(instance) group_left (nodename) node_uname_info{type="virtual_machine", nodename=~".+", nodename!="forgejo-actions-runner", nodename!="woodpecker"}
        for: 2m
        labels:
          severity: warning
        annotations:
          summary: Virtual host unusual disk write rate (instance {{ $labels.instance }})
          description: "Disk is probably writing too much data (> 50 MB/s)\n VALUE = {{ $value }}"

      # Some VMs are expected to have high read/write rates, e.g. CI servers.
      - alert: VirtualHostUnusualDiskReadRate
        expr: (sum by (instance) (rate(node_disk_read_bytes_total[2m])) / 1024 / 1024 > 50) * on(instance) group_left (nodename) node_uname_info{type="virtual_machine", nodename=~"forgejo-actions-runner|woodpecker"}
        for: 10m
        labels:
          severity: warning
        annotations:
          summary: Virtual host unusual disk read rate for 10 min (instance {{ $labels.instance }})
          description: "Disk is probably reading too much data (> 50 MB/s)\n VALUE = {{ $value }}"

      - alert: VirtualHostUnusualDiskWriteRate
        expr: (sum by (instance) (rate(node_disk_written_bytes_total[2m])) / 1024 / 1024 > 50) * on(instance) group_left (nodename) node_uname_info{type="virtual_machine", nodename=~"forgejo-actions-runner|woodpecker"}
        for: 4m
        labels:
          severity: warning
        annotations:
          summary: Virtual host unusual disk write rate for 4 min (instance {{ $labels.instance }})
          description: "Disk is probably writing too much data (> 50 MB/s)\n VALUE = {{ $value }}"

      - alert: PhysicalHostUnusualDiskReadRate
        expr: (sum by (instance) (rate(node_disk_read_bytes_total[2m])) / 1024 / 1024 > 100) * on(instance) group_left (nodename) node_uname_info{type="physical_machine", nodename=~".+"}
        for: 20m
        labels:
          severity: warning
        annotations:
          summary: Physical host unusual disk read rate (instance {{ $labels.instance }})
          description: "Disk is probably reading too much data (> 100 MB/s)\n VALUE = {{ $value }}"

      - alert: PhysicalHostUnusualDiskWriteRate
        expr: (sum by (instance) (rate(node_disk_written_bytes_total[2m])) / 1024 / 1024 > 100) * on(instance) group_left (nodename) node_uname_info{type="physical_machine", nodename=~".+"}
        for: 15m
        labels:
          severity: warning
        annotations:
          summary: Physical host unusual disk write rate (instance {{ $labels.instance }})
          description: "Disk is probably writing too much data (> 100 MB/s)\n VALUE = {{ $value }}"

      # Please add ignored mountpoints in node_exporter parameters like
      # "--collector.filesystem.ignored-mount-points=^/(sys|proc|dev|run)($|/)".
      # Same rule using "node_filesystem_free_bytes" will fire when disk fills for non-root users.
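      # A minimal sketch of passing that flag, assuming node_exporter runs as a systemd service
      # (unit file path and binary location are assumptions; newer node_exporter releases rename
      # the flag to --collector.filesystem.mount-points-exclude):
      #
      #   # /etc/systemd/system/node_exporter.service (excerpt)
      #   ExecStart=/usr/local/bin/node_exporter \
      #     --collector.filesystem.ignored-mount-points="^/(sys|proc|dev|run)($|/)"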

      - alert: HostOutOfDiskSpace
        expr: ((node_filesystem_avail_bytes * 100) / node_filesystem_size_bytes < 10 and ON (instance, device, mountpoint) node_filesystem_readonly == 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
        for: 2m
        labels:
          severity: warning
        annotations:
          summary: Host out of disk space (instance {{ $labels.instance }})
          description: "Disk is almost full (< 10% left)\n VALUE = {{ $value }}"

      # Please add ignored mountpoints in node_exporter parameters like
      # "--collector.filesystem.ignored-mount-points=^/(sys|proc|dev|run)($|/)".
      # Same rule using "node_filesystem_free_bytes" will fire when disk fills for non-root users.
      - alert: HostDiskWillFillIn24Hours
        expr: ((node_filesystem_avail_bytes * 100) / node_filesystem_size_bytes < 10 and ON (instance, device, mountpoint) predict_linear(node_filesystem_avail_bytes{fstype!~"tmpfs"}[1h], 24 * 3600) < 0 and ON (instance, device, mountpoint) node_filesystem_readonly == 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
        for: 2m
        labels:
          severity: warning
        annotations:
          summary: Host disk will fill in 24 hours (instance {{ $labels.instance }})
          description: "Filesystem is predicted to run out of space within the next 24 hours at current write rate\n VALUE = {{ $value }}"

      - alert: HostOutOfInodes
        expr: (node_filesystem_files_free{fstype!="msdosfs"} / node_filesystem_files{fstype!="msdosfs"} * 100 < 10 and ON (instance, device, mountpoint) node_filesystem_readonly == 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
        for: 2m
        labels:
          severity: warning
        annotations:
          summary: Host out of inodes (instance {{ $labels.instance }})
          description: "Disk is almost running out of available inodes (< 10% left)\n VALUE = {{ $value }}"

      - alert: HostInodesWillFillIn24Hours
        expr: (node_filesystem_files_free{fstype!="msdosfs"} / node_filesystem_files{fstype!="msdosfs"} * 100 < 10 and predict_linear(node_filesystem_files_free{fstype!="msdosfs"}[1h], 24 * 3600) < 0 and ON (instance, device, mountpoint) node_filesystem_readonly{fstype!="msdosfs"} == 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
        for: 2m
        labels:
          severity: warning
        annotations:
          summary: Host inodes will fill in 24 hours (instance {{ $labels.instance }})
          description: "Filesystem is predicted to run out of inodes within the next 24 hours at current write rate\n VALUE = {{ $value }}"

      - alert: HostFilesystemDeviceError
        expr: node_filesystem_device_error == 1
        for: 2m
        labels:
          severity: critical
        annotations:
          summary: Host filesystem device error (instance {{ $labels.instance }})
          description: "{{ $labels.instance }}: Device error with the {{ $labels.mountpoint }} filesystem\n VALUE = {{ $value }}"

      - alert: HostUnusualDiskReadLatency
        expr: (rate(node_disk_read_time_seconds_total[1m]) / rate(node_disk_reads_completed_total[1m]) > 0.1 and rate(node_disk_reads_completed_total[1m]) > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
        for: 2m
        labels:
          severity: warning
        annotations:
          summary: Host unusual disk read latency (instance {{ $labels.instance }})
          description: "Disk latency is growing (read operations > 100ms)\n VALUE = {{ $value }}"

      - alert: HostUnusualDiskWriteLatency
        expr: (rate(node_disk_write_time_seconds_total[1m]) / rate(node_disk_writes_completed_total[1m]) > 0.1 and rate(node_disk_writes_completed_total[1m]) > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
        for: 2m
        labels:
          severity: warning
        annotations:
          summary: Host unusual disk write latency (instance {{ $labels.instance }})
          description: "Disk latency is growing (write operations > 100ms)\n VALUE = {{ $value }}"

      - alert: HostHighCpuLoad
        expr: (sum by (instance) (avg by (mode, instance) (rate(node_cpu_seconds_total{mode!="idle"}[2m]))) > 0.8) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
        for: 10m
        labels:
          severity: warning
        annotations:
          summary: Host high CPU load (instance {{ $labels.instance }})
          description: "CPU load is > 80%\n VALUE = {{ $value }}"

      # We might want to introduce this later, though ideally excluding single-core hosts, if
      # possible, and only for VMs (see the sketch below the commented-out rule).
      # # You may want to increase the alert manager 'repeat_interval' for this type of alert to daily or weekly
      # - alert: HostCpuIsUnderutilized
      #   expr: (100 - (rate(node_cpu_seconds_total{mode="idle"}[30m]) * 100) < 20) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
      #   for: 1w
      #   labels:
      #     severity: info
      #   annotations:
      #     summary: Host CPU is underutilized (instance {{ $labels.instance }})
      #     description: "CPU load is < 20% for 1 week. Consider reducing the number of CPUs.\n VALUE = {{ $value }}"
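      # A minimal sketch of such a restriction; the `type` label is the one already used for the
      # VM/physical split above, and the thresholds are simply carried over from the commented-out
      # rule:
      #
      #   expr: >
      #     (100 - (rate(node_cpu_seconds_total{mode="idle"}[30m]) * 100) < 20)
      #       and on(instance) count by (instance) (node_cpu_seconds_total{mode="idle"}) > 1
      #       and on(instance) node_uname_info{type="virtual_machine"}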

      - alert: HostCpuStealNoisyNeighbor
        expr: (avg by(instance) (rate(node_cpu_seconds_total{mode="steal"}[5m])) * 100 > 10) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
        for: 0m
        labels:
          severity: warning
        annotations:
          summary: Host CPU steal noisy neighbor (instance {{ $labels.instance }})
          description: "CPU steal is > 10%. A noisy neighbor is killing VM performance or a spot instance may be out of credit.\n VALUE = {{ $value }}"

      - alert: HostCpuHighIowait
        expr: (avg by (instance) (rate(node_cpu_seconds_total{mode="iowait"}[5m])) * 100 > 10) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
        for: 0m
        labels:
          severity: warning
        annotations:
          summary: Host CPU high iowait (instance {{ $labels.instance }})
          description: "CPU iowait > 10%. A high iowait means that you are disk or network bound.\n VALUE = {{ $value }}"

      # Have different disk IO alerts for VMs and physical machines, and for physical machines
      # separate alerts for hard disks and other disks.
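      # The split assumes Linux device naming: device=~"s.+" is meant to match SATA/SAS disks
      # (sda, sdb, ...), while NVMe (nvme0n1) and virtio (vda) devices fall through to the
      # "other disk" rule. A narrower matcher such as device=~"sd.+" would state that intent
      # more precisely if needed.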
      - alert: PhysicalHostUnusualHardDiskIo
        expr: (rate(node_disk_io_time_seconds_total{device=~"s.+"}[1m]) > 0.75) * on(instance) group_left (nodename) node_uname_info{type="physical_machine", nodename=~".+"}
        for: 5m
        labels:
          severity: warning
        annotations:
          summary: Physical host unusual hard disk IO (instance {{ $labels.instance }})
          description: "Time spent in IO is too high on {{ $labels.instance }}. Check storage for issues.\n VALUE = {{ $value }}"

      - alert: PhysicalHostUnusualOtherDiskIo
        expr: (rate(node_disk_io_time_seconds_total{device!~"s.+"}[1m]) > 0.5) * on(instance) group_left (nodename) node_uname_info{type="physical_machine", nodename=~".+"}
        for: 5m
        labels:
          severity: warning
        annotations:
          summary: Physical host unusual other (non-hard) disk IO (instance {{ $labels.instance }})
          description: "Time spent in IO is too high on {{ $labels.instance }}. Check storage for issues.\n VALUE = {{ $value }}"

      - alert: VirtualHostUnusualDiskIo
        expr: (rate(node_disk_io_time_seconds_total[1m]) > 0.5) * on(instance) group_left (nodename) node_uname_info{type="virtual_machine", nodename=~".+"}
        for: 5m
        labels:
          severity: warning
        annotations:
          summary: Virtual host unusual disk IO (instance {{ $labels.instance }})
          description: "Time spent in IO is too high on {{ $labels.instance }}. Check storage for issues.\n VALUE = {{ $value }}"

      # # x2 context switches is an arbitrary number.
      # # The alert threshold depends on the nature of the application.
      # # Please read: https://github.com/samber/awesome-prometheus-alerts/issues/58
      # - alert: HostContextSwitchingHigh
      #   expr: (rate(node_context_switches_total[15m])/count without(mode,cpu) (node_cpu_seconds_total{mode="idle"})) / (rate(node_context_switches_total[1d])/count without(mode,cpu) (node_cpu_seconds_total{mode="idle"})) > 2
      #   for: 0m
      #   labels:
      #     severity: warning
      #   annotations:
      #     summary: Host context switching high (instance {{ $labels.instance }})
      #     description: "Context switching is growing on the node (twice the daily average during the last 15m)\n VALUE = {{ $value }}"

      - alert: HostSwapIsFillingUp
        expr: ((1 - (node_memory_SwapFree_bytes / node_memory_SwapTotal_bytes)) * 100 > 80) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
        for: 2m
        labels:
          severity: warning
        annotations:
          summary: Host swap is filling up (instance {{ $labels.instance }})
          description: "Swap is filling up (>80%)\n VALUE = {{ $value }}"

      - alert: HostSystemdServiceCrashed
        expr: (node_systemd_unit_state{state="failed"} == 1) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
        for: 0m
        labels:
          severity: warning
        annotations:
          summary: Host systemd service crashed (instance {{ $labels.instance }})
          description: "systemd service crashed\n VALUE = {{ $value }}"

      - alert: HostPhysicalComponentTooHot
        expr: ((node_hwmon_temp_celsius * ignoring(label) group_left(instance, job, node, sensor) node_hwmon_sensor_label{label!="tctl"} > 75)) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
        for: 5m
        labels:
          severity: warning
        annotations:
          summary: Host physical component too hot (instance {{ $labels.instance }})
          description: "Physical hardware component too hot\n VALUE = {{ $value }}"

      - alert: HostNodeOvertemperatureAlarm
        expr: ((node_hwmon_temp_crit_alarm_celsius == 1) or (node_hwmon_temp_alarm == 1)) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
        for: 0m
        labels:
          severity: critical
        annotations:
          summary: Host node overtemperature alarm (instance {{ $labels.instance }})
          description: "Physical node temperature alarm triggered\n VALUE = {{ $value }}"

      - alert: HostRaidArrayGotInactive
        expr: (node_md_state{state="inactive"} > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
        for: 0m
        labels:
          severity: critical
        annotations:
          summary: Host RAID array got inactive (instance {{ $labels.instance }})
          description: "RAID array {{ $labels.device }} is in a degraded state due to one or more disk failures. The number of spare drives is insufficient to fix the issue automatically.\n VALUE = {{ $value }}"

      - alert: HostRaidDiskFailure
        expr: (node_md_disks{state="failed"} > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
        for: 2m
        labels:
          severity: warning
        annotations:
          summary: Host RAID disk failure (instance {{ $labels.instance }})
          description: "At least one device in RAID array on {{ $labels.instance }} failed. Array {{ $labels.md_device }} needs attention and possibly a disk swap\n VALUE = {{ $value }}"

      - alert: HostKernelVersionDeviations
        expr: (count(sum(label_replace(node_uname_info, "kernel", "$1", "release", "([0-9]+.[0-9]+.[0-9]+).*")) by (kernel)) > 1) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
        for: 6h
        labels:
          severity: warning
        annotations:
          summary: Host kernel version deviations (instance {{ $labels.instance }})
          description: "Different kernel versions are running\n VALUE = {{ $value }}"

      - alert: HostOomKillDetected
        expr: (increase(node_vmstat_oom_kill[1m]) > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
        for: 0m
        labels:
          severity: warning
        annotations:
          summary: Host OOM kill detected (instance {{ $labels.instance }})
          description: "OOM kill detected\n VALUE = {{ $value }}"

      - alert: HostEdacCorrectableErrorsDetected
        expr: (increase(node_edac_correctable_errors_total[1m]) > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
        for: 0m
        labels:
          severity: info
        annotations:
          summary: Host EDAC Correctable Errors detected (instance {{ $labels.instance }})
          description: "Host {{ $labels.instance }} has had {{ printf \"%.0f\" $value }} correctable memory errors reported by EDAC in the last minute.\n VALUE = {{ $value }}"

      - alert: HostEdacUncorrectableErrorsDetected
        expr: (node_edac_uncorrectable_errors_total > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
        for: 0m
        labels:
          severity: warning
        annotations:
          summary: Host EDAC Uncorrectable Errors detected (instance {{ $labels.instance }})
          description: "Host {{ $labels.instance }} has had {{ printf \"%.0f\" $value }} uncorrectable memory errors reported by EDAC.\n VALUE = {{ $value }}"

      - alert: HostNetworkReceiveErrors
        expr: (rate(node_network_receive_errs_total[2m]) / rate(node_network_receive_packets_total[2m]) > 0.01) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
        for: 2m
        labels:
          severity: warning
        annotations:
          summary: Host Network Receive Errors (instance {{ $labels.instance }})
          description: "Host {{ $labels.instance }} interface {{ $labels.device }} has encountered {{ printf \"%.0f\" $value }} receive errors in the last two minutes.\n VALUE = {{ $value }}"

      - alert: HostNetworkTransmitErrors
        expr: (rate(node_network_transmit_errs_total[2m]) / rate(node_network_transmit_packets_total[2m]) > 0.01) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
        for: 2m
        labels:
          severity: warning
        annotations:
          summary: Host Network Transmit Errors (instance {{ $labels.instance }})
          description: "Host {{ $labels.instance }} interface {{ $labels.device }} has encountered {{ printf \"%.0f\" $value }} transmit errors in the last two minutes.\n VALUE = {{ $value }}"

      - alert: HostNetworkBondDegraded
        expr: ((node_bonding_active - node_bonding_slaves) != 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
        for: 2m
        labels:
          severity: warning
        annotations:
          summary: Host Network Bond Degraded (instance {{ $labels.instance }})
          description: "Bond \"{{ $labels.device }}\" degraded on \"{{ $labels.instance }}\".\n VALUE = {{ $value }}"

      - alert: HostConntrackLimit
        expr: (node_nf_conntrack_entries / node_nf_conntrack_entries_limit > 0.8) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
        for: 5m
        labels:
          severity: warning
        annotations:
          summary: Host conntrack limit (instance {{ $labels.instance }})
          description: "The number of conntrack entries is approaching the limit\n VALUE = {{ $value }}"

      - alert: HostClockSkew
        expr: ((node_timex_offset_seconds > 0.05 and deriv(node_timex_offset_seconds[5m]) >= 0) or (node_timex_offset_seconds < -0.05 and deriv(node_timex_offset_seconds[5m]) <= 0)) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
        for: 10m
        labels:
          severity: warning
        annotations:
          summary: Host clock skew (instance {{ $labels.instance }})
          description: "Clock skew detected. Clock is out of sync. Ensure NTP is configured correctly on this host.\n VALUE = {{ $value }}"

      - alert: HostClockNotSynchronising
        expr: (min_over_time(node_timex_sync_status[1m]) == 0 and node_timex_maxerror_seconds >= 16) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
        for: 2m
        labels:
          severity: warning
        annotations:
          summary: Host clock not synchronising (instance {{ $labels.instance }})
          description: "Clock not synchronising. Ensure NTP is configured on this host.\n VALUE = {{ $value }}"

      - alert: HostRequiresReboot
        expr: (node_reboot_required > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
        for: 4h
        labels:
          severity: info
        annotations:
          summary: Host requires reboot (instance {{ $labels.instance }})
          description: "{{ $labels.instance }} requires a reboot.\n VALUE = {{ $value }}"

  - name: prometheus
    rules:
      - alert: PrometheusJobMissing
        expr: absent(up{job="prometheus"})
        for: 0m
        labels:
          severity: warning
        annotations:
          summary: Prometheus job missing (instance {{ $labels.instance }})
          description: "A Prometheus job has disappeared\n VALUE = {{ $value }}"

      - alert: PrometheusTargetMissing
        expr: up == 0
        for: 0m
        labels:
          severity: critical
        annotations:
          summary: Prometheus target missing (instance {{ $labels.instance }})
          description: "A Prometheus target has disappeared. An exporter might have crashed.\n VALUE = {{ $value }}"

      - alert: PrometheusAllTargetsMissing
        expr: sum by (job) (up) == 0
        for: 0m
        labels:
          severity: critical
        annotations:
          summary: Prometheus all targets missing (instance {{ $labels.instance }})
          description: "A Prometheus job no longer has any living targets.\n VALUE = {{ $value }}"

      - alert: PrometheusConfigurationReloadFailure
        expr: prometheus_config_last_reload_successful != 1
        for: 0m
        labels:
          severity: warning
        annotations:
          summary: Prometheus configuration reload failure (instance {{ $labels.instance }})
          description: "Prometheus configuration reload error\n VALUE = {{ $value }}"

      - alert: PrometheusTooManyRestarts
        expr: changes(process_start_time_seconds{job=~"prometheus|pushgateway|alertmanager"}[15m]) > 2
        for: 0m
        labels:
          severity: warning
        annotations:
          summary: Prometheus too many restarts (instance {{ $labels.instance }})
          description: "Prometheus has restarted more than twice in the last 15 minutes. It might be crashlooping.\n VALUE = {{ $value }}"

      - alert: PrometheusAlertmanagerJobMissing
        expr: absent(up{job="alertmanager"})
        for: 0m
        labels:
          severity: warning
        annotations:
          summary: Prometheus AlertManager job missing (instance {{ $labels.instance }})
          description: "A Prometheus AlertManager job has disappeared\n VALUE = {{ $value }}"

      - alert: PrometheusAlertmanagerConfigurationReloadFailure
        expr: alertmanager_config_last_reload_successful != 1
        for: 0m
        labels:
          severity: warning
        annotations:
          summary: Prometheus AlertManager configuration reload failure (instance {{ $labels.instance }})
          description: "AlertManager configuration reload error\n VALUE = {{ $value }}"

      - alert: PrometheusAlertmanagerConfigNotSynced
        expr: count(count_values("config_hash", alertmanager_config_hash)) > 1
        for: 0m
        labels:
          severity: warning
        annotations:
          summary: Prometheus AlertManager config not synced (instance {{ $labels.instance }})
          description: "Configurations of AlertManager cluster instances are out of sync\n VALUE = {{ $value }}"

      # For testing.
      # - alert: PrometheusAlertmanagerE2eDeadManSwitch
      #   expr: vector(1)
      #   for: 0m
      #   labels:
      #     severity: critical
      #   annotations:
      #     summary: Prometheus AlertManager E2E dead man switch (instance {{ $labels.instance }})
      #     description: "Prometheus DeadManSwitch is an always-firing alert. It's used as an end-to-end test of Prometheus through the Alertmanager.\n VALUE = {{ $value }}"

      - alert: PrometheusNotConnectedToAlertmanager
        expr: prometheus_notifications_alertmanagers_discovered < 1
        for: 0m
        labels:
          severity: critical
        annotations:
          summary: Prometheus not connected to alertmanager (instance {{ $labels.instance }})
          description: "Prometheus cannot connect to the alertmanager\n VALUE = {{ $value }}"

      - alert: PrometheusRuleEvaluationFailures
        expr: increase(prometheus_rule_evaluation_failures_total[3m]) > 0
        for: 0m
        labels:
          severity: critical
        annotations:
          summary: Prometheus rule evaluation failures (instance {{ $labels.instance }})
          description: "Prometheus encountered {{ $value }} rule evaluation failures, leading to potentially ignored alerts.\n VALUE = {{ $value }}"

      - alert: PrometheusTemplateTextExpansionFailures
        expr: increase(prometheus_template_text_expansion_failures_total[3m]) > 0
        for: 0m
        labels:
          severity: critical
        annotations:
          summary: Prometheus template text expansion failures (instance {{ $labels.instance }})
          description: "Prometheus encountered {{ $value }} template text expansion failures\n VALUE = {{ $value }}"

      - alert: PrometheusRuleEvaluationSlow
        expr: prometheus_rule_group_last_duration_seconds > prometheus_rule_group_interval_seconds
        for: 5m
        labels:
          severity: warning
        annotations:
          summary: Prometheus rule evaluation slow (instance {{ $labels.instance }})
          description: "Prometheus rule evaluation took more time than the scheduled interval. This indicates slow storage backend access or overly complex queries.\n VALUE = {{ $value }}"

      - alert: PrometheusNotificationsBacklog
        expr: min_over_time(prometheus_notifications_queue_length[10m]) > 0
        for: 0m
        labels:
          severity: warning
        annotations:
          summary: Prometheus notifications backlog (instance {{ $labels.instance }})
          description: "The Prometheus notification queue has not been empty for 10 minutes\n VALUE = {{ $value }}"

      - alert: PrometheusAlertmanagerNotificationFailing
        expr: rate(alertmanager_notifications_failed_total[1m]) > 0
        for: 0m
        labels:
          severity: critical
        annotations:
          summary: Prometheus AlertManager notification failing (instance {{ $labels.instance }})
          description: "Alertmanager is failing to send notifications\n VALUE = {{ $value }}"

      - alert: PrometheusTargetEmpty
        expr: prometheus_sd_discovered_targets == 0
        for: 0m
        labels:
          severity: critical
        annotations:
          summary: Prometheus target empty (instance {{ $labels.instance }})
          description: "Prometheus has no target in service discovery\n VALUE = {{ $value }}"

      - alert: PrometheusTargetScrapingSlow
        expr: prometheus_target_interval_length_seconds{quantile="0.9"} / on (interval, instance, job) prometheus_target_interval_length_seconds{quantile="0.5"} > 1.05
        for: 5m
        labels:
          severity: warning
        annotations:
          summary: Prometheus target scraping slow (instance {{ $labels.instance }})
          description: "Prometheus is scraping exporters slowly: the actual scrape interval exceeds the requested one. The Prometheus server may be under-provisioned.\n VALUE = {{ $value }}"

      - alert: PrometheusLargeScrape
        expr: increase(prometheus_target_scrapes_exceeded_sample_limit_total[10m]) > 10
        for: 5m
        labels:
          severity: warning
        annotations:
          summary: Prometheus large scrape (instance {{ $labels.instance }})
          description: "Prometheus has many scrapes that exceed the sample limit\n VALUE = {{ $value }}"

      - alert: PrometheusTargetScrapeDuplicate
        expr: increase(prometheus_target_scrapes_sample_duplicate_timestamp_total[5m]) > 0
        for: 0m
        labels:
          severity: warning
        annotations:
          summary: Prometheus target scrape duplicate (instance {{ $labels.instance }})
          description: "Prometheus has many samples rejected due to duplicate timestamps but different values\n VALUE = {{ $value }}"

      - alert: PrometheusTsdbCheckpointCreationFailures
        expr: increase(prometheus_tsdb_checkpoint_creations_failed_total[1m]) > 0
        for: 0m
        labels:
          severity: critical
        annotations:
          summary: Prometheus TSDB checkpoint creation failures (instance {{ $labels.instance }})
          description: "Prometheus encountered {{ $value }} checkpoint creation failures\n VALUE = {{ $value }}"

      - alert: PrometheusTsdbCheckpointDeletionFailures
        expr: increase(prometheus_tsdb_checkpoint_deletions_failed_total[1m]) > 0
        for: 0m
        labels:
          severity: critical
        annotations:
          summary: Prometheus TSDB checkpoint deletion failures (instance {{ $labels.instance }})
          description: "Prometheus encountered {{ $value }} checkpoint deletion failures\n VALUE = {{ $value }}"

      - alert: PrometheusTsdbCompactionsFailed
        expr: increase(prometheus_tsdb_compactions_failed_total[1m]) > 0
        for: 0m
        labels:
          severity: critical
        annotations:
          summary: Prometheus TSDB compactions failed (instance {{ $labels.instance }})
          description: "Prometheus encountered {{ $value }} TSDB compaction failures\n VALUE = {{ $value }}"

      - alert: PrometheusTsdbHeadTruncationsFailed
        expr: increase(prometheus_tsdb_head_truncations_failed_total[1m]) > 0
        for: 0m
        labels:
          severity: critical
        annotations:
          summary: Prometheus TSDB head truncations failed (instance {{ $labels.instance }})
          description: "Prometheus encountered {{ $value }} TSDB head truncation failures\n VALUE = {{ $value }}"

      - alert: PrometheusTsdbReloadFailures
        expr: increase(prometheus_tsdb_reloads_failures_total[1m]) > 0
        for: 0m
        labels:
          severity: critical
        annotations:
          summary: Prometheus TSDB reload failures (instance {{ $labels.instance }})
          description: "Prometheus encountered {{ $value }} TSDB reload failures\n VALUE = {{ $value }}"

      - alert: PrometheusTsdbWalCorruptions
        expr: increase(prometheus_tsdb_wal_corruptions_total[1m]) > 0
        for: 0m
        labels:
          severity: critical
        annotations:
          summary: Prometheus TSDB WAL corruptions (instance {{ $labels.instance }})
          description: "Prometheus encountered {{ $value }} TSDB WAL corruptions\n VALUE = {{ $value }}"

      - alert: PrometheusTsdbWalTruncationsFailed
        expr: increase(prometheus_tsdb_wal_truncations_failed_total[1m]) > 0
        for: 0m
        labels:
          severity: critical
        annotations:
          summary: Prometheus TSDB WAL truncations failed (instance {{ $labels.instance }})
          description: "Prometheus encountered {{ $value }} TSDB WAL truncation failures\n VALUE = {{ $value }}"

      - alert: PrometheusTimeseriesCardinality
        expr: label_replace(count by(__name__) ({__name__=~".+"}), "name", "$1", "__name__", "(.+)") > 10000
        for: 0m
        labels:
          severity: warning
        annotations:
          summary: Prometheus timeseries cardinality (instance {{ $labels.instance }})
          description: "The \"{{ $labels.name }}\" timeseries cardinality is getting very high: {{ $value }}\n VALUE = {{ $value }}"