From ac7e8bb6f22e73e591abec4b2000f11291fa2e59 Mon Sep 17 00:00:00 2001
From: June <june@jsts.xyz>
Date: Sat, 15 Feb 2025 06:05:44 +0100
Subject: [PATCH] grafana: set duration for Prometheus hypervisor disk rw rate
 and hdd io alerts to 90m

Set duration for Prometheus hypervisor disk rw rate and hard disk io
alerts to 90m to account for the very long running (over an hour) backup
job.
---
 .../grafana/docker_compose/prometheus_alerts.rules.yaml     | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/resources/chaosknoten/grafana/docker_compose/prometheus_alerts.rules.yaml b/resources/chaosknoten/grafana/docker_compose/prometheus_alerts.rules.yaml
index 5cc54e9..f684385 100644
--- a/resources/chaosknoten/grafana/docker_compose/prometheus_alerts.rules.yaml
+++ b/resources/chaosknoten/grafana/docker_compose/prometheus_alerts.rules.yaml
@@ -166,7 +166,7 @@ groups:
       # Longer intervals to account for disk intensive hypervisor tasks (backups, moving VMs, etc.).
       - alert: HypervisorHostUnusualDiskReadRate
         expr: (sum by (instance) (rate(node_disk_read_bytes_total[2m])) / 1024 / 1024 > 50) * on(instance) group_left (nodename) node_uname_info{nodename="chaosknoten"}
-        for: 60m
+        for: 90m
         labels:
           severity: warning
         annotations:
@@ -174,7 +174,7 @@ groups:
           description: "Disk is probably reading too much data (> 50 MB/s)\n  VALUE = {{ $value }}"
       - alert: HypervisorHostUnusualDiskWriteRate
         expr: (sum by (instance) (rate(node_disk_written_bytes_total[2m])) / 1024 / 1024 > 50) * on(instance) group_left (nodename) node_uname_info{nodename="chaosknoten"}
-        for: 60m
+        for: 90m
         labels:
           severity: warning
         annotations:
@@ -256,7 +256,7 @@ groups:
       # Since hard disks on the hypervisor can easily have their IO saturated by hypervisor tasks (backups, moving VMs, etc.), alert when the IO is above the regular threshold for a very long time.
       - alert: HypervisorHostUnusualHardDiskIo
         expr: (rate(node_disk_io_time_seconds_total{device=~"s.+"}[1m]) > 0.5) * on(instance) group_left (nodename) node_uname_info{nodename="chaosknoten"}
-        for: 50m
+        for: 90m
         labels:
           severity: warning
         annotations: