refactor: consolidate Grafana alerting rules into a single system module and update the individual alert files

2025-05-31 09:57:03 +02:00
parent 8b5fb0861d
commit 35fa61ef34
7 changed files with 281 additions and 299 deletions

View File: alerting/cpu_usage.nix

@@ -1,66 +1,58 @@
 { lib, pkgs, config, ... }:
 {
-  services.grafana.provision.alerting.rules.settings.groups = [
-    {
-      name = "CPUUsageAlerts";
-      folder = "System Alerts";
-      interval = "1m";
-      rules = [
-        {
-          uid = "high-cpu-usage-alert-uid";
-          title = "HighCPUUsage";
-          condition = "D"; # Condition is now D
-          data = [
-            # Query A: Calculate CPU usage percentage
-            {
-              refId = "A";
-              datasourceUid = "vm-datasource-uid";
-              queryType = "prometheus";
-              relativeTimeRange = { from = 60; to = 0; }; # Query over the last minute
-              model = {
-                # Calculate average CPU usage over 1m, grouped by instance and job
-                expr = ''(1 - avg by (instance, job) (rate(node_cpu_seconds_total{mode="idle"}[1m]))) * 100'';
-                legendFormat = "CPU usage on {{instance}} ({{job}})";
-                instant = false; # This is a range query
-              };
-            }
-            # Expression C: Reduce Query A to its last value, preserving labels
-            {
-              refId = "C";
-              datasourceUid = "__expr__";
-              model = {
-                type = "reduce";
-                expression = "A"; # Input is Query A
-                reducer = "last"; # Get the last value of each series in A
-              };
-            }
-            # Expression D: Apply math condition to the reduced values from C
-            {
-              refId = "D";
-              datasourceUid = "__expr__";
-              model = {
-                type = "math";
-                expression = "$C > 90"; # Alert if CPU usage from C is > 90%
-              };
-            }
-          ];
-          for = "5m"; # Duration the condition must be met
-          noDataState = "NoData";
-          execErrState = "Error";
-          annotations = {
-            summary = "High CPU usage on {{ $labels.instance }}";
-            description = ''CPU usage on {{ $labels.instance }} (job: {{ $labels.job }}) has been above 90% for more than 5 minutes. Current value: {{ if $values.C }}{{ $values.C | humanizePercentage }}{{ else }}N/A{{ end }}%.'';
-          };
-          labels = {
-            severity = "warning";
-            category = "performance";
-          };
-        }
-      ];
-    }
-  ];
+  grafanaAlertRuleDefinitions = [
+    {
+      uid = "high-cpu-usage-alert-uid";
+      title = "HighCPUUsage";
+      condition = "D"; # Condition is now D
+      data = [
+        # Query A: Calculate CPU usage percentage
+        {
+          refId = "A";
+          datasourceUid = "vm-datasource-uid";
+          queryType = "prometheus";
+          relativeTimeRange = { from = 60; to = 0; }; # Query over the last minute
+          model = {
+            # Calculate average CPU usage over 1m, grouped by instance and job
+            expr = ''(1 - avg by (instance, job) (rate(node_cpu_seconds_total{mode="idle"}[1m]))) * 100'';
+            legendFormat = "CPU usage on {{instance}} ({{job}})";
+            instant = false; # This is a range query
+          };
+        }
+        # Expression C: Reduce Query A to its last value, preserving labels
+        {
+          refId = "C";
+          datasourceUid = "__expr__";
+          model = {
+            type = "reduce";
+            expression = "A"; # Input is Query A
+            reducer = "last"; # Get the last value of each series in A
+          };
+        }
+        # Expression D: Apply math condition to the reduced values from C
+        {
+          refId = "D";
+          datasourceUid = "__expr__";
+          model = {
+            type = "math";
+            expression = "$C > 90"; # Alert if CPU usage from C is > 90%
+          };
+        }
+      ];
+      for = "5m"; # Duration the condition must be met
+      noDataState = "NoData";
+      execErrState = "Error";
+      annotations = {
+        summary = "High CPU usage on {{ $labels.instance }}";
+        description = ''CPU usage on {{ $labels.instance }} (job: {{ $labels.job }}) has been above 90% for more than 5 minutes. Current value: {{ if $values.C }}{{ $values.C | humanizePercentage }}{{ else }}N/A{{ end }}%.'';
+      };
+      labels = {
+        severity = "warning";
+        category = "performance";
+      };
+    }
+  ];
 }
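Aside: the refactored threshold rules all share the same three-stage pipeline — a PromQL query (A), a reduce-to-last expression (C), and a math threshold (D); only host_down.nix deviates (B/C, "$B == 0", noDataState = "Alerting"). A hypothetical helper, not part of this commit, could factor that shape out; mkThresholdRule and its arguments below are illustrative names only:

{ lib, pkgs, config, ... }:
let
  # Hypothetical helper (illustrative only): builds the query -> reduce -> math
  # pipeline used by the threshold rules in this commit.
  mkThresholdRule =
    { uid, title, expr, legendFormat, threshold, duration, annotations, labels }:
    {
      inherit uid title annotations labels;
      condition = "D";
      data = [
        {
          refId = "A";
          datasourceUid = "vm-datasource-uid";
          queryType = "prometheus";
          relativeTimeRange = { from = 60; to = 0; };
          model = { inherit expr legendFormat; instant = false; };
        }
        {
          refId = "C";
          datasourceUid = "__expr__";
          model = { type = "reduce"; expression = "A"; reducer = "last"; };
        }
        {
          refId = "D";
          datasourceUid = "__expr__";
          model = { type = "math"; expression = "$C > ${toString threshold}"; };
        }
      ];
      for = duration;
      noDataState = "NoData";
      execErrState = "Error";
    };
in
{
  grafanaAlertRuleDefinitions = [
    (mkThresholdRule {
      uid = "high-cpu-usage-alert-uid";
      title = "HighCPUUsage";
      expr = ''(1 - avg by (instance, job) (rate(node_cpu_seconds_total{mode="idle"}[1m]))) * 100'';
      legendFormat = "CPU usage on {{instance}} ({{job}})";
      threshold = 90;
      duration = "5m";
      annotations.summary = "High CPU usage on {{ $labels.instance }}";
      labels = { severity = "warning"; category = "performance"; };
    })
  ];
}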

View File: alerting/disk_usage.nix

@@ -1,85 +1,76 @@
 { lib, pkgs, config, ... }:
 {
-  services.grafana.provision.alerting.rules.settings.groups = [
-    {
-      # orgId = 1; # Defaults to 1 for provisioned rules
-      name = "DiskUsageAlerts"; # Name of the rule group
-      folder = "System Alerts"; # The folder these rules belong to in Grafana UI
-      interval = "1m"; # How often to evaluate rules in this group
-      rules = [
-        {
-          uid = "high-disk-usage-alert-uid"; # Optional: provide a stable UID for the rule itself
-          title = "HighDiskUsage"; # Name of the alert rule (was 'alert' in vmalert)
-
-          # Condition for the alert to fire. 'C' refers to the refId of the threshold expression.
-          condition = "D"; # Condition is now D
-          # Removed rule-level relativeTimeRange
-
-          # Data queries and expressions
-          data = [
-            # Query A: Calculate disk usage percentage
-            {
-              refId = "A";
-              datasourceUid = "vm-datasource-uid"; # UID of the VictoriaMetrics datasource
-              queryType = "prometheus"; # Explicitly set, though often inferred
-              relativeTimeRange = { from = 60; to = 0; }; # Query-level, integer seconds
-              model = {
-                expr = ''
-                  (
-                    node_filesystem_size_bytes{fstype!~"tmpfs|rootfs",mountpoint!=""} - node_filesystem_avail_bytes{fstype!~"tmpfs|rootfs",mountpoint!=""}
-                  ) / (node_filesystem_size_bytes{fstype!~"tmpfs|rootfs",mountpoint!=""} > 0) * 100
-                  and node_filesystem_size_bytes{fstype!~"tmpfs|rootfs",mountpoint!=""}
-                  and node_filesystem_avail_bytes{fstype!~"tmpfs|rootfs",mountpoint!=""}
-                '';
-                legendFormat = "{{mountpoint}} on {{instance}}"; # Example legend
-                instant = false; # For range queries, default is false
-              };
-            }
-            # Expression C: Reduce Query A to its last value, preserving labels
-            {
-              refId = "C";
-              datasourceUid = "__expr__";
-              model = {
-                type = "reduce";
-                expression = "A"; # Input is Query A
-                reducer = "last"; # Get the last value of each series in A
-              };
-            }
-            # Expression D: Apply math condition to the reduced values from C
-            {
-              refId = "D";
-              datasourceUid = "__expr__";
-              model = {
-                type = "math";
-                expression = "$C > 85"; # Check if the last value from each series in C is > 85
-              };
-            }
-          ];
-          for = "15m"; # Duration the condition must be met (same as vmalert)
-          # How to handle states where data is missing or query errors
-          noDataState = "NoData"; # Options: NoData, Alerting, OK
-          execErrState = "Error"; # Options: Error, Alerting, OK
-          annotations = {
-            summary = "High disk usage on {{ $labels.instance }} at {{ $labels.mountpoint }}";
-            description = ''
-              Disk usage on {{ $labels.instance }} for mount point {{ $labels.mountpoint }}
-              (fstype: {{ $labels.fstype }}) has been above 85% for more than 15 minutes.
-              Current value: {{ if $values.C }}{{ $values.C | humanizePercentage }}{{ else }}N/A{{ end }}%.
-            ''; # Using $values.C as it's the input to the math condition D
-          };
-          labels = {
-            severity = "warning";
-            category = "capacity";
-            # Grafana automatically adds labels from the query result (instance, mountpoint, etc.)
-            # and labels from the rule group/folder.
-          };
-          # isPaused = false; # Default is not paused
-        }
-      ];
-    }
-  ];
+  grafanaAlertRuleDefinitions = [
+    {
+      uid = "high-disk-usage-alert-uid"; # Optional: provide a stable UID for the rule itself
+      title = "HighDiskUsage"; # Name of the alert rule (was 'alert' in vmalert)
+
+      # Condition for the alert to fire. 'D' refers to the refId of the threshold expression.
+      condition = "D"; # Condition is now D
+      # Removed rule-level relativeTimeRange
+
+      # Data queries and expressions
+      data = [
+        # Query A: Calculate disk usage percentage
+        {
+          refId = "A";
+          datasourceUid = "vm-datasource-uid"; # UID of the VictoriaMetrics datasource
+          queryType = "prometheus"; # Explicitly set, though often inferred
+          relativeTimeRange = { from = 60; to = 0; }; # Query-level, integer seconds
+          model = {
+            expr = ''
+              (
+                node_filesystem_size_bytes{fstype!~"tmpfs|rootfs",mountpoint!=""} - node_filesystem_avail_bytes{fstype!~"tmpfs|rootfs",mountpoint!=""}
+              ) / (node_filesystem_size_bytes{fstype!~"tmpfs|rootfs",mountpoint!=""} > 0) * 100
+              and node_filesystem_size_bytes{fstype!~"tmpfs|rootfs",mountpoint!=""}
+              and node_filesystem_avail_bytes{fstype!~"tmpfs|rootfs",mountpoint!=""}
+            '';
+            legendFormat = "{{mountpoint}} on {{instance}}"; # Example legend
+            instant = false; # For range queries, default is false
+          };
+        }
+        # Expression C: Reduce Query A to its last value, preserving labels
+        {
+          refId = "C";
+          datasourceUid = "__expr__";
+          model = {
+            type = "reduce";
+            expression = "A"; # Input is Query A
+            reducer = "last"; # Get the last value of each series in A
+          };
+        }
+        # Expression D: Apply math condition to the reduced values from C
+        {
+          refId = "D";
+          datasourceUid = "__expr__";
+          model = {
+            type = "math";
+            expression = "$C > 85"; # Check if the last value from each series in C is > 85
+          };
+        }
+      ];
+      for = "15m"; # Duration the condition must be met (same as vmalert)
+      # How to handle states where data is missing or query errors
+      noDataState = "NoData"; # Options: NoData, Alerting, OK
+      execErrState = "Error"; # Options: Error, Alerting, OK
+      annotations = {
+        summary = "High disk usage on {{ $labels.instance }} at {{ $labels.mountpoint }}";
+        description = ''
+          Disk usage on {{ $labels.instance }} for mount point {{ $labels.mountpoint }}
+          (fstype: {{ $labels.fstype }}) has been above 85% for more than 15 minutes.
+          Current value: {{ if $values.C }}{{ $values.C | humanizePercentage }}{{ else }}N/A{{ end }}%.
+        ''; # Using $values.C as it's the input to the math condition D
+      };
+      labels = {
+        severity = "warning";
+        category = "capacity";
+        # Grafana automatically adds labels from the query result (instance, mountpoint, etc.)
+        # and labels from the rule group/folder.
+      };
+      # isPaused = false; # Default is not paused
+    }
+  ];
 }

View File: alerting/host_down.nix

@@ -1,62 +1,54 @@
 { lib, pkgs, config, ... }:
 {
-  services.grafana.provision.alerting.rules.settings.groups = [
-    {
-      name = "HostStatusAlerts";
-      folder = "System Alerts";
-      interval = "1m";
-      rules = [
-        {
-          uid = "host-down-alert-uid";
-          title = "HostDown";
-          condition = "C";
-          data = [
-            {
-              refId = "A";
-              datasourceUid = "vm-datasource-uid";
-              queryType = "prometheus";
-              relativeTimeRange = { from = 60; to = 0; }; # Query over the last minute
-              model = {
-                expr = ''up'';
-                legendFormat = "{{instance}} ({{job}})";
-                instant = false; # Changed from true, as relativeTimeRange is used
-              };
-            }
-            { # New Expression B: Reduce Query A
-              refId = "B";
-              datasourceUid = "__expr__";
-              model = {
-                type = "reduce";
-                expression = "A"; # Input is Query A
-                reducer = "last"; # Get the last value of each series in A
-              };
-            }
-            { # Modified Expression C: Math condition based on B
-              refId = "C";
-              datasourceUid = "__expr__";
-              model = {
-                type = "math";
-                expression = "$B == 0"; # Check if the last value from B is 0
-              };
-            }
-          ];
-          for = "2m";
-          noDataState = "Alerting";
-          execErrState = "Error";
-          annotations = {
-            summary = "Host {{ $labels.instance }} is down";
-            description = ''Host {{ $labels.instance }} (job: {{ $labels.job }}) has been down for more than 2 minutes.'';
-          };
-          labels = {
-            severity = "critical";
-            category = "availability";
-          };
-        }
-      ];
-    }
-  ];
+  grafanaAlertRuleDefinitions = [
+    {
+      uid = "host-down-alert-uid";
+      title = "HostDown";
+      condition = "C";
+      data = [
+        {
+          refId = "A";
+          datasourceUid = "vm-datasource-uid";
+          queryType = "prometheus";
+          relativeTimeRange = { from = 60; to = 0; }; # Query over the last minute
+          model = {
+            expr = ''up'';
+            legendFormat = "{{instance}} ({{job}})";
+            instant = false; # Changed from true, as relativeTimeRange is used
+          };
+        }
+        { # New Expression B: Reduce Query A
+          refId = "B";
+          datasourceUid = "__expr__";
+          model = {
+            type = "reduce";
+            expression = "A"; # Input is Query A
+            reducer = "last"; # Get the last value of each series in A
+          };
+        }
+        { # Modified Expression C: Math condition based on B
+          refId = "C";
+          datasourceUid = "__expr__";
+          model = {
+            type = "math";
+            expression = "$B == 0"; # Check if the last value from B is 0
+          };
+        }
+      ];
+      for = "2m";
+      noDataState = "Alerting";
+      execErrState = "Error";
+      annotations = {
+        summary = "Host {{ $labels.instance }} is down";
+        description = ''Host {{ $labels.instance }} (job: {{ $labels.job }}) has been down for more than 2 minutes.'';
+      };
+      labels = {
+        severity = "critical";
+        category = "availability";
+      };
+    }
+  ];
 }

View File: alerting/inode_usage.nix

@@ -1,71 +1,63 @@
 { lib, pkgs, config, ... }:
 {
-  services.grafana.provision.alerting.rules.settings.groups = [
-    {
-      name = "InodeUsageAlerts";
-      folder = "System Alerts";
-      interval = "1m";
-      rules = [
-        {
-          uid = "high-inode-usage-alert-uid";
-          title = "HighInodeUsage";
-          condition = "D"; # Condition is now D
-          data = [
-            # Query A: Calculate inode usage percentage
-            {
-              refId = "A";
-              datasourceUid = "vm-datasource-uid";
-              queryType = "prometheus";
-              relativeTimeRange = { from = 60; to = 0; };
-              model = {
-                expr = ''
-                  (
-                    node_filesystem_files{fstype!~"tmpfs|rootfs",mountpoint!=""} - node_filesystem_files_free{fstype!~"tmpfs|rootfs",mountpoint!=""}
-                  ) / (node_filesystem_files{fstype!~"tmpfs|rootfs",mountpoint!=""} > 0) * 100
-                  and node_filesystem_files{fstype!~"tmpfs|rootfs",mountpoint!=""}
-                  and node_filesystem_files_free{fstype!~"tmpfs|rootfs",mountpoint!=""}
-                '';
-                legendFormat = "{{mountpoint}} on {{instance}}";
-                instant = false;
-              };
-            }
-            # Expression C: Reduce Query A to its last value, preserving labels
-            {
-              refId = "C";
-              datasourceUid = "__expr__";
-              model = {
-                type = "reduce";
-                expression = "A"; # Input is Query A
-                reducer = "last"; # Get the last value of each series in A
-              };
-            }
-            # Expression D: Apply math condition to the reduced values from C
-            {
-              refId = "D";
-              datasourceUid = "__expr__";
-              model = {
-                type = "math";
-                expression = "$C > 80"; # Alert if inode usage from C is > 80%
-              };
-            }
-          ];
-          for = "30m"; # Duration the condition must be met
-          noDataState = "NoData";
-          execErrState = "Error";
-          annotations = {
-            summary = "High inode usage on {{ $labels.instance }} at {{ $labels.mountpoint }}";
-            description = ''Inode usage on {{ $labels.instance }} for mount point {{ $labels.mountpoint }} (fstype: {{ $labels.fstype }}) has been above 80% for more than 30 minutes. Current value: {{ if $values.C }}{{ $values.C | humanizePercentage }}{{ else }}N/A{{ end }}%.'';
-          };
-          labels = {
-            severity = "warning";
-            category = "capacity";
-          };
-        }
-      ];
-    }
-  ];
+  grafanaAlertRuleDefinitions = [
+    {
+      uid = "high-inode-usage-alert-uid";
+      title = "HighInodeUsage";
+      condition = "D"; # Condition is now D
+      data = [
+        # Query A: Calculate inode usage percentage
+        {
+          refId = "A";
+          datasourceUid = "vm-datasource-uid";
+          queryType = "prometheus";
+          relativeTimeRange = { from = 60; to = 0; };
+          model = {
+            expr = ''
+              (
+                node_filesystem_files{fstype!~"tmpfs|rootfs",mountpoint!=""} - node_filesystem_files_free{fstype!~"tmpfs|rootfs",mountpoint!=""}
+              ) / (node_filesystem_files{fstype!~"tmpfs|rootfs",mountpoint!=""} > 0) * 100
+              and node_filesystem_files{fstype!~"tmpfs|rootfs",mountpoint!=""}
+              and node_filesystem_files_free{fstype!~"tmpfs|rootfs",mountpoint!=""}
+            '';
+            legendFormat = "{{mountpoint}} on {{instance}}";
+            instant = false;
+          };
+        }
+        # Expression C: Reduce Query A to its last value, preserving labels
+        {
+          refId = "C";
+          datasourceUid = "__expr__";
+          model = {
+            type = "reduce";
+            expression = "A"; # Input is Query A
+            reducer = "last"; # Get the last value of each series in A
+          };
+        }
+        # Expression D: Apply math condition to the reduced values from C
+        {
+          refId = "D";
+          datasourceUid = "__expr__";
+          model = {
+            type = "math";
+            expression = "$C > 80"; # Alert if inode usage from C is > 80%
+          };
+        }
+      ];
+      for = "30m"; # Duration the condition must be met
+      noDataState = "NoData";
+      execErrState = "Error";
+      annotations = {
+        summary = "High inode usage on {{ $labels.instance }} at {{ $labels.mountpoint }}";
+        description = ''Inode usage on {{ $labels.instance }} for mount point {{ $labels.mountpoint }} (fstype: {{ $labels.fstype }}) has been above 80% for more than 30 minutes. Current value: {{ if $values.C }}{{ $values.C | humanizePercentage }}{{ else }}N/A{{ end }}%.'';
+      };
+      labels = {
+        severity = "warning";
+        category = "capacity";
+      };
+    }
+  ];
 }

View File: alerting/ram_usage.nix

@@ -1,69 +1,61 @@
 { lib, pkgs, config, ... }:
 {
-  services.grafana.provision.alerting.rules.settings.groups = [
-    {
-      name = "RAMUsageAlerts";
-      folder = "System Alerts";
-      interval = "1m";
-      rules = [
-        {
-          uid = "high-ram-usage-alert-uid";
-          title = "HighRAMUsage";
-          condition = "D"; # Condition is now D
-          data = [
-            # Query A: Calculate RAM usage percentage
-            {
-              refId = "A";
-              datasourceUid = "vm-datasource-uid";
-              queryType = "prometheus";
-              relativeTimeRange = { from = 60; to = 0; };
-              model = {
-                expr = ''
-                  (1 - node_memory_MemAvailable_bytes / (node_memory_MemTotal_bytes > 0)) * 100
-                  and node_memory_MemAvailable_bytes
-                  and node_memory_MemTotal_bytes
-                '';
-                legendFormat = "RAM usage on {{instance}} ({{job}})";
-                instant = false;
-              };
-            }
-            # Expression C: Reduce Query A to its last value, preserving labels
-            {
-              refId = "C";
-              datasourceUid = "__expr__";
-              model = {
-                type = "reduce";
-                expression = "A"; # Input is Query A
-                reducer = "last"; # Get the last value of each series in A
-              };
-            }
-            # Expression D: Apply math condition to the reduced values from C
-            {
-              refId = "D";
-              datasourceUid = "__expr__";
-              model = {
-                type = "math";
-                expression = "$C > 90"; # Alert if RAM usage from C is > 90%
-              };
-            }
-          ];
-          for = "10m"; # Duration the condition must be met
-          noDataState = "NoData";
-          execErrState = "Error";
-          annotations = {
-            summary = "High RAM usage on {{ $labels.instance }}";
-            description = ''RAM usage on {{ $labels.instance }} (job: {{ $labels.job }}) has been above 90% for more than 10 minutes. Current value: {{ if $values.C }}{{ $values.C | humanizePercentage }}{{ else }}N/A{{ end }}%.'';
-          };
-          labels = {
-            severity = "warning";
-            category = "performance";
-          };
-        }
-      ];
-    }
-  ];
+  grafanaAlertRuleDefinitions = [
+    {
+      uid = "high-ram-usage-alert-uid";
+      title = "HighRAMUsage";
+      condition = "D"; # Condition is now D
+      data = [
+        # Query A: Calculate RAM usage percentage
+        {
+          refId = "A";
+          datasourceUid = "vm-datasource-uid";
+          queryType = "prometheus";
+          relativeTimeRange = { from = 60; to = 0; };
+          model = {
+            expr = ''
+              (1 - node_memory_MemAvailable_bytes / (node_memory_MemTotal_bytes > 0)) * 100
+              and node_memory_MemAvailable_bytes
+              and node_memory_MemTotal_bytes
+            '';
+            legendFormat = "RAM usage on {{instance}} ({{job}})";
+            instant = false;
+          };
+        }
+        # Expression C: Reduce Query A to its last value, preserving labels
+        {
+          refId = "C";
+          datasourceUid = "__expr__";
+          model = {
+            type = "reduce";
+            expression = "A"; # Input is Query A
+            reducer = "last"; # Get the last value of each series in A
+          };
+        }
+        # Expression D: Apply math condition to the reduced values from C
+        {
+          refId = "D";
+          datasourceUid = "__expr__";
+          model = {
+            type = "math";
+            expression = "$C > 90"; # Alert if RAM usage from C is > 90%
+          };
+        }
+      ];
+      for = "10m"; # Duration the condition must be met
+      noDataState = "NoData";
+      execErrState = "Error";
+      annotations = {
+        summary = "High RAM usage on {{ $labels.instance }}";
+        description = ''RAM usage on {{ $labels.instance }} (job: {{ $labels.job }}) has been above 90% for more than 10 minutes. Current value: {{ if $values.C }}{{ $values.C | humanizePercentage }}{{ else }}N/A{{ end }}%.'';
+      };
+      labels = {
+        severity = "warning";
+        category = "performance";
+      };
+    }
+  ];
 }

View File: alerting/system/default.nix

@@ -0,0 +1,21 @@
+{ lib, pkgs, config, ... }:
+let
+  # Import rule definitions from refactored alert files in the parent 'alerting' directory
+  cpuAlertRules = (import ../cpu_usage.nix { inherit lib pkgs config; }).grafanaAlertRuleDefinitions;
+  diskAlertRules = (import ../disk_usage.nix { inherit lib pkgs config; }).grafanaAlertRuleDefinitions;
+  hostDownAlertRules = (import ../host_down.nix { inherit lib pkgs config; }).grafanaAlertRuleDefinitions;
+  inodeAlertRules = (import ../inode_usage.nix { inherit lib pkgs config; }).grafanaAlertRuleDefinitions;
+  ramAlertRules = (import ../ram_usage.nix { inherit lib pkgs config; }).grafanaAlertRuleDefinitions;
+  allSystemRules = cpuAlertRules ++ diskAlertRules ++ hostDownAlertRules ++ inodeAlertRules ++ ramAlertRules;
+in
+{
+  services.grafana.provision.alerting.rules.settings.groups = [
+    {
+      name = "System Alerts"; # This is the Grafana alert group name
+      folder = "System Alerts"; # This is the Grafana folder name
+      interval = "1m";
+      rules = allSystemRules;
+    }
+  ];
+}
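The contract for extending this module: each alert file returns a plain attrset exposing a grafanaAlertRuleDefinitions list, and this file concatenates those lists into one evaluation group. A sketch of a hypothetical new alert file (swap_usage.nix is not part of this commit) following that contract:

# Hypothetical ../swap_usage.nix (illustrative only):
{ lib, pkgs, config, ... }:
{
  grafanaAlertRuleDefinitions = [
    {
      uid = "high-swap-usage-alert-uid";
      title = "HighSwapUsage";
      condition = "D";
      data = [
        # Query A, reduce C, and math D, structured as in the files above
      ];
      for = "10m";
      noDataState = "NoData";
      execErrState = "Error";
    }
  ];
}

It would then be wired in with one extra let binding here (swapAlertRules = (import ../swap_usage.nix { inherit lib pkgs config; }).grafanaAlertRuleDefinitions;) and appended to allSystemRules.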

View File

@@ -28,11 +28,13 @@ let
 in
 {
   imports = [
-    ./alerting/disk_usage.nix
-    ./alerting/cpu_usage.nix
-    ./alerting/host_down.nix
-    ./alerting/inode_usage.nix
-    ./alerting/ram_usage.nix
+    # Individual alert files removed, now handled by alerting/system/default.nix
+    # ./alerting/disk_usage.nix
+    # ./alerting/cpu_usage.nix
+    # ./alerting/host_down.nix
+    # ./alerting/inode_usage.nix
+    # ./alerting/ram_usage.nix
+    ./alerting/system/default.nix # Added: imports the consolidated system alerts module
     # ... other rule files can be added here ...
     ./datasources/victoriametrics.nix
   ];
@@ -99,7 +101,7 @@ in
       };
       provision = {
         alerting = {
-          rules.settings.groups = lib.mkMerge []; # Allows rule groups to be merged
+          rules.settings.groups = lib.mkMerge []; # Allows rule groups to be merged (including the one from system/default.nix)
           contactPoints = {
             settings = {
               apiVersion = 1; # As per Grafana provisioning API
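For reference, the comment above relies on the module system merging the contributions that different modules make to this option. A minimal hand-written sketch of the intended resolution, assuming only this file and alerting/system/default.nix set the option and that list contributions concatenate (names reused from system/default.nix for illustration):

# This file contributes no groups of its own (lib.mkMerge []);
# alerting/system/default.nix contributes one, so the effective value is:
services.grafana.provision.alerting.rules.settings.groups = [
  {
    name = "System Alerts";
    folder = "System Alerts";
    interval = "1m";
    rules = allSystemRules; # CPU, disk, host-down, inode, and RAM rules
  }
];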