feat: restructure Grafana configuration, migrate alert rules to the new unified-alerting format, and add a VictoriaMetrics datasource
hosts/web-arm/modules/grafana/alerting/cpu_usage.nix (new file, 66 lines)
@@ -0,0 +1,66 @@
{ lib, pkgs, config, ... }:
{
  services.grafana.provision.alerting.rules.settings.groups = [
    {
      name = "CPUUsageAlerts";
      folder = "System Alerts";
      interval = "1m";

      rules = [
        {
          uid = "high-cpu-usage-alert-uid";
          title = "HighCPUUsage";
          condition = "D"; # Condition is now D

          data = [
            # Query A: Calculate CPU usage percentage
            {
              refId = "A";
              datasourceUid = "vm-datasource-uid";
              queryType = "prometheus";
              relativeTimeRange = { from = 60; to = 0; }; # Query over the last minute
              model = {
                # Calculate average CPU usage over 1m, grouped by instance and job
                expr = ''(1 - avg by (instance, job) (rate(node_cpu_seconds_total{mode="idle"}[1m]))) * 100'';
                legendFormat = "CPU usage on {{instance}} ({{job}})";
                instant = false; # This is a range query
              };
            }
            # Expression C: Reduce Query A to its last value, preserving labels
            {
              refId = "C";
              datasourceUid = "__expr__";
              model = {
                type = "reduce";
                expression = "A"; # Input is Query A
                reducer = "last"; # Get the last value of each series in A
              };
            }
            # Expression D: Apply the math condition to the reduced values from C
            {
              refId = "D";
              datasourceUid = "__expr__";
              model = {
                type = "math";
                expression = "$C > 90"; # Alert if CPU usage from C is > 90%
              };
            }
          ];

          for = "5m"; # Duration the condition must be met
          noDataState = "NoData";
          execErrState = "Error";

          annotations = {
            summary = "High CPU usage on {{ $labels.instance }}";
            description = ''CPU usage on {{ $labels.instance }} (job: {{ $labels.job }}) has been above 90% for more than 5 minutes. Current value: {{ if $values.C }}{{ $values.C | humanize }}{{ else }}N/A{{ end }}%.''; # humanize rather than humanizePercentage: the query already yields 0-100 and a literal % follows
          };
          labels = {
            severity = "warning";
            category = "performance";
          };
        }
      ];
    }
  ];
}
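The four rule files that follow repeat this query, reduce, math pipeline; only the expression, threshold, and duration change. Below is a minimal sketch of a helper that could factor out that shared shape; mkThresholdRule and every parameter name in it are hypothetical and not part of this commit:

# Hypothetical helper (illustration only); assumes the datasource UID
# this commit provisions.
mkThresholdRule = { uid, title, expr, threshold, duration }: {
  inherit uid title;
  condition = "D";
  for = duration;
  data = [
    # Query A: a PromQL expression yielding a 0-100 percentage
    {
      refId = "A";
      datasourceUid = "vm-datasource-uid";
      queryType = "prometheus";
      relativeTimeRange = { from = 60; to = 0; };
      model = { inherit expr; instant = false; };
    }
    # Expression C: reduce each series in A to its last value
    {
      refId = "C";
      datasourceUid = "__expr__";
      model = { type = "reduce"; expression = "A"; reducer = "last"; };
    }
    # Expression D: the threshold check that 'condition' points at
    {
      refId = "D";
      datasourceUid = "__expr__";
      model = { type = "math"; expression = "$C > ${toString threshold}"; };
    }
  ];
};

Under that sketch, the CPU rule above would collapse to mkThresholdRule { uid = "high-cpu-usage-alert-uid"; title = "HighCPUUsage"; expr = ...; threshold = 90; duration = "5m"; } merged with its annotations and labels.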
hosts/web-arm/modules/grafana/alerting/disk_usage.nix (new file, 85 lines)
@@ -0,0 +1,85 @@
{ lib, pkgs, config, ... }:
{
  services.grafana.provision.alerting.rules.settings.groups = [
    {
      # orgId = 1; # Defaults to 1 for provisioned rules
      name = "DiskUsageAlerts"; # Name of the rule group
      folder = "System Alerts"; # The folder these rules belong to in the Grafana UI
      interval = "1m"; # How often to evaluate rules in this group

      rules = [
        {
          uid = "high-disk-usage-alert-uid"; # Optional: a stable UID for the rule itself
          title = "HighDiskUsage"; # Name of the alert rule (was 'alert' in vmalert)

          # Condition for the alert to fire; 'D' is the refId of the threshold expression.
          condition = "D";
          # Removed the rule-level relativeTimeRange; the query below carries its own.

          # Data queries and expressions
          data = [
            # Query A: Calculate disk usage percentage
            {
              refId = "A";
              datasourceUid = "vm-datasource-uid"; # UID of the VictoriaMetrics datasource
              queryType = "prometheus"; # Explicitly set, though often inferred
              relativeTimeRange = { from = 60; to = 0; }; # Query-level, integer seconds
              model = {
                # The '> 0' in the divisor avoids division by zero; the trailing
                # 'and' clauses drop series where either metric is absent.
                expr = ''
                  (
                    node_filesystem_size_bytes{fstype!~"tmpfs|rootfs",mountpoint!=""} - node_filesystem_avail_bytes{fstype!~"tmpfs|rootfs",mountpoint!=""}
                  ) / (node_filesystem_size_bytes{fstype!~"tmpfs|rootfs",mountpoint!=""} > 0) * 100
                  and node_filesystem_size_bytes{fstype!~"tmpfs|rootfs",mountpoint!=""}
                  and node_filesystem_avail_bytes{fstype!~"tmpfs|rootfs",mountpoint!=""}
                '';
                legendFormat = "{{mountpoint}} on {{instance}}"; # Example legend
                instant = false; # For range queries, the default is false
              };
            }
            # Expression C: Reduce Query A to its last value, preserving labels
            {
              refId = "C";
              datasourceUid = "__expr__";
              model = {
                type = "reduce";
                expression = "A"; # Input is Query A
                reducer = "last"; # Get the last value of each series in A
              };
            }
            # Expression D: Apply the math condition to the reduced values from C
            {
              refId = "D";
              datasourceUid = "__expr__";
              model = {
                type = "math";
                expression = "$C > 85"; # Check if the last value from each series in C is > 85
              };
            }
          ];

          for = "15m"; # Duration the condition must be met (same as vmalert)

          # How to handle states where data is missing or the query errors
          noDataState = "NoData"; # Options: NoData, Alerting, OK
          execErrState = "Error"; # Options: Error, Alerting, OK

          annotations = {
            summary = "High disk usage on {{ $labels.instance }} at {{ $labels.mountpoint }}";
            description = ''
              Disk usage on {{ $labels.instance }} for mount point {{ $labels.mountpoint }}
              (fstype: {{ $labels.fstype }}) has been above 85% for more than 15 minutes.
              Current value: {{ if $values.C }}{{ $values.C | humanize }}{{ else }}N/A{{ end }}%.
            ''; # $values.C is the input to the math condition D; humanize, since the value is already 0-100
          };
          labels = {
            severity = "warning";
            category = "capacity";
            # Grafana automatically adds labels from the query result (instance, mountpoint, etc.)
            # and labels from the rule group/folder.
          };
          # isPaused = false; # Default is not paused
        }
      ];
    }
  ];
}
hosts/web-arm/modules/grafana/alerting/host_down.nix (new file, 62 lines)
@@ -0,0 +1,62 @@
{ lib, pkgs, config, ... }:
{
  services.grafana.provision.alerting.rules.settings.groups = [
    {
      name = "HostStatusAlerts";
      folder = "System Alerts";
      interval = "1m";

      rules = [
        {
          uid = "host-down-alert-uid";
          title = "HostDown";
          condition = "C";

          data = [
            {
              refId = "A";
              datasourceUid = "vm-datasource-uid";
              queryType = "prometheus";
              relativeTimeRange = { from = 60; to = 0; }; # Query over the last minute
              model = {
                expr = ''up'';
                legendFormat = "{{instance}} ({{job}})";
                instant = false; # Changed from true, since relativeTimeRange is used
              };
            }
            { # Expression B: Reduce Query A
              refId = "B";
              datasourceUid = "__expr__";
              model = {
                type = "reduce";
                expression = "A"; # Input is Query A
                reducer = "last"; # Get the last value of each series in A
              };
            }
            { # Expression C: Math condition based on B
              refId = "C";
              datasourceUid = "__expr__";
              model = {
                type = "math";
                expression = "$B == 0"; # Check if the last value from B is 0
              };
            }
          ];

          for = "2m";
          # A host that is down may stop producing data entirely, so missing
          # data must also raise the alert.
          noDataState = "Alerting";
          execErrState = "Error";

          annotations = {
            summary = "Host {{ $labels.instance }} is down";
            description = ''Host {{ $labels.instance }} (job: {{ $labels.job }}) has been down for more than 2 minutes.'';
          };
          labels = {
            severity = "critical";
            category = "availability";
          };
        }
      ];
    }
  ];
}
hosts/web-arm/modules/grafana/alerting/inode_usage.nix (new file, 71 lines)
@@ -0,0 +1,71 @@
{ lib, pkgs, config, ... }:
{
  services.grafana.provision.alerting.rules.settings.groups = [
    {
      name = "InodeUsageAlerts";
      folder = "System Alerts";
      interval = "1m";

      rules = [
        {
          uid = "high-inode-usage-alert-uid";
          title = "HighInodeUsage";
          condition = "D"; # Condition is now D

          data = [
            # Query A: Calculate inode usage percentage
            {
              refId = "A";
              datasourceUid = "vm-datasource-uid";
              queryType = "prometheus";
              relativeTimeRange = { from = 60; to = 0; };
              model = {
                # Same guard pattern as the disk rule: '> 0' avoids division by
                # zero, and the 'and' clauses drop series where either metric is absent.
                expr = ''
                  (
                    node_filesystem_files{fstype!~"tmpfs|rootfs",mountpoint!=""} - node_filesystem_files_free{fstype!~"tmpfs|rootfs",mountpoint!=""}
                  ) / (node_filesystem_files{fstype!~"tmpfs|rootfs",mountpoint!=""} > 0) * 100
                  and node_filesystem_files{fstype!~"tmpfs|rootfs",mountpoint!=""}
                  and node_filesystem_files_free{fstype!~"tmpfs|rootfs",mountpoint!=""}
                '';
                legendFormat = "{{mountpoint}} on {{instance}}";
                instant = false;
              };
            }
            # Expression C: Reduce Query A to its last value, preserving labels
            {
              refId = "C";
              datasourceUid = "__expr__";
              model = {
                type = "reduce";
                expression = "A"; # Input is Query A
                reducer = "last"; # Get the last value of each series in A
              };
            }
            # Expression D: Apply the math condition to the reduced values from C
            {
              refId = "D";
              datasourceUid = "__expr__";
              model = {
                type = "math";
                expression = "$C > 80"; # Alert if inode usage from C is > 80%
              };
            }
          ];

          for = "30m"; # Duration the condition must be met
          noDataState = "NoData";
          execErrState = "Error";

          annotations = {
            summary = "High inode usage on {{ $labels.instance }} at {{ $labels.mountpoint }}";
            description = ''Inode usage on {{ $labels.instance }} for mount point {{ $labels.mountpoint }} (fstype: {{ $labels.fstype }}) has been above 80% for more than 30 minutes. Current value: {{ if $values.C }}{{ $values.C | humanize }}{{ else }}N/A{{ end }}%.''; # humanize: the value is already 0-100
          };
          labels = {
            severity = "warning";
            category = "capacity";
          };
        }
      ];
    }
  ];
}
hosts/web-arm/modules/grafana/alerting/ram_usage.nix (new file, 69 lines)
@@ -0,0 +1,69 @@
{ lib, pkgs, config, ... }:
{
  services.grafana.provision.alerting.rules.settings.groups = [
    {
      name = "RAMUsageAlerts";
      folder = "System Alerts";
      interval = "1m";

      rules = [
        {
          uid = "high-ram-usage-alert-uid";
          title = "HighRAMUsage";
          condition = "D"; # Condition is now D

          data = [
            # Query A: Calculate RAM usage percentage
            {
              refId = "A";
              datasourceUid = "vm-datasource-uid";
              queryType = "prometheus";
              relativeTimeRange = { from = 60; to = 0; };
              model = {
                # '(... > 0)' avoids division by zero; the 'and' clauses drop
                # instances where either memory metric is missing.
                expr = ''
                  (1 - node_memory_MemAvailable_bytes / (node_memory_MemTotal_bytes > 0)) * 100
                  and node_memory_MemAvailable_bytes
                  and node_memory_MemTotal_bytes
                '';
                legendFormat = "RAM usage on {{instance}} ({{job}})";
                instant = false;
              };
            }
            # Expression C: Reduce Query A to its last value, preserving labels
            {
              refId = "C";
              datasourceUid = "__expr__";
              model = {
                type = "reduce";
                expression = "A"; # Input is Query A
                reducer = "last"; # Get the last value of each series in A
              };
            }
            # Expression D: Apply the math condition to the reduced values from C
            {
              refId = "D";
              datasourceUid = "__expr__";
              model = {
                type = "math";
                expression = "$C > 90"; # Alert if RAM usage from C is > 90%
              };
            }
          ];

          for = "10m"; # Duration the condition must be met
          noDataState = "NoData";
          execErrState = "Error";

          annotations = {
            summary = "High RAM usage on {{ $labels.instance }}";
            description = ''RAM usage on {{ $labels.instance }} (job: {{ $labels.job }}) has been above 90% for more than 10 minutes. Current value: {{ if $values.C }}{{ $values.C | humanize }}{{ else }}N/A{{ end }}%.''; # humanize: the value is already 0-100
          };
          labels = {
            severity = "warning";
            category = "performance";
          };
        }
      ];
    }
  ];
}
hosts/web-arm/modules/grafana/datasources/victoriametrics.nix (new file, 18 lines; path per the import list in default.nix below)
@@ -0,0 +1,18 @@
{ lib, pkgs, config, ... }:
{
  services.grafana.provision.datasources.settings.datasources = [
    {
      name = "VictoriaMetrics";
      uid = "vm-datasource-uid"; # Stable UID, referenced by the alert rules above
      type = "prometheus"; # VictoriaMetrics speaks the Prometheus query API
      url = "http://localhost:8428"; # URL of VictoriaMetrics
      access = "proxy"; # Grafana proxies requests
      isDefault = true; # Optional: make this the default datasource
      jsonData = {
        # timeInterval = "30s"; # Optional: scrape interval if different from Grafana's default
        # httpMethod = "POST"; # Optional: if VictoriaMetrics prefers POST for queries
      };
      editable = false; # Recommended for provisioned datasources
    }
  ];
}
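Each imported module assigns its own list to services.grafana.provision.datasources.settings.datasources, and the NixOS module system merges those lists (the lib.mkMerge [] in default.nix below keeps the option open as a merge point). A minimal sketch of how a second datasource module would slot in alongside this one; the Loki entry and its URL are hypothetical, only the merging mechanism comes from this commit:

{ lib, pkgs, config, ... }:
{
  # Merged with the VictoriaMetrics entry above by the module system;
  # every value in this entry is illustrative.
  services.grafana.provision.datasources.settings.datasources = [
    {
      name = "Loki";
      uid = "loki-datasource-uid"; # hypothetical UID
      type = "loki";
      url = "http://localhost:3100"; # hypothetical URL
      access = "proxy";
      editable = false;
    }
  ];
}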
hosts/web-arm/modules/grafana/default.nix (new file, 199 lines)
@@ -0,0 +1,199 @@
{ lib, pkgs, config, ... }:
let
  ldap = pkgs.writeTextFile {
    name = "ldap.toml";
    text = ''
      [[servers]]
      host = "ldap.cloonar.com"
      port = 636
      use_ssl = true
      bind_dn = "cn=grafana,ou=system,ou=users,dc=cloonar,dc=com"
      bind_password = "$__file{/run/secrets/grafana-ldap-password}"
      search_filter = "(&(objectClass=cloonarUser)(mail=%s))"
      search_base_dns = ["ou=users,dc=cloonar,dc=com"]

      [servers.attributes]
      name = "givenName"
      surname = "sn"
      username = "mail"
      email = "mail"
      member_of = "memberOf"

      [[servers.group_mappings]]
      group_dn = "cn=Administrators,ou=groups,dc=cloonar,dc=com"
      org_role = "Admin"
      grafana_admin = true # Available in Grafana v5.3 and above
    '';
  };
in
{
  imports = [
    ./alerting/disk_usage.nix
    ./alerting/cpu_usage.nix
    ./alerting/host_down.nix
    ./alerting/inode_usage.nix
    ./alerting/ram_usage.nix
    # ... other rule files can be added here ...
    ./datasources/victoriametrics.nix
  ];

  # Export secrets into the unit's environment before Grafana starts: the GF_*
  # variable overrides the OAuth client secret setting, and the Pushover
  # variables are interpolated into the provisioned contact point below.
  systemd.services.grafana.script = lib.mkBefore ''
    export GF_AUTH_GENERIC_OAUTH_CLIENT_SECRET=$(cat /run/secrets/grafana-oauth-secret)
    export PUSHOVER_API_TOKEN=$(cat /run/secrets/pushover-api-token)
    export PUSHOVER_USER_KEY=$(cat /run/secrets/pushover-user-key)
  '';
  services.grafana = {
    enable = true;
    settings = {
      analytics.reporting_enabled = false;
      "auth.ldap".enabled = true;
      "auth.ldap".config_file = toString ldap;

      "auth.generic_oauth" = {
        enabled = true;
        name = "Authelia";
        icon = "signin";
        client_id = "grafana";
        scopes = "openid profile email groups";
        empty_scopes = false;
        auth_url = "https://auth.cloonar.com/api/oidc/authorization";
        token_url = "https://auth.cloonar.com/api/oidc/token";
        api_url = "https://auth.cloonar.com/api/oidc/userinfo";
        login_attribute_path = "preferred_username";
        groups_attribute_path = "groups";
        role_attribute_path = "contains(groups, 'Administrators') && 'Admin' || contains(groups, 'editor') && 'Editor' || 'Viewer'";
        allow_assign_grafana_admin = true;
        name_attribute_path = "name";
        use_pkce = true;
      };
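      # For example: a userinfo payload whose groups contain "Administrators"
      # maps to 'Admin' under the JMESPath expression above; a user whose
      # groups contain "editor" gets 'Editor'; everyone else falls through
      # to 'Viewer'.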

      "auth.anonymous".enabled = true;
      "auth.anonymous".org_name = "Cloonar e.U.";
      "auth.anonymous".org_role = "Viewer";

      server = {
        root_url = "https://grafana.cloonar.com";
        domain = "grafana.cloonar.com";
        enforce_domain = true;
        enable_gzip = true;
        http_addr = "0.0.0.0";
        http_port = 3001;
      };

      smtp = {
        enabled = true;
        host = "mail.cloonar.com:587";
        user = "grafana@cloonar.com";
        password = "$__file{${config.sops.secrets.grafana-ldap-password.path}}";
        from_address = "grafana@cloonar.com"; # grafana.ini key is from_address
      };

      database = {
        type = "postgres";
        name = "grafana";
        host = "/run/postgresql";
        user = "grafana";
      };

      security.admin_password = "$__file{${config.sops.secrets.grafana-admin-password.path}}";
    };

    provision = {
      alerting = {
        rules.settings.groups = lib.mkMerge []; # Merge point for the rule groups defined in ./alerting/*.nix
        contactPoints = {
          settings = {
            apiVersion = 1; # As per the Grafana provisioning API
            contactPoints = [{
              orgId = 1;
              name = "cp_dominik";
              receivers = [{
                uid = "dominik_pushover_cp_receiver";
                type = "pushover";
                settings = {
                  # Expanded by Grafana from the environment variables exported
                  # in the systemd pre-script above.
                  apiToken = "\${PUSHOVER_API_TOKEN}";
                  userKey = "\${PUSHOVER_USER_KEY}";
                  device = "iphone";
                  priority = 2;
                  retry = "30s";
                  expire = "2m";
                  sound = "siren";
                  okSound = "magic";
                  message = ''
                    {{ template "default.message" . }}
                  '';
                };
              }];
            }];
          };
        };

        policies = { # The option is named 'policies', not 'notificationPolicies'
          settings = {
            apiVersion = 1; # As per the Grafana provisioning API

            # Grafana's unified alerting expects a single notification policy
            # tree per organization. For the default org (ID 1), the entry
            # below defines the root of that tree; the NixOS module translates
            # the list under `services.grafana.provision.alerting.policies.settings.policies`
            # into the corresponding provisioning YAML.
            policies = [{
              # orgId = 1; # Implicit for the default organization
              receiver = "cp_dominik"; # Default receiver for the root route

              # Setting only the root receiver sends every alert to cp_dominik.
              # More specific routing would go in nested routes, e.g.:
              # routes = [
              #   {
              #     object_matchers = [ [ "severity" "=" "critical" ] ];
              #     receiver = "critical_alerts_receiver"; # Another contact point
              #     continue = false;
              #   }
              # ];
            }];
            # resetPolicies = false; # Default; set to true to remove existing policies not in this config.
          };
        };
      };
      datasources.settings.datasources = lib.mkMerge []; # Merge point for datasources defined in ./datasources/*.nix
    };
  };

  services.nginx.virtualHosts."grafana.cloonar.com" = {
    forceSSL = true;
    enableACME = true;
    acmeRoot = null;
    locations."/".extraConfig = "proxy_pass http://localhost:3001;";
  };

  services.postgresql.ensureUsers = [
    {
      name = "grafana";
      ensureDBOwnership = true;
    }
  ];
  services.postgresql.ensureDatabases = [ "grafana" ];
  services.postgresqlBackup.databases = [ "grafana" ];

  sops.secrets = {
    grafana-admin-password.owner = "grafana";
    grafana-ldap-password.owner = "grafana";
    grafana-oauth-secret.owner = "grafana";
    pushover-api-token.owner = "grafana";
    pushover-user-key.owner = "grafana";
  };
}