summaryrefslogtreecommitdiffstats
path: root/krebs/2configs/shack/prometheus
diff options
context:
space:
mode:
Diffstat (limited to 'krebs/2configs/shack/prometheus')
-rw-r--r--krebs/2configs/shack/prometheus/alert-rules.nix43
-rw-r--r--krebs/2configs/shack/prometheus/alertmanager-telegram.nix17
-rw-r--r--krebs/2configs/shack/prometheus/blackbox.nix19
-rw-r--r--krebs/2configs/shack/prometheus/server.nix155
-rw-r--r--krebs/2configs/shack/prometheus/templates/shack.tmpl25
5 files changed, 154 insertions, 105 deletions
diff --git a/krebs/2configs/shack/prometheus/alert-rules.nix b/krebs/2configs/shack/prometheus/alert-rules.nix
new file mode 100644
index 00000000..12c69146
--- /dev/null
+++ b/krebs/2configs/shack/prometheus/alert-rules.nix
@@ -0,0 +1,43 @@
+{ lib,... }:
+let
+ disk_free_threshold = "10"; # at least this much free disk percentage
+in {
+ services.prometheus.rules = [(builtins.toJSON
+ {
+ groups = [
+ { name = "shack-env";
+ rules = [
+ {
+ alert = "RootPartitionFull";
+ for = "30m";
+ expr = ''(node_filesystem_avail_bytes{alias="wolf.shack",mountpoint="/"} * 100) / node_filesystem_size_bytes{alias="wolf.shack",mountpoint="/"} < ${disk_free_threshold}'';
+ labels.severity = "warning";
+ annotations.summary = "{{ $labels.alias }} root disk full";
+ annotations.url = "http://grafana.shack/d/hb7fSE0Zz/shack-system-dashboard?orgId=1&var-job=node&var-hostname=All&var-node=wolf.shack:9100&var-device=All&var-maxmount=%2F&var-show_hostname=wolf";
+ annotations.description = ''The root disk of {{ $labels.alias }} has {{ $value | printf "%.2f" }}% free disk space (Threshold at ${disk_free_threshold}%). CI for deploying new configuration will seize working. Log in to the system and run `nix-collect-garbage -d` and clean up the shack share folder in `/home/share` .If this does not help you can check `du -hs /var/ | sort -h`, run `docker system prune` or if you are really desperate run `du -hs / | sort -h` and go through the folders recursively until you've found something to delete'';
+ }
+ {
+ alert = "RootPartitionFull";
+ for = "30m";
+ expr = ''(node_filesystem_avail_bytes{alias="puyak.shack",mountpoint="/"} * 100) / node_filesystem_size_bytes{alias="puyak.shack",mountpoint="/"} < ${disk_free_threshold}'';
+ labels.severity = "warning";
+ annotations.summary = "{{ $labels.alias }} root disk full";
+ annotations.url = "http://grafana.shack/d/hb7fSE0Zz/shack-system-dashboard?orgId=1&var-job=node&var-hostname=All&var-node=wolf.shack:9100&var-device=All&var-maxmount=%2F&var-show_hostname=puyak";
+ annotations.description = ''The root disk of {{ $labels.alias }} has {{ $value | printf "%.2f" }}% free disk space (Threshold at ${disk_free_threshold}%).Prometheus will not be able to create new alerts and CI for deploying new configuration will also seize working. Log in to the system and run `nix-collect-garbage -d` and if this does not help you can check `du -hs /var/ | sort -h`, run `docker system prune` or if you are really desperate run `du -hs / | sort -h` and go through the folders recursively until you've found something to delete'';
+ }
+ # wolf.shack is not worth supervising anymore
+ {
+ alert = "HostDown";
+ expr = ''up{alias="infra01.shack"} == 0'';
+ for = "5m";
+ labels.severity = "page";
+ annotations.summary = "Instance {{ $labels.alias }} down for 5 minutes";
+ annotations.url = "http://grafana.shack/d/hb7fSE0Zz/shack-system-dashboard?orgId=1&var-job=node&var-hostname=All&var-node=wolf.shack:9100&var-device=All&var-maxmount=%2F&var-show_hostname=wolf";
+ annotations.description = ''Host {{ $labels.alias }} went down and has not been reconnected after 5 minutes. This is probably bad news, as the machine runs one of the DNS servers and the power broadcast proxy which is used to be able to turn off the light via puyak as well as the shutdown listener.'';
+ }
+ ];
+ }
+ ];
+ }
+ )];
+}
diff --git a/krebs/2configs/shack/prometheus/alertmanager-telegram.nix b/krebs/2configs/shack/prometheus/alertmanager-telegram.nix
new file mode 100644
index 00000000..8527001c
--- /dev/null
+++ b/krebs/2configs/shack/prometheus/alertmanager-telegram.nix
@@ -0,0 +1,17 @@
+{ pkgs, ...}:
+{
+ systemd.services.alertmanager-bot-telegram = {
+ wantedBy = [ "multi-user.target" ];
+ after = [ "ip-up.target" ];
+ serviceConfig = {
+ EnvironmentFile = toString <secrets/shack/telegram_bot.env>;
+ DynamicUser = true;
+ StateDirectory = "alertbot";
+ ExecStart = ''${pkgs.alertmanager-bot-telegram}/bin/alertmanager-bot \
+ --alertmanager.url=http://alert.prometheus.shack --log.level=info \
+ --store=bolt --bolt.path=/var/lib/alertbot/bot.db \
+ --listen.addr="0.0.0.0:16320" \
+ --template.paths=${./templates}/shack.tmpl'';
+ };
+ };
+}
diff --git a/krebs/2configs/shack/prometheus/blackbox.nix b/krebs/2configs/shack/prometheus/blackbox.nix
new file mode 100644
index 00000000..82ce003e
--- /dev/null
+++ b/krebs/2configs/shack/prometheus/blackbox.nix
@@ -0,0 +1,19 @@
+{pkgs, ... }:
+{
+ systemd.services.prometheus-blackbox-exporter.serviceConfig = {
+ CapabilityBoundingSet = ["CAP_NET_RAW"]; # icmp allow
+ AmbientCapabilities = ["CAP_NET_RAW"];
+ };
+ services.prometheus.exporters.blackbox = {
+ enable = true;
+    # openFirewall = true; # not required if running on the same host as prometheus
+ port = 9115;
+ configFile = pkgs.writeText "icmp" ''
+ modules:
+ icmp:
+ prober: icmp
+ icmp:
+ preferred_ip_protocol: ip4
+ '';
+ };
+}
diff --git a/krebs/2configs/shack/prometheus/server.nix b/krebs/2configs/shack/prometheus/server.nix
index 7f6f3861..9e4b4d1a 100644
--- a/krebs/2configs/shack/prometheus/server.nix
+++ b/krebs/2configs/shack/prometheus/server.nix
@@ -1,6 +1,9 @@
{ pkgs, lib, config, ... }:
# from https://gist.github.com/globin/02496fd10a96a36f092a8e7ea0e6c7dd
{
+ imports = [
+ ./alert-rules.nix
+ ];
networking = {
firewall.allowedTCPPorts = [
9090 # prometheus
@@ -18,100 +21,6 @@
};
prometheus = {
enable = true;
- extraFlags = [
- "-storage.local.retention 720h"
- "-storage.local.series-file-shrink-ratio 0.3"
- "-storage.local.memory-chunks 2097152"
- "-storage.local.max-chunks-to-persist 1048576"
- "-storage.local.index-cache-size.fingerprint-to-metric 2097152"
- "-storage.local.index-cache-size.fingerprint-to-timerange 1048576"
- "-storage.local.index-cache-size.label-name-to-label-values 2097152"
- "-storage.local.index-cache-size.label-pair-to-fingerprints 41943040"
- ];
- rules = [
- ''
- ALERT node_down
- IF up == 0
- FOR 5m
- LABELS {
- severity="page"
- }
- ANNOTATIONS {
- summary = "{{$labels.alias}}: Node is down.",
- description = "{{$labels.alias}} has been down for more than 5 minutes."
- }
- ALERT node_systemd_service_failed
- IF node_systemd_unit_state{state="failed"} == 1
- FOR 4m
- LABELS {
- severity="page"
- }
- ANNOTATIONS {
- summary = "{{$labels.alias}}: Service {{$labels.name}} failed to start.",
- description = "{{$labels.alias}} failed to (re)start service {{$labels.name}}."
- }
- ALERT node_filesystem_full_90percent
- IF sort(node_filesystem_free{device!="ramfs"} < node_filesystem_size{device!="ramfs"} * 0.1) / 1024^3
- FOR 5m
- LABELS {
- severity="page"
- }
- ANNOTATIONS {
- summary = "{{$labels.alias}}: Filesystem is running out of space soon.",
- description = "{{$labels.alias}} device {{$labels.device}} on {{$labels.mountpoint}} got less than 10% space left on its filesystem."
- }
- ALERT node_filesystem_full_in_4h
- IF predict_linear(node_filesystem_free{device!="ramfs"}[1h], 4*3600) <= 0
- FOR 5m
- LABELS {
- severity="page"
- }
- ANNOTATIONS {
- summary = "{{$labels.alias}}: Filesystem is running out of space in 4 hours.",
- description = "{{$labels.alias}} device {{$labels.device}} on {{$labels.mountpoint}} is running out of space of in approx. 4 hours"
- }
- ALERT node_filedescriptors_full_in_3h
- IF predict_linear(node_filefd_allocated[1h], 3*3600) >= node_filefd_maximum
- FOR 20m
- LABELS {
- severity="page"
- }
- ANNOTATIONS {
- summary = "{{$labels.alias}} is running out of available file descriptors in 3 hours.",
- description = "{{$labels.alias}} is running out of available file descriptors in approx. 3 hours"
- }
- ALERT node_load1_90percent
- IF node_load1 / on(alias) count(node_cpu{mode="system"}) by (alias) >= 0.9
- FOR 1h
- LABELS {
- severity="page"
- }
- ANNOTATIONS {
- summary = "{{$labels.alias}}: Running on high load.",
- description = "{{$labels.alias}} is running with > 90% total load for at least 1h."
- }
- ALERT node_cpu_util_90percent
- IF 100 - (avg by (alias) (irate(node_cpu{mode="idle"}[5m])) * 100) >= 90
- FOR 1h
- LABELS {
- severity="page"
- }
- ANNOTATIONS {
- summary = "{{$labels.alias}}: High CPU utilization.",
- description = "{{$labels.alias}} has total CPU utilization over 90% for at least 1h."
- }
- ALERT node_ram_using_90percent
- IF node_memory_MemFree + node_memory_Buffers + node_memory_Cached < node_memory_MemTotal * 0.1
- FOR 30m
- LABELS {
- severity="page"
- }
- ANNOTATIONS {
- summary="{{$labels.alias}}: Using lots of RAM.",
- description="{{$labels.alias}} is using at least 90% of its RAM for at least 30 minutes now.",
- }
- ''
- ];
scrapeConfigs = [
{
job_name = "node";
@@ -119,7 +28,7 @@
static_configs = [
{
targets = [
- "localhost:9100"
+ "wolf.shack:9100"
];
labels = {
alias = "wolf.shack";
@@ -127,7 +36,15 @@
}
{
targets = [
- "localhost:9130"
+ "infra01.shack:9100"
+ ];
+ labels = {
+ alias = "infra01.shack";
+ };
+ }
+ {
+ targets = [
+ "unifi.shack:9130"
];
labels = {
alias = "unifi.shack";
@@ -135,7 +52,7 @@
}
{
targets = [
- "10.42.22.184:9100" # puyak.shack
+ "puyak.shack:9100" # puyak.shack
];
labels = {
alias = "puyak.shack";
@@ -159,6 +76,36 @@
}
];
}
+ {
+ job_name = "blackbox";
+ metrics_path = "/probe";
+ params.module = [ "icmp" ];
+ static_configs = [
+ {
+ targets = [
+ "google.com"
+ "wolf.shack"
+ "web.de"
+ "10.0.0.1"
+ "licht.shack"
+ ];
+ }
+ ];
+ relabel_configs = [
+ {
+ source_labels = ["__address__"];
+ target_label = "__param_target";
+ }
+ {
+ source_labels = ["__param_target"];
+ target_label = "instance";
+ }
+ {
+ target_label = "__address__";
+ replacement = "127.0.0.1:9115";
+ }
+ ];
+ }
];
alertmanagers = [
{ scheme = "http";
@@ -168,7 +115,10 @@
];
alertmanager = {
enable = true;
- listenAddress = "0.0.0.0";
+ listenAddress = "127.0.0.1";
+ webExternalUrl = "http://alert.prometheus.shack";
+ logLevel = "debug";
+
configuration = {
"global" = {
"smtp_smarthost" = "smtp.example.com:587";
@@ -184,15 +134,10 @@
"receivers" = [
{
"name" = "team-admins";
- "email_configs" = [
- {
- "to" = "devnull@example.com";
- "send_resolved" = true;
- }
- ];
+ "email_configs" = [ ];
"webhook_configs" = [
{
- "url" = "https://example.com/prometheus-alerts";
+ "url" = "http://localhost:16320";
"send_resolved" = true;
}
];
diff --git a/krebs/2configs/shack/prometheus/templates/shack.tmpl b/krebs/2configs/shack/prometheus/templates/shack.tmpl
new file mode 100644
index 00000000..9295f019
--- /dev/null
+++ b/krebs/2configs/shack/prometheus/templates/shack.tmpl
@@ -0,0 +1,25 @@
+{{ define "telegram.default" }}
+{{range .Alerts -}}
+{{ $severity := index .Labels "severity" }}
+{{ $desc := "No Description" }}
+{{ if eq .Status "firing" }}
+ {{ $desc = index .Annotations "description" }}
+ {{- if eq $severity "critical" -}}
+ <i><u><b>[CRITICAL]</b></u></i>
+ {{- else if eq $severity "warning" -}}
+ <u><b>[WARNING]</b></u>
+ {{- else -}}
+ <b>[{{ $severity }}]</b>
+ {{- end -}}
+{{ else -}}
+ {{ $desc = "The issue has been resolved" }}
+ <del>[RESOLVED]</del>
+{{- end }} {{ index .Labels "alertname"}}: {{ index .Annotations "summary"}}
+
+{{ $desc }}
+
+Alert Links:
+* <a href="{{ index .Annotations "url"}}">Grafana</a>
+* <a href="{{ .GeneratorURL }}">Source</a>
+{{end -}}
+{{end}}