Alerts


/etc/config/alerting_rules.yml > BlackBox Alerts
Probe failure (0 active)
alert: Probe failure
expr: probe_success{job=~"compliance-service-check|dpm-service-check",namespace!="kube-system"} == 0
for: 1m
annotations:
  summary: The service {{ $labels.job }} is unreachable or down. Please check the cluster for further information.
Public endpoint check (0 active)
alert: Public endpoint check
expr: probe_success{job=~"external.*"} == 0
for: 1m
labels:
  severity: warning
annotations:
  summary: The service {{ $labels.job }} is unreachable from the internet. Please check whether the URL points to the public endpoint.
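The same selectors can be queried ad hoc in Prometheus to see the current probe status before either rule reaches its 1m hold time; a minimal check, assuming the job names match the two rules above:
  probe_success{job=~"compliance-service-check|dpm-service-check|external.*"}
Any series with a value of 0 corresponds to an endpoint that is currently failing its probe.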
/etc/config/alerting_rules.yml > MSSQL Alerts
KubernetesPodNotHealthy (32 active)
alert: KubernetesPodNotHealthy
expr: min_over_time(sum by(namespace, pod) (kube_pod_status_phase{namespace!="kube-system",phase=~"Pending|Unknown|Failed"})[15m:1m]) > 0
labels:
  severity: critical
annotations:
  description: |-
    Pod has been in a non-ready state for longer than 15 minutes.
      VALUE = {{ $value }}
      LABELS = {{ $labels }}
  summary: Kubernetes Pod not healthy (instance {{ $labels.pod }})
Labels State Active Since Value
alertname="KubernetesPodNotHealthy" namespace="p453" pod="svc-reporting-8555988fdf-22src" severity="critical" firing 2026-01-01 05:48:46.936640537 +0000 UTC 1
alertname="KubernetesPodNotHealthy" namespace="p453" pod="svc-reporting-8555988fdf-8s2n6" severity="critical" firing 2026-01-01 05:48:46.936640537 +0000 UTC 1
alertname="KubernetesPodNotHealthy" namespace="p453" pod="svc-reporting-8555988fdf-62mrp" severity="critical" firing 2026-01-01 05:48:46.936640537 +0000 UTC 1
alertname="KubernetesPodNotHealthy" namespace="p453" pod="svc-reporting-8555988fdf-75v6g" severity="critical" firing 2026-01-06 22:25:46.936640537 +0000 UTC 1
alertname="KubernetesPodNotHealthy" namespace="p453" pod="svc-reporting-8555988fdf-dvb6v" severity="critical" firing 2026-01-07 00:07:46.936640537 +0000 UTC 1
alertname="KubernetesPodNotHealthy" namespace="p453" pod="svc-reporting-8555988fdf-c9grz" severity="critical" firing 2026-01-01 05:48:46.936640537 +0000 UTC 1
alertname="KubernetesPodNotHealthy" namespace="linkerd" pod="linkerd-heartbeat-29391226-46pc7" severity="critical" firing 2025-11-18 13:48:46.936640537 +0000 UTC 1
alertname="KubernetesPodNotHealthy" namespace="p453" pod="svc-reporting-8555988fdf-9nph4" severity="critical" firing 2026-01-01 05:48:46.936640537 +0000 UTC 1
alertname="KubernetesPodNotHealthy" namespace="p453" pod="svc-reporting-8555988fdf-z4fqq" severity="critical" firing 2026-01-01 05:48:46.936640537 +0000 UTC 1
alertname="KubernetesPodNotHealthy" namespace="linkerd" pod="linkerd-heartbeat-29391226-ks5w2" severity="critical" firing 2025-11-18 13:49:46.936640537 +0000 UTC 1
alertname="KubernetesPodNotHealthy" namespace="linkerd" pod="linkerd-heartbeat-29391226-249rq" severity="critical" firing 2025-11-18 13:52:46.936640537 +0000 UTC 1
alertname="KubernetesPodNotHealthy" namespace="linkerd" pod="linkerd-heartbeat-29391226-qn59z" severity="critical" firing 2025-11-18 13:47:46.936640537 +0000 UTC 1
alertname="KubernetesPodNotHealthy" namespace="linkerd" pod="linkerd-heartbeat-29391226-85rpt" severity="critical" firing 2025-11-18 13:47:46.936640537 +0000 UTC 1
alertname="KubernetesPodNotHealthy" namespace="p453" pod="svc-reporting-8555988fdf-ndhg7" severity="critical" firing 2026-01-01 05:48:46.936640537 +0000 UTC 1
alertname="KubernetesPodNotHealthy" namespace="linkerd" pod="linkerd-heartbeat-29391226-zxn5w" severity="critical" firing 2025-11-18 13:47:46.936640537 +0000 UTC 1
alertname="KubernetesPodNotHealthy" namespace="linkerd" pod="linkerd-heartbeat-29391226-284mw" severity="critical" firing 2025-11-18 13:58:46.936640537 +0000 UTC 1
alertname="KubernetesPodNotHealthy" namespace="p453" pod="svc-reporting-8555988fdf-7vhnh" severity="critical" firing 2026-01-01 05:48:46.936640537 +0000 UTC 1
alertname="KubernetesPodNotHealthy" namespace="p453" pod="svc-reporting-8555988fdf-kxs42" severity="critical" firing 2026-01-01 05:48:46.936640537 +0000 UTC 1
alertname="KubernetesPodNotHealthy" namespace="p453" pod="svc-reporting-8555988fdf-r4lxh" severity="critical" firing 2026-01-01 05:48:46.936640537 +0000 UTC 1
alertname="KubernetesPodNotHealthy" namespace="p453" pod="svc-reporting-8555988fdf-nt5dt" severity="critical" firing 2026-01-01 05:48:46.936640537 +0000 UTC 1
alertname="KubernetesPodNotHealthy" namespace="p453" pod="svc-reporting-8555988fdf-6wl4g" severity="critical" firing 2026-01-01 05:48:46.936640537 +0000 UTC 1
alertname="KubernetesPodNotHealthy" namespace="p453" pod="svc-reporting-8555988fdf-xr4tn" severity="critical" firing 2026-01-01 05:48:46.936640537 +0000 UTC 1
alertname="KubernetesPodNotHealthy" namespace="p453" pod="svc-reporting-8555988fdf-4gmgg" severity="critical" firing 2026-01-01 05:48:46.936640537 +0000 UTC 1
alertname="KubernetesPodNotHealthy" namespace="p453" pod="svc-reporting-8555988fdf-cs95m" severity="critical" firing 2026-01-01 05:48:46.936640537 +0000 UTC 1
alertname="KubernetesPodNotHealthy" namespace="p453" pod="svc-reporting-8555988fdf-qccrg" severity="critical" firing 2026-01-01 06:02:46.936640537 +0000 UTC 1
alertname="KubernetesPodNotHealthy" namespace="p453" pod="svc-reporting-8555988fdf-gcznk" severity="critical" firing 2026-01-01 05:48:46.936640537 +0000 UTC 1
alertname="KubernetesPodNotHealthy" namespace="p453" pod="svc-reporting-8555988fdf-gfjfp" severity="critical" firing 2026-01-01 05:48:46.936640537 +0000 UTC 1
alertname="KubernetesPodNotHealthy" namespace="p453" pod="svc-reporting-8555988fdf-448b7" severity="critical" firing 2026-01-01 05:48:46.936640537 +0000 UTC 1
alertname="KubernetesPodNotHealthy" namespace="p453" pod="svc-reporting-8555988fdf-sk8rv" severity="critical" firing 2026-01-01 05:48:46.936640537 +0000 UTC 1
alertname="KubernetesPodNotHealthy" namespace="p453" pod="svc-reporting-8555988fdf-srm9z" severity="critical" firing 2026-01-01 05:48:46.936640537 +0000 UTC 1
alertname="KubernetesPodNotHealthy" namespace="p453" pod="svc-reporting-8555988fdf-f7glj" severity="critical" firing 2026-01-01 05:48:46.936640537 +0000 UTC 1
alertname="KubernetesPodNotHealthy" namespace="p453" pod="job-licensestatuscheck-0.0.9.228.0-zxdn8" severity="critical" firing 2026-01-15 02:16:46.936640537 +0000 UTC 1
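Because the expression takes min_over_time over a 15-minute window, a pod only fires after it has stayed in Pending, Unknown or Failed for the whole window. To see which pods are in a bad phase right now, the inner part of the expression can be run on its own:
  sum by (namespace, pod) (kube_pod_status_phase{namespace!="kube-system",phase=~"Pending|Unknown|Failed"}) > 0
Series returned here join the table above once they have been unhealthy for 15 consecutive minutes.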
DatabaseMaintainenceJobCountIncreased (0 active)
alert: DatabaseMaintainenceJobCountIncreased
expr: jobcount{job="prometheus-query-exporter"} > 0
for: 1m
annotations:
  description: Database maintenance job count for the database is greater than zero.
  summary: Database maintenance job count increased
HostHighCpuLoad (0 active)
alert: HostHighCpuLoad
expr: 100 - (avg by(instance) (rate(node_cpu_seconds_total{mode="idle"}[2m])) * 100) > 80
for: 2m
labels:
  severity: warning
annotations:
  description: |-
    CPU load is > 80%
      VALUE = {{ $value }}
      LABELS = {{ $labels }}
  summary: Host high CPU load (instance {{ $labels.instance }})
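The expression works backwards from the idle counter: the per-instance average of the idle CPU rate is the fraction of time the CPUs spent idle, so subtracting it (scaled to percent) from 100 gives the busy percentage. The idle side can be graphed on its own to see how much headroom a host has:
  avg by (instance) (rate(node_cpu_seconds_total{mode="idle"}[2m])) * 100
A value of 20 here is equivalent to the 80% load threshold used by the rule.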
KubernetesDiskPressure (0 active)
alert: KubernetesDiskPressure
expr: kube_node_status_condition{condition="DiskPressure",namespace!="kube-system",status="true"} == 1
for: 2m
labels:
  severity: critical
annotations:
  description: |-
    {{ $labels.node }} has DiskPressure condition
      VALUE = {{ $value }}
      LABELS = {{ $labels }}
  summary: Kubernetes disk pressure (node {{ $labels.node }})
KubernetesMemoryPressure (0 active)
alert: KubernetesMemoryPressure
expr: kube_node_status_condition{condition="MemoryPressure",namespace!="kube-system",status="true"} == 1
for: 2m
labels:
  severity: critical
annotations:
  description: |-
    {{ $labels.node }} has MemoryPressure condition
      VALUE = {{ $value }}
      LABELS = {{ $labels }}
  summary: Kubernetes memory pressure (node {{ $labels.node }})
KubernetesNodeReady (0 active)
alert: KubernetesNodeReady
expr: kube_node_status_condition{condition="Ready",namespace!="kube-system",status="true"} == 0
for: 10m
labels:
  severity: critical
annotations:
  description: |-
    Node {{ $labels.node }} has been unready for longer than 10 minutes
      VALUE = {{ $value }}
      LABELS = {{ $labels }}
  summary: Kubernetes node not ready (node {{ $labels.node }})
KubernetesOutOfCapacity (0 active)
alert: KubernetesOutOfCapacity
expr: sum by(node) ((kube_pod_status_phase{namespace!="kube-system",phase="Running"} == 1) + on(pod, namespace) group_left(node) (0 * kube_pod_info)) / sum by(node) (kube_node_status_allocatable_pods{namespace!="kube-system"}) * 100 > 90
for: 2m
labels:
  severity: warning
annotations:
  description: |-
    {{ $labels.node }} is out of capacity
      VALUE = {{ $value }}
      LABELS = {{ $labels }}
  summary: Kubernetes out of capacity (node {{ $labels.node }})
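The capacity expression is a ratio of two per-node sums, which can be inspected separately when a node is close to the 90% threshold; both halves are taken verbatim from the rule above:
  sum by(node) ((kube_pod_status_phase{namespace!="kube-system",phase="Running"} == 1) + on(pod, namespace) group_left(node) (0 * kube_pod_info))
  sum by(node) (kube_node_status_allocatable_pods{namespace!="kube-system"})
The first query counts running pods per node (kube_pod_info contributes only the node label, not a value), the second gives the allocatable pod slots per node; the alert fires when the ratio stays above 90% for 2 minutes.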
KubernetesOutOfDisk (0 active)
alert: KubernetesOutOfDisk
expr: kube_node_status_condition{condition="OutOfDisk",namespace!="kube-system",status="true"} == 1
for: 2m
labels:
  severity: critical
annotations:
  description: |-
    {{ $labels.node }} has OutOfDisk condition
      VALUE = {{ $value }}
      LABELS = {{ $labels }}
  summary: Kubernetes out of disk (node {{ $labels.node }})
KubernetesPersistentvolumeError (0 active)
alert: KubernetesPersistentvolumeError
expr: kube_persistentvolume_status_phase{job="kube-state-metrics",namespace!="kube-system",phase=~"Failed|Pending"} > 0
labels:
  severity: critical
annotations:
  description: |-
    PersistentVolume {{ $labels.persistentvolume }} is in a Failed or Pending state
      VALUE = {{ $value }}
      LABELS = {{ $labels }}
  summary: Kubernetes PersistentVolume error ({{ $labels.persistentvolume }})
KubernetesPersistentvolumeclaimPending (0 active)
alert: KubernetesPersistentvolumeclaimPending
expr: kube_persistentvolumeclaim_status_phase{namespace!="kube-system",phase="Pending"} == 1
for: 2m
labels:
  severity: warning
annotations:
  description: |-
    PersistentVolumeClaim {{ $labels.namespace }}/{{ $labels.persistentvolumeclaim }} is pending
      VALUE = {{ $value }}
      LABELS = {{ $labels }}
  summary: Kubernetes PersistentVolumeClaim pending ({{ $labels.namespace }}/{{ $labels.persistentvolumeclaim }})
KubernetesPodCrashLooping (0 active)
alert: KubernetesPodCrashLooping
expr: increase(kube_pod_container_status_restarts_total{namespace!="kube-system"}[1m]) > 3
for: 2m
labels:
  severity: warning
annotations:
  description: |-
    Pod {{ $labels.pod }} is crash looping
      VALUE = {{ $value }}
      LABELS = {{ $labels }}
  summary: Kubernetes pod crash looping (instance {{ $labels.pod }})
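For triage it can help to widen the window and sum restarts per pod rather than per container; this is a sketch, with the one-hour range being an arbitrary choice and not part of the rule:
  sum by (namespace, pod) (increase(kube_pod_container_status_restarts_total{namespace!="kube-system"}[1h]))
Pods with a steadily growing value here are the ones most likely to trip the >3 restarts-per-minute threshold above.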
KubernetesVolumeOutOfDiskSpace (0 active)
MSSQL connectivity alert (0 active)
alert: MSSQL connectivity alert
expr: up{job="prometheus-mssql-exporter"} == 0
for: 1m
labels:
  severity: critical
annotations:
  summary: The service {{ $labels.job }} is unreachable or down. Please check the MSSQL server for further information.
compliance alert (0 active)
alert: compliance alert
expr: probe_success{job="compliance",namespace!="kube-system"} == 1
labels:
  severity: warning
annotations:
  summary: Compliance is enabled for the service {{ $labels.job }}.
compliance alert (0 active)
alert: compliance alert
expr: probe_success{job="compliance",namespace!="kube-system"} == 0
labels:
  Notification: None
  severity: warning
annotations:
  summary: Compliance is disabled for the service {{ $labels.job }}.