]> git-server-git.apps.pok.os.sepia.ceph.com Git - ceph.git/commitdiff
monitoring: fix CephPGImbalance alert rule expression 66828/head
authorAashish Sharma <Aashish.Sharma1@ibm.com>
Thu, 11 Dec 2025 08:38:11 +0000 (14:08 +0530)
committerAashish Sharma <aashish@li-e9bf2ecc-2ad7-11b2-a85c-baf05c5182ab.ibm.com>
Thu, 8 Jan 2026 04:43:49 +0000 (10:13 +0530)
The alert CephPGImbalance doesn't take any configured device classes into account. As a result, there can be false positives when using mixed-size OSD disks.
Ref: https://github.com/rook/rook/discussions/13126#discussioncomment-10043490

Fixes: https://tracker.ceph.com/issues/69690
Signed-off-by: Aashish Sharma <aasharma@redhat.com>
(cherry picked from commit 5b4f7373655fa829af359d6e3cc61416964a97f0)

monitoring/ceph-mixin/prometheus_alerts.yml
monitoring/ceph-mixin/tests_alerts/test_alerts.yml

index e58072b9bcfbf38e5b4020ccb698f6bb291a484f..8901a477a2a8efcfade1e628d01a0ff2ccae3275 100644 (file)
@@ -235,13 +235,25 @@ groups:
           type: "ceph_default"
       - alert: "CephPGImbalance"
         annotations:
-          description: "OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} deviates by more than 30% from average PG count."
+          description: "OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} deviates by more than 30% from average PG count in the device class {{ $labels.device_class }}."
           summary: "PGs are not balanced across OSDs on cluster {{ $labels.cluster }}"
         expr: |
           abs(
-            ((ceph_osd_numpg > 0) - on (cluster,job) group_left avg(ceph_osd_numpg > 0) by (cluster,job)) /
-            on (job) group_left avg(ceph_osd_numpg > 0) by (job)
-          ) * on (cluster,ceph_daemon) group_left(hostname) ceph_osd_metadata > 0.30
+            (
+              (
+                (ceph_osd_numpg > 0)
+                * on (cluster, job, ceph_daemon) group_left(hostname, device_class) ceph_osd_metadata
+              )
+              - on (cluster, job, device_class) group_left avg(
+                  (ceph_osd_numpg > 0)
+                  * on (cluster, job, ceph_daemon) group_left(hostname, device_class) ceph_osd_metadata
+                ) by (cluster, job, device_class)
+            )
+            / on (cluster, job, device_class) group_left avg(
+                (ceph_osd_numpg > 0)
+                * on (cluster, job, ceph_daemon) group_left(hostname, device_class) ceph_osd_metadata
+              ) by (cluster, job, device_class)
+          ) > 0.30
         for: "5m"
         labels:
           oid: "1.3.6.1.4.1.50495.1.2.1.4.5"
index 0beb1da6fddbf3a5864c076d1ae6104cef93827a..88caf7a3725eaee67a20e11ca5e2f1a149f9c6fd 100644 (file)
@@ -176,35 +176,36 @@ tests:
       values: '100 100 100 100 100 160'
     - series: 'ceph_osd_numpg{ceph_daemon="osd.3",instance="ceph:9283",job="ceph",cluster="mycluster"}'
       values: '100 100 100 100 100 160'
-    - series: 'ceph_osd_metadata{back_iface="eth0",ceph_daemon="osd.0",ceph_version="ceph version 17.0.0-189-g3558fd72
-        (3558fd7291855971aa6481a2ade468ad61fbb346) pacific (dev)",cluster_addr="172.20.0.2",device_class="hdd",front_iface="eth0",
-        hostname="ceph",instance="ceph:9283",job="ceph",objectstore="bluestore",public_addr="172.20.0.2",cluster="mycluster"}'
+    - series: 'ceph_osd_metadata{ceph_daemon="osd.0",device_class="hdd",hostname="ceph",instance="ceph:9283",job="ceph",cluster="mycluster"}'
       values: '1 1 1 1 1 1'
-    - series: 'ceph_osd_metadata{back_iface="eth0",ceph_daemon="osd.1",ceph_version="ceph version 17.0.0-189-g3558fd72
-        (3558fd7291855971aa6481a2ade468ad61fbb346) pacific (dev)",cluster_addr="172.20.0.2",device_class="hdd",front_iface="eth0",
-        hostname="ceph",instance="ceph:9283",job="ceph",objectstore="bluestore",public_addr="172.20.0.2",cluster="mycluster"}'
+    - series: 'ceph_osd_metadata{ceph_daemon="osd.1",device_class="hdd",hostname="ceph",instance="ceph:9283",job="ceph",cluster="mycluster"}'
       values: '1 1 1 1 1 1'
-    - series: 'ceph_osd_metadata{back_iface="eth0",ceph_daemon="osd.2",ceph_version="ceph version 17.0.0-189-g3558fd72
-        (3558fd7291855971aa6481a2ade468ad61fbb346) pacific (dev)",cluster_addr="172.20.0.2",device_class="hdd",front_iface="eth0",
-        hostname="ceph",instance="ceph:9283",job="ceph",objectstore="bluestore",public_addr="172.20.0.2",cluster="mycluster"}'
+    - series: 'ceph_osd_metadata{ceph_daemon="osd.2",device_class="hdd",hostname="ceph",instance="ceph:9283",job="ceph",cluster="mycluster"}'
       values: '1 1 1 1 1 1'
-    - series: 'ceph_osd_metadata{back_iface="eth0",ceph_daemon="osd.3",ceph_version="ceph version 17.0.0-189-g3558fd72
-        (3558fd7291855971aa6481a2ade468ad61fbb346) pacific (dev)",cluster_addr="172.20.0.2",device_class="hdd",front_iface="eth0",
-        hostname="ceph",instance="ceph:9283",job="ceph",objectstore="bluestore",public_addr="172.20.0.2",cluster="mycluster"}'
+    - series: 'ceph_osd_metadata{ceph_daemon="osd.3",device_class="hdd",hostname="ceph",instance="ceph:9283",job="ceph",cluster="mycluster"}'
       values: '1 1 1 1 1 1'
    promql_expr_test:
      - expr: |
          abs(
            (
-             (ceph_osd_numpg > 0) - on (job) group_left avg(ceph_osd_numpg > 0)
-             by (job)
-           ) / on (job) group_left avg(ceph_osd_numpg > 0) by (job)
-         ) * on(ceph_daemon) group_left(hostname) ceph_osd_metadata > 0.30
-
+             (
+               (ceph_osd_numpg > 0)
+               * on (cluster, job, ceph_daemon) group_left(hostname, device_class) ceph_osd_metadata
+             )
+             - on (cluster, job, device_class) group_left avg(
+                (ceph_osd_numpg > 0)
+                * on (cluster, job, ceph_daemon) group_left(hostname, device_class) ceph_osd_metadata
+              ) by (cluster, job, device_class)
+           )
+           / on (cluster, job, device_class) group_left avg(
+              (ceph_osd_numpg > 0)
+              * on (cluster, job, ceph_daemon) group_left(hostname, device_class) ceph_osd_metadata
+           ) by (cluster, job, device_class)
+          ) > 0.30
        eval_time: 5m
        exp_samples:
-         - labels: '{ceph_daemon="osd.1", hostname="ceph", instance="ceph:9283",job="ceph",cluster="mycluster"}'
-           value: 6E-01
+        - labels: '{ceph_daemon="osd.1", hostname="ceph", device_class="hdd", instance="ceph:9283", job="ceph", cluster="mycluster"}'
+          value: 0.60
    alert_rule_test:
      - eval_time: 10m
        alertname: CephPGImbalance
@@ -212,15 +213,16 @@ tests:
        - exp_labels:
            ceph_daemon: osd.1
            hostname: ceph
+           device_class: hdd
            instance: ceph:9283
            job: ceph
+           cluster: mycluster
            oid: 1.3.6.1.4.1.50495.1.2.1.4.5
            severity: warning
-           cluster: mycluster
            type: ceph_default
          exp_annotations:
            summary: PGs are not balanced across OSDs on cluster mycluster
-           description: "OSD osd.1 on ceph deviates by more than 30% from average PG count."
+           description: "OSD osd.1 on ceph deviates by more than 30% from average PG count in the device class hdd."
 
  # pgs inactive
  - interval: 1m