]> git.apps.os.sepia.ceph.com Git - ceph.git/commitdiff
mgr/prometheus: expose daemon health metrics 48843/head
authorPere Diaz Bou <pdiazbou@redhat.com>
Fri, 11 Nov 2022 09:43:01 +0000 (10:43 +0100)
committerPere Diaz Bou <pdiazbou@redhat.com>
Tue, 20 Dec 2022 08:44:49 +0000 (09:44 +0100)
Until now daemon health metrics were stored without being used. One of
the most helpful metrics there is SLOW_OPS with respect to OSDs and MONs
which this commit tries to expose to bring fine grained metrics to find
troublesome OSDs instead of having a lone healthcheck of slow ops in the
whole cluster.

Signed-off-by: Pere Diaz Bou <pdiazbou@redhat.com>
16 files changed:
doc/mgr/modules.rst
monitoring/ceph-mixin/dashboards/host.libsonnet
monitoring/ceph-mixin/dashboards/osd.libsonnet
monitoring/ceph-mixin/dashboards_out/host-details.json
monitoring/ceph-mixin/dashboards_out/osds-overview.json
monitoring/ceph-mixin/prometheus_alerts.libsonnet
monitoring/ceph-mixin/prometheus_alerts.yml
monitoring/ceph-mixin/tests_alerts/test_alerts.yml
src/mgr/ActivePyModules.cc
src/mgr/ActivePyModules.h
src/mgr/BaseMgrModule.cc
src/mgr/DaemonHealthMetric.h
src/mgr/DaemonServer.cc
src/pybind/mgr/ceph_module.pyi
src/pybind/mgr/mgr_module.py
src/pybind/mgr/prometheus/module.py

index 454839e2abd4c30fb48140175d06b4ff0a2407fa..667664139739fef67e3a0eb44567039dd40f0af9 100644 (file)
@@ -508,6 +508,7 @@ function. This will result in a circular locking exception.
 .. automethod:: MgrModule.get_perf_schema
 .. automethod:: MgrModule.get_counter
 .. automethod:: MgrModule.get_mgr_id
+.. automethod:: MgrModule.get_daemon_health_metrics
 
 Exposing health checks
 ----------------------
index 3e0b31f2c4595fe110ffdc2b0f8bb390f8aee4e4..1c66120af9c2cc57653ed18f6c2b424a8f51b0f6 100644 (file)
@@ -719,5 +719,30 @@ local g = import 'grafonnet/grafana.libsonnet';
         11,
         9
       ),
+      $.addTableSchema(
+        '$datasource',
+        'This table shows the 10 hosts with the highest number of slow ops',
+        { col: 2, desc: true },
+        [
+          $.overviewStyle('Instance', 'instance', 'string', 'short'),
+          $.overviewStyle('Slow Ops', 'Value', 'number', 'none'),
+          $.overviewStyle('', '/.*/', 'hidden', 'short'),
+        ],
+        'Top Slow Ops per Host',
+        'table'
+      )
+      .addTarget(
+        $.addTargetSchema(
+          |||
+            topk(10,
+              (sum by (instance)(ceph_daemon_health_metrics{type="SLOW_OPS", ceph_daemon=~"osd.*"}))
+            )
+          ||| % $.matchers(),
+          '',
+          'table',
+          1,
+          true
+        )
+      ) + { gridPos: { x: 0, y: 40, w: 4, h: 8 } },
     ]),
 }
index 129b74ba6669112df29001a6f63345c860678afe..0ea43c96ff9f04f44d445ef6124c28abdd13fe52 100644 (file)
@@ -300,6 +300,31 @@ local g = import 'grafonnet/grafana.libsonnet';
       .addTargets([$.addTargetSchema(
         'round(sum(rate(ceph_pool_wr{%(matchers)s}[$__rate_interval])))' % $.matchers(), 'Writes'
       )]),
+      $.addTableSchema(
+        '$datasource',
+        'This table shows the 10 OSDs with the highest number of slow ops',
+        { col: 2, desc: true },
+        [
+          $.overviewStyle('OSD ID', 'ceph_daemon', 'string', 'short'),
+          $.overviewStyle('Slow Ops', 'Value', 'number', 'none'),
+          $.overviewStyle('', '/.*/', 'hidden', 'short'),
+        ],
+        'Top Slow Ops',
+        'table'
+      )
+      .addTarget(
+        $.addTargetSchema(
+          |||
+            topk(10,
+              (ceph_daemon_health_metrics{type="SLOW_OPS", ceph_daemon=~"osd.*"})
+            )
+          ||| % $.matchers(),
+          '',
+          'table',
+          1,
+          true
+        )
+      ) + { gridPos: { x: 0, y: 20, w: 4, h: 8 } },
     ]),
   'osd-device-details.json':
     local OsdDeviceDetailsPanel(title,
index 93c51f00941072123cc56e7e7a7651b8f4432f22..b8e08697a98bace90d74133de0a1d8c124891760 100644 (file)
                "show": true
             }
          ]
+      },
+      {
+         "columns": [ ],
+         "datasource": "$datasource",
+         "description": "This table shows the 10 hosts with the highest number of slow ops",
+         "gridPos": {
+            "h": 8,
+            "w": 4,
+            "x": 0,
+            "y": 40
+         },
+         "id": 15,
+         "links": [ ],
+         "sort": {
+            "col": 2,
+            "desc": true
+         },
+         "styles": [
+            {
+               "alias": "Instance",
+               "colorMode": null,
+               "colors": [
+                  "rgba(245, 54, 54, 0.9)",
+                  "rgba(237, 129, 40, 0.89)",
+                  "rgba(50, 172, 45, 0.97)"
+               ],
+               "dateFormat": "YYYY-MM-DD HH:mm:ss",
+               "decimals": 2,
+               "mappingType": 1,
+               "pattern": "instance",
+               "thresholds": [ ],
+               "type": "string",
+               "unit": "short",
+               "valueMaps": [ ]
+            },
+            {
+               "alias": "Slow Ops",
+               "colorMode": null,
+               "colors": [
+                  "rgba(245, 54, 54, 0.9)",
+                  "rgba(237, 129, 40, 0.89)",
+                  "rgba(50, 172, 45, 0.97)"
+               ],
+               "dateFormat": "YYYY-MM-DD HH:mm:ss",
+               "decimals": 2,
+               "mappingType": 1,
+               "pattern": "Value",
+               "thresholds": [ ],
+               "type": "number",
+               "unit": "none",
+               "valueMaps": [ ]
+            },
+            {
+               "alias": "",
+               "colorMode": null,
+               "colors": [
+                  "rgba(245, 54, 54, 0.9)",
+                  "rgba(237, 129, 40, 0.89)",
+                  "rgba(50, 172, 45, 0.97)"
+               ],
+               "dateFormat": "YYYY-MM-DD HH:mm:ss",
+               "decimals": 2,
+               "mappingType": 1,
+               "pattern": "/.*/",
+               "thresholds": [ ],
+               "type": "hidden",
+               "unit": "short",
+               "valueMaps": [ ]
+            }
+         ],
+         "targets": [
+            {
+               "expr": "topk(10,\n  (sum by (instance)(ceph_daemon_health_metrics{type=\"SLOW_OPS\", ceph_daemon=~\"osd.*\"}))\n)\n",
+               "format": "table",
+               "instant": true,
+               "intervalFactor": 1,
+               "legendFormat": "",
+               "refId": "A"
+            }
+         ],
+         "timeFrom": null,
+         "timeShift": null,
+         "title": "Top Slow Ops per Host",
+         "transform": "table",
+         "type": "table"
       }
    ],
    "refresh": "30s",
index 5ea8955b29740c028a2edf3cea311b287f42e666..b34c6642263da46c6b5049b0f11b1fdde3c1cd91 100644 (file)
                "show": true
             }
          ]
+      },
+      {
+         "columns": [ ],
+         "datasource": "$datasource",
+         "description": "This table shows the 10 OSDs with the highest number of slow ops",
+         "gridPos": {
+            "h": 8,
+            "w": 4,
+            "x": 0,
+            "y": 20
+         },
+         "id": 13,
+         "links": [ ],
+         "sort": {
+            "col": 2,
+            "desc": true
+         },
+         "styles": [
+            {
+               "alias": "OSD ID",
+               "colorMode": null,
+               "colors": [
+                  "rgba(245, 54, 54, 0.9)",
+                  "rgba(237, 129, 40, 0.89)",
+                  "rgba(50, 172, 45, 0.97)"
+               ],
+               "dateFormat": "YYYY-MM-DD HH:mm:ss",
+               "decimals": 2,
+               "mappingType": 1,
+               "pattern": "ceph_daemon",
+               "thresholds": [ ],
+               "type": "string",
+               "unit": "short",
+               "valueMaps": [ ]
+            },
+            {
+               "alias": "Slow Ops",
+               "colorMode": null,
+               "colors": [
+                  "rgba(245, 54, 54, 0.9)",
+                  "rgba(237, 129, 40, 0.89)",
+                  "rgba(50, 172, 45, 0.97)"
+               ],
+               "dateFormat": "YYYY-MM-DD HH:mm:ss",
+               "decimals": 2,
+               "mappingType": 1,
+               "pattern": "Value",
+               "thresholds": [ ],
+               "type": "number",
+               "unit": "none",
+               "valueMaps": [ ]
+            },
+            {
+               "alias": "",
+               "colorMode": null,
+               "colors": [
+                  "rgba(245, 54, 54, 0.9)",
+                  "rgba(237, 129, 40, 0.89)",
+                  "rgba(50, 172, 45, 0.97)"
+               ],
+               "dateFormat": "YYYY-MM-DD HH:mm:ss",
+               "decimals": 2,
+               "mappingType": 1,
+               "pattern": "/.*/",
+               "thresholds": [ ],
+               "type": "hidden",
+               "unit": "short",
+               "valueMaps": [ ]
+            }
+         ],
+         "targets": [
+            {
+               "expr": "topk(10,\n  (ceph_daemon_health_metrics{type=\"SLOW_OPS\", ceph_daemon=~\"osd.*\"})\n)\n",
+               "format": "table",
+               "instant": true,
+               "intervalFactor": 1,
+               "legendFormat": "",
+               "refId": "A"
+            }
+         ],
+         "timeFrom": null,
+         "timeShift": null,
+         "title": "Top Slow Ops",
+         "transform": "table",
+         "type": "table"
       }
    ],
    "refresh": "30s",
index d56628fb2d75c054b1986b6fe9042cd1a6fe8b40..b7ec0da2f04aab24936bf46c979eb32cd63137fe 100644 (file)
             description: '{{ $value }} OSD requests are taking too long to process (osd_op_complaint_time exceeded)',
           },
         },
+        {
+          alert: 'CephDaemonSlowOps',
+          'for': '30s',
+          expr: 'ceph_daemon_health_metrics{type="SLOW_OPS"} > 0',
+          labels: { severity: 'warning', type: 'ceph_default' },
+          annotations: {
+            documentation: 'https://docs.ceph.com/en/latest/rados/operations/health-checks#slow-ops',
+            summary: '{{ $labels.ceph_daemon }} operations are slow to complete',
+            description: '{{ $labels.ceph_daemon }} operations are taking too long to process (complaint time exceeded)',
+          },
+        },
       ],
     },
     {
index 47fdd43a81b732dfb6f8be98b44ba8d7f825e215..a3d0cb3cc135c8b842ec136461f33cd12c786028 100644 (file)
@@ -572,6 +572,16 @@ groups:
         labels:
           severity: "warning"
           type: "ceph_default"
+      - alert: "CephDaemonSlowOps"
+        for: "30s"
+        expr: "ceph_daemon_health_metrics{type=\"SLOW_OPS\"} > 0"
+        labels:
+          severity: "warning"
+          type: "ceph_default"
+        annotations:
+          summary: "{{ $labels.ceph_daemon }} operations are slow to complete"
+          description: "{{ $labels.ceph_daemon }} operations are taking too long to process (complaint time exceeded)"
+          documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#slow-ops"
   - name: "cephadm"
     rules:
       - alert: "CephadmUpgradeFailed"
index 8902d9c1493a36a196e8e055687616cb59ec8377..06e1256d3135fe7c45455c0c0a6396b9612e4ad5 100644 (file)
@@ -710,6 +710,33 @@ tests:
            summary: OSD operations are slow to complete
            description: "1 OSD requests are taking too long to process (osd_op_complaint_time exceeded)"
 
+ # slow daemon ops
+ - interval: 1m
+   input_series:
+    - series: 'ceph_daemon_health_metrics{ceph_daemon="osd.1", instance="ceph:9283",job="ceph", type="SLOW_OPS"}'
+      values: '1+0x120'
+   promql_expr_test:
+     - expr: 'ceph_daemon_health_metrics{type="SLOW_OPS"} > 0'
+       eval_time: 1m
+       exp_samples:
+         - labels: '{__name__="ceph_daemon_health_metrics", ceph_daemon="osd.1",instance="ceph:9283",
+           job="ceph", type="SLOW_OPS"}'
+           value: 1
+   alert_rule_test:
+     - eval_time: 20m
+       alertname: CephDaemonSlowOps
+       exp_alerts:
+       - exp_labels:
+           instance: ceph:9283
+           ceph_daemon: "osd.1"
+           job: ceph
+           severity: warning
+           type: ceph_default
+         exp_annotations:
+           documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#slow-ops
+           summary: osd.1 operations are slow to complete
+           description: "osd.1 operations are taking too long to process (complaint time exceeded)"
+
 # CEPHADM orchestrator alert triggers
  - interval: 30s
    input_series:
index 58c3d9ee4d6f4d9fc062150f645fa518313e9048..8c070dd8cbf414148f2ec5264744db69a341c25d 100644 (file)
@@ -1515,3 +1515,26 @@ void ActivePyModules::unregister_client(std::string_view name, std::string addrs
   dout(7) << "unregistering msgr client handle " << addrv << dendl;
   py_module_registry.unregister_client(name, addrv);
 }
+
+PyObject* ActivePyModules::get_daemon_health_metrics()
+{
+  without_gil_t no_gil;  // drop the GIL before taking the daemon_state lock
+  return daemon_state.with_daemons_by_server([&no_gil]
+      (const std::map<std::string, DaemonStateCollection> &all) {
+      no_gil.acquire_gil();  // re-acquire: PyFormatter builds Python objects
+      PyFormatter f;
+      for (const auto &[hostname, daemon_state] : all) {
+        for (const auto &[key, state] : daemon_state) {
+          f.open_array_section(ceph::to_string(key));  // daemon key, e.g. "osd.0"
+          for (const auto &metric : state->daemon_health_metrics) {
+            f.open_object_section(metric.get_type_name());
+            f.dump_int("value", metric.get_n1());  // n1 carries the metric count
+            f.dump_string("type", metric.get_type_name());
+            f.close_section();
+          }
+          f.close_section();
+        }
+      }
+      return f.get();  // dict: daemon key -> list of {"value", "type"} entries
+  });
+}
index f283224f5880785c33209bedf7045df1ce6204e5..8f70b4fe05a3485206a9a99c557d04e00b4fdaa6 100644 (file)
@@ -222,6 +222,7 @@ public:
 
   void cluster_log(const std::string &channel, clog_type prio,
     const std::string &message);
+  PyObject* get_daemon_health_metrics();
 
   bool inject_python_on() const;
   void update_cache_metrics();
index ca441d5e539df1046c8b02ae0a6dc5231c1be345..4fb5b250b98b9cc79c404a8a27a454fae10f606a 100644 (file)
@@ -1411,6 +1411,12 @@ ceph_unregister_client(BaseMgrModule *self, PyObject *args)
   Py_RETURN_NONE;
 }
 
+static PyObject*
+ceph_get_daemon_health_metrics(BaseMgrModule *self, PyObject *args)
+{
+  return self->py_modules->get_daemon_health_metrics();  // takes no arguments; args unused
+}
+
 PyMethodDef BaseMgrModule_methods[] = {
   {"_ceph_get", (PyCFunction)ceph_state_get, METH_VARARGS,
    "Get a cluster object"},
@@ -1540,6 +1546,9 @@ PyMethodDef BaseMgrModule_methods[] = {
   {"_ceph_unregister_client", (PyCFunction)ceph_unregister_client,
     METH_VARARGS, "Unregister RADOS instance for potential blocklisting"},
 
+  {"_ceph_get_daemon_health_metrics", (PyCFunction)ceph_get_daemon_health_metrics,
+    METH_VARARGS, "Get health metrics for all daemons"},
+
   {NULL, NULL, 0, NULL}
 };
 
index ad3ea29efd46b14916fb961759a50744a184e3e6..ce0dad2c87e317ee43e374a682ae57e325f2555e 100644 (file)
@@ -44,8 +44,9 @@ public:
     : type(type_), value(n)
   {}
   DaemonHealthMetric(daemon_metric type_, uint32_t n1, uint32_t n2)
-    : type(type_), value(n1, n2)
+    : type(type_), value(n1, n2) 
   {}
+
   daemon_metric get_type() const {
     return type;
   }
@@ -58,6 +59,7 @@ public:
   uint32_t get_n2() const {
     return value.n2;
   }
+
   DENC(DaemonHealthMetric, v, p) {
     DENC_START(1, 1, p);
     denc(v.type, p);
@@ -65,6 +67,10 @@ public:
     DENC_FINISH(p);
   }
 
+  std::string get_type_name() const {
+    return daemon_metric_name(get_type());
+  }
+
   friend std::ostream& operator<<(std::ostream& out, const DaemonHealthMetric& m) {
     return out << daemon_metric_name(m.get_type()) << "("
               << m.get_n() << "|(" << m.get_n1() << "," << m.get_n2() << "))";
index 747faec2ec32d75d727f96a850933a9687d370dc..40a096cab11d2adcab40e252dfd6724e428a5bcf 100644 (file)
@@ -2622,8 +2622,6 @@ void DaemonServer::send_report()
                 << std::dec << dendl;
             continue;
           }
-         dout(20) << " + " << state->key << " "
-                  << metric << dendl;
           tie(acc, std::ignore) = accumulated.emplace(metric.get_type(),
               std::move(collector));
         }
index 1719192957611c7bfb34332d98a9afc43a89f33d..b89402d01be09ef6a53f5d1ef46e5463d3ea7e7e 100644 (file)
@@ -115,3 +115,4 @@ class BaseMgrModule(object):
     def _ceph_unregister_client(self, addrs: str) -> None: ...
     def _ceph_register_client(self, addrs: str) -> None: ...
     def _ceph_is_authorized(self, arguments: Dict[str, str]) -> bool: ...
+    def _ceph_get_daemon_health_metrics(self) -> Dict[str, List[Dict[str, Any]]]: ...
index 146901b1578efd3663a5d37391b7824f9130291d..d88c1de47652dd750b311578b3aaa2e4cb05609a 100644 (file)
@@ -2282,6 +2282,13 @@ class MgrModule(ceph_module.BaseMgrModule, MgrModuleLoggingMixin):
         """
         return self._ceph_get_mds_perf_counters(query_id)
 
+    def get_daemon_health_metrics(self) -> Dict[str, List[Dict[str, Any]]]:
+        """
+        Get the list of health metrics per daemon, keyed by daemon name (e.g. "osd.0").
+        Includes SLOW_OPS metrics for MON and OSD daemons, and PENDING_CREATING_PGS for OSDs.
+        """
+        return self._ceph_get_daemon_health_metrics()
+
     def is_authorized(self, arguments: Dict[str, str]) -> bool:
         """
         Verifies that the current session caps permit executing the py service
index 8e930bb304ed31e3780da373b82501ada1416345..b0399590bdcbd3845b73c86929a0cb5085183f6e 100644 (file)
@@ -1578,6 +1578,21 @@ class Module(MgrModule):
             self.metrics[path].set(stats['stat_sum']['num_objects_repaired'],
                                    labelvalues=(stats['poolid'],))
 
+    def get_all_daemon_health_metrics(self) -> None:
+        daemon_metrics = self.get_daemon_health_metrics()
+        self.log.debug('metrics jeje %s' % (daemon_metrics))
+        for daemon_name, health_metrics in daemon_metrics.items():
+            for health_metric in health_metrics:
+                path = f'daemon_health_metrics{daemon_name}{health_metric["type"]}'
+                self.metrics[path] = Metric(
+                    'counter',
+                    'daemon_health_metrics',
+                    'Health metrics for Ceph daemons',
+                    ('type', 'ceph_daemon',)
+                )
+                self.metrics[path].set(health_metric['value'], labelvalues=(
+                    health_metric['type'], daemon_name,))
+
     @profile_method(True)
     def collect(self) -> str:
         # Clear the metrics before scraping
@@ -1595,6 +1610,7 @@ class Module(MgrModule):
         self.get_pg_status()
         self.get_pool_repaired_objects()
         self.get_num_objects()
+        self.get_all_daemon_health_metrics()
 
         for daemon, counters in self.get_all_perf_counters().items():
             for path, counter_info in counters.items():