]> git.apps.os.sepia.ceph.com Git - ceph.git/commitdiff
mgr/prometheus: expose daemon health metrics 49520/head
authorPere Diaz Bou <pdiazbou@redhat.com>
Fri, 11 Nov 2022 09:43:01 +0000 (10:43 +0100)
committerPere Diaz Bou <pdiazbou@redhat.com>
Tue, 20 Dec 2022 11:36:30 +0000 (12:36 +0100)
Until now, daemon health metrics were stored without being used. One of
the most helpful of these is SLOW_OPS for OSDs and MONs. This commit
exposes it to provide fine-grained metrics for finding troublesome OSDs,
instead of relying on a single cluster-wide slow-ops health check.

Signed-off-by: Pere Diaz Bou <pdiazbou@redhat.com>
(cherry picked from commit 5a2b7c25b68f2c955356640041e4c7ed72416d4e)

16 files changed:
doc/mgr/modules.rst
monitoring/ceph-mixin/dashboards/host.libsonnet
monitoring/ceph-mixin/dashboards/osd.libsonnet
monitoring/ceph-mixin/dashboards_out/host-details.json
monitoring/ceph-mixin/dashboards_out/osds-overview.json
monitoring/ceph-mixin/prometheus_alerts.libsonnet
monitoring/ceph-mixin/prometheus_alerts.yml
monitoring/ceph-mixin/tests_alerts/test_alerts.yml
src/mgr/ActivePyModules.cc
src/mgr/ActivePyModules.h
src/mgr/BaseMgrModule.cc
src/mgr/DaemonHealthMetric.h
src/mgr/DaemonServer.cc
src/pybind/mgr/ceph_module.pyi
src/pybind/mgr/mgr_module.py
src/pybind/mgr/prometheus/module.py

index 8979b4e6a537a3d18d53849f867fa367d8d617b1..9ac0d30f1afc89ef1f3f6ca1459588f376522d96 100644 (file)
@@ -312,6 +312,7 @@ function. This will result in a circular locking exception.
 .. automethod:: MgrModule.get_perf_schema
 .. automethod:: MgrModule.get_counter
 .. automethod:: MgrModule.get_mgr_id
+.. automethod:: MgrModule.get_daemon_health_metrics
 
 Exposing health checks
 ----------------------
index 3e0b31f2c4595fe110ffdc2b0f8bb390f8aee4e4..1c66120af9c2cc57653ed18f6c2b424a8f51b0f6 100644 (file)
@@ -719,5 +719,30 @@ local g = import 'grafonnet/grafana.libsonnet';
         11,
         9
       ),
+      $.addTableSchema(
+        '$datasource',
+        'This table shows the 10 hosts with the highest number of slow ops',
+        { col: 2, desc: true },
+        [
+          $.overviewStyle('Instance', 'instance', 'string', 'short'),
+          $.overviewStyle('Slow Ops', 'Value', 'number', 'none'),
+          $.overviewStyle('', '/.*/', 'hidden', 'short'),
+        ],
+        'Top Slow Ops per Host',
+        'table'
+      )
+      .addTarget(
+        $.addTargetSchema(
+          |||
+            topk(10,
+              (sum by (instance)(ceph_daemon_health_metrics{type="SLOW_OPS", ceph_daemon=~"osd.*"}))
+            )
+          ||| % $.matchers(),
+          '',
+          'table',
+          1,
+          true
+        )
+      ) + { gridPos: { x: 0, y: 40, w: 4, h: 8 } },
     ]),
 }
index 129b74ba6669112df29001a6f63345c860678afe..0ea43c96ff9f04f44d445ef6124c28abdd13fe52 100644 (file)
@@ -300,6 +300,31 @@ local g = import 'grafonnet/grafana.libsonnet';
       .addTargets([$.addTargetSchema(
         'round(sum(rate(ceph_pool_wr{%(matchers)s}[$__rate_interval])))' % $.matchers(), 'Writes'
       )]),
+      $.addTableSchema(
+        '$datasource',
+        'This table shows the 10 OSDs with the highest number of slow ops',
+        { col: 2, desc: true },
+        [
+          $.overviewStyle('OSD ID', 'ceph_daemon', 'string', 'short'),
+          $.overviewStyle('Slow Ops', 'Value', 'number', 'none'),
+          $.overviewStyle('', '/.*/', 'hidden', 'short'),
+        ],
+        'Top Slow Ops',
+        'table'
+      )
+      .addTarget(
+        $.addTargetSchema(
+          |||
+            topk(10,
+              (ceph_daemon_health_metrics{type="SLOW_OPS", ceph_daemon=~"osd.*"})
+            )
+          ||| % $.matchers(),
+          '',
+          'table',
+          1,
+          true
+        )
+      ) + { gridPos: { x: 0, y: 20, w: 4, h: 8 } },
     ]),
   'osd-device-details.json':
     local OsdDeviceDetailsPanel(title,
index 93c51f00941072123cc56e7e7a7651b8f4432f22..b8e08697a98bace90d74133de0a1d8c124891760 100644 (file)
                "show": true
             }
          ]
+      },
+      {
+         "columns": [ ],
+         "datasource": "$datasource",
+         "description": "This table shows the 10 hosts with the highest number of slow ops",
+         "gridPos": {
+            "h": 8,
+            "w": 4,
+            "x": 0,
+            "y": 40
+         },
+         "id": 15,
+         "links": [ ],
+         "sort": {
+            "col": 2,
+            "desc": true
+         },
+         "styles": [
+            {
+               "alias": "Instance",
+               "colorMode": null,
+               "colors": [
+                  "rgba(245, 54, 54, 0.9)",
+                  "rgba(237, 129, 40, 0.89)",
+                  "rgba(50, 172, 45, 0.97)"
+               ],
+               "dateFormat": "YYYY-MM-DD HH:mm:ss",
+               "decimals": 2,
+               "mappingType": 1,
+               "pattern": "instance",
+               "thresholds": [ ],
+               "type": "string",
+               "unit": "short",
+               "valueMaps": [ ]
+            },
+            {
+               "alias": "Slow Ops",
+               "colorMode": null,
+               "colors": [
+                  "rgba(245, 54, 54, 0.9)",
+                  "rgba(237, 129, 40, 0.89)",
+                  "rgba(50, 172, 45, 0.97)"
+               ],
+               "dateFormat": "YYYY-MM-DD HH:mm:ss",
+               "decimals": 2,
+               "mappingType": 1,
+               "pattern": "Value",
+               "thresholds": [ ],
+               "type": "number",
+               "unit": "none",
+               "valueMaps": [ ]
+            },
+            {
+               "alias": "",
+               "colorMode": null,
+               "colors": [
+                  "rgba(245, 54, 54, 0.9)",
+                  "rgba(237, 129, 40, 0.89)",
+                  "rgba(50, 172, 45, 0.97)"
+               ],
+               "dateFormat": "YYYY-MM-DD HH:mm:ss",
+               "decimals": 2,
+               "mappingType": 1,
+               "pattern": "/.*/",
+               "thresholds": [ ],
+               "type": "hidden",
+               "unit": "short",
+               "valueMaps": [ ]
+            }
+         ],
+         "targets": [
+            {
+               "expr": "topk(10,\n  (sum by (instance)(ceph_daemon_health_metrics{type=\"SLOW_OPS\", ceph_daemon=~\"osd.*\"}))\n)\n",
+               "format": "table",
+               "instant": true,
+               "intervalFactor": 1,
+               "legendFormat": "",
+               "refId": "A"
+            }
+         ],
+         "timeFrom": null,
+         "timeShift": null,
+         "title": "Top Slow Ops per Host",
+         "transform": "table",
+         "type": "table"
       }
    ],
    "refresh": "30s",
index 5ea8955b29740c028a2edf3cea311b287f42e666..b34c6642263da46c6b5049b0f11b1fdde3c1cd91 100644 (file)
                "show": true
             }
          ]
+      },
+      {
+         "columns": [ ],
+         "datasource": "$datasource",
+         "description": "This table shows the 10 OSDs with the highest number of slow ops",
+         "gridPos": {
+            "h": 8,
+            "w": 4,
+            "x": 0,
+            "y": 20
+         },
+         "id": 13,
+         "links": [ ],
+         "sort": {
+            "col": 2,
+            "desc": true
+         },
+         "styles": [
+            {
+               "alias": "OSD ID",
+               "colorMode": null,
+               "colors": [
+                  "rgba(245, 54, 54, 0.9)",
+                  "rgba(237, 129, 40, 0.89)",
+                  "rgba(50, 172, 45, 0.97)"
+               ],
+               "dateFormat": "YYYY-MM-DD HH:mm:ss",
+               "decimals": 2,
+               "mappingType": 1,
+               "pattern": "ceph_daemon",
+               "thresholds": [ ],
+               "type": "string",
+               "unit": "short",
+               "valueMaps": [ ]
+            },
+            {
+               "alias": "Slow Ops",
+               "colorMode": null,
+               "colors": [
+                  "rgba(245, 54, 54, 0.9)",
+                  "rgba(237, 129, 40, 0.89)",
+                  "rgba(50, 172, 45, 0.97)"
+               ],
+               "dateFormat": "YYYY-MM-DD HH:mm:ss",
+               "decimals": 2,
+               "mappingType": 1,
+               "pattern": "Value",
+               "thresholds": [ ],
+               "type": "number",
+               "unit": "none",
+               "valueMaps": [ ]
+            },
+            {
+               "alias": "",
+               "colorMode": null,
+               "colors": [
+                  "rgba(245, 54, 54, 0.9)",
+                  "rgba(237, 129, 40, 0.89)",
+                  "rgba(50, 172, 45, 0.97)"
+               ],
+               "dateFormat": "YYYY-MM-DD HH:mm:ss",
+               "decimals": 2,
+               "mappingType": 1,
+               "pattern": "/.*/",
+               "thresholds": [ ],
+               "type": "hidden",
+               "unit": "short",
+               "valueMaps": [ ]
+            }
+         ],
+         "targets": [
+            {
+               "expr": "topk(10,\n  (ceph_daemon_health_metrics{type=\"SLOW_OPS\", ceph_daemon=~\"osd.*\"})\n)\n",
+               "format": "table",
+               "instant": true,
+               "intervalFactor": 1,
+               "legendFormat": "",
+               "refId": "A"
+            }
+         ],
+         "timeFrom": null,
+         "timeShift": null,
+         "title": "Top Slow Ops",
+         "transform": "table",
+         "type": "table"
       }
    ],
    "refresh": "30s",
index bed89a8790648e7d10b70f31db33938783c67fd2..7977e4035ecb7b9ff054a1d0a09faa570e6bb791 100644 (file)
             description: '{{ $value }} OSD requests are taking too long to process (osd_op_complaint_time exceeded)',
           },
         },
+        {
+          alert: 'CephDaemonSlowOps',
+          'for': '30s',
+          expr: 'ceph_daemon_health_metrics{type="SLOW_OPS"} > 0',
+          labels: { severity: 'warning', type: 'ceph_default' },
+          annotations: {
+            documentation: 'https://docs.ceph.com/en/latest/rados/operations/health-checks#slow-ops',
+            summary: '{{ $labels.ceph_daemon }} operations are slow to complete',
+            description: '{{ $labels.ceph_daemon }} operations are taking too long to process (complaint time exceeded)',
+          },
+        },
       ],
     },
     {
index a544d41eb0ee0ac15806a52c93ea5c36856c20cf..3b560342e765469cbb766e004487d365723f4e3a 100644 (file)
@@ -563,6 +563,16 @@ groups:
         labels:
           severity: "warning"
           type: "ceph_default"
+      - alert: "CephDaemonSlowOps"
+        for: "30s"
+        expr: "ceph_daemon_health_metrics{type=\"SLOW_OPS\"} > 0"
+        labels:
+          severity: "warning"
+          type: "ceph_default"
+        annotations:
+          summary: "{{ $labels.ceph_daemon }} operations are slow to complete"
+          description: "{{ $labels.ceph_daemon }} operations are taking too long to process (complaint time exceeded)"
+          documentation: "https://docs.ceph.com/en/latest/rados/operations/health-checks#slow-ops"
   - name: "cephadm"
     rules:
       - alert: "CephadmUpgradeFailed"
index 7b7e7db7301bddc9f0d55e2ca2b56198fb89652c..ec90743d41768768570a54488918234bd1c435db 100644 (file)
@@ -679,6 +679,33 @@ tests:
            summary: OSD operations are slow to complete
            description: "1 OSD requests are taking too long to process (osd_op_complaint_time exceeded)"
 
+ # slow daemon ops
+ - interval: 1m
+   input_series:
+    - series: 'ceph_daemon_health_metrics{ceph_daemon="osd.1", instance="ceph:9283",job="ceph", type="SLOW_OPS"}'
+      values: '1+0x120'
+   promql_expr_test:
+     - expr: 'ceph_daemon_health_metrics{type="SLOW_OPS"} > 0'
+       eval_time: 1m
+       exp_samples:
+         - labels: '{__name__="ceph_daemon_health_metrics", ceph_daemon="osd.1",instance="ceph:9283",
+           job="ceph", type="SLOW_OPS"}'
+           value: 1
+   alert_rule_test:
+     - eval_time: 20m
+       alertname: CephDaemonSlowOps
+       exp_alerts:
+       - exp_labels:
+           instance: ceph:9283
+           ceph_daemon: "osd.1"
+           job: ceph
+           severity: warning
+           type: ceph_default
+         exp_annotations:
+           documentation: https://docs.ceph.com/en/latest/rados/operations/health-checks#slow-ops
+           summary: osd.1 operations are slow to complete
+           description: "osd.1 operations are taking too long to process (complaint time exceeded)"
+
 # CEPHADM orchestrator alert triggers
  - interval: 30s
    input_series:
index e62e93b3090667ccd83c146618fc2167b2d549e1..6a8525b37b28ef4743b9d3cf78c4fd0b09028ed5 100644 (file)
@@ -1511,3 +1511,26 @@ void ActivePyModules::unregister_client(std::string_view name, std::string addrs
   dout(7) << "unregistering msgr client handle " << addrv << dendl;
   py_module_registry.unregister_client(name, addrv);
 }
+
+// Build a Python dict mapping each daemon key (e.g. "osd.1") to a list of
+// its reported health metrics, each entry shaped as
+// {"value": <n1>, "type": <metric type name>}.
+PyObject* ActivePyModules::get_daemon_health_metrics()
+{
+  // Release the GIL before taking the daemon-state lock inside
+  // with_daemons_by_server() to avoid a GIL/daemon-state lock-order
+  // inversion; reacquire it only once inside the callback, before any
+  // Python objects are created via PyFormatter.
+  without_gil_t no_gil;
+  return daemon_state.with_daemons_by_server([&no_gil]
+      (const std::map<std::string, DaemonStateCollection> &all) {
+      no_gil.acquire_gil();
+      PyFormatter f;
+      // Iterate every daemon on every server; the hostname itself is not
+      // emitted, only the per-daemon metric entries.
+      for (const auto &[hostname, daemon_state] : all) {
+        for (const auto &[key, state] : daemon_state) {
+          f.open_array_section(ceph::to_string(key));
+          for (const auto &metric : state->daemon_health_metrics) {
+            f.open_object_section(metric.get_type_name());
+            // NOTE(review): only n1 is exported as "value"; n2 (the second
+            // half of the metric pair) is dropped here — confirm that is
+            // intended for all metric types, not just SLOW_OPS.
+            f.dump_int("value", metric.get_n1());
+            f.dump_string("type", metric.get_type_name());
+            f.close_section();
+          }
+          f.close_section();
+        }
+      }
+      return f.get();
+  });
+}
index d916bdccaf9a647480d523fa68cf07573c6302fc..2a22b98bf38f4f3d0a417e0988b1f1e6b065f465 100644 (file)
@@ -221,6 +221,7 @@ public:
 
   void cluster_log(const std::string &channel, clog_type prio,
     const std::string &message);
+  PyObject* get_daemon_health_metrics();
 
   bool inject_python_on() const;
   void update_cache_metrics();
index 3f49976d870ca5591589f36060c66ab2e2ec2a3f..b913d4c98fed7d8b9b0f5929cf82abc0b3ccea13 100644 (file)
@@ -1417,6 +1417,12 @@ ceph_unregister_client(BaseMgrModule *self, PyObject *args)
   Py_RETURN_NONE;
 }
 
+// Python binding for MgrModule._ceph_get_daemon_health_metrics(): forwards
+// to ActivePyModules::get_daemon_health_metrics(). Takes no arguments.
+static PyObject*
+ceph_get_daemon_health_metrics(BaseMgrModule *self, PyObject *args)
+{
+  return self->py_modules->get_daemon_health_metrics();
+}
+
 PyMethodDef BaseMgrModule_methods[] = {
   {"_ceph_get", (PyCFunction)ceph_state_get, METH_VARARGS,
    "Get a cluster object"},
@@ -1543,6 +1549,9 @@ PyMethodDef BaseMgrModule_methods[] = {
   {"_ceph_unregister_client", (PyCFunction)ceph_unregister_client,
     METH_VARARGS, "Unregister RADOS instance for potential blocklisting"},
 
+  {"_ceph_get_daemon_health_metrics", (PyCFunction)ceph_get_daemon_health_metrics,
+    METH_VARARGS, "Get health metrics for all daemons"},
+
   {NULL, NULL, 0, NULL}
 };
 
index ad3ea29efd46b14916fb961759a50744a184e3e6..ce0dad2c87e317ee43e374a682ae57e325f2555e 100644 (file)
@@ -44,8 +44,9 @@ public:
     : type(type_), value(n)
   {}
   DaemonHealthMetric(daemon_metric type_, uint32_t n1, uint32_t n2)
-    : type(type_), value(n1, n2)
+    : type(type_), value(n1, n2) 
   {}
+
   daemon_metric get_type() const {
     return type;
   }
@@ -58,6 +59,7 @@ public:
   uint32_t get_n2() const {
     return value.n2;
   }
+
   DENC(DaemonHealthMetric, v, p) {
     DENC_START(1, 1, p);
     denc(v.type, p);
@@ -65,6 +67,10 @@ public:
     DENC_FINISH(p);
   }
 
+  std::string get_type_name() const {
+    return daemon_metric_name(get_type());
+  }
+
   friend std::ostream& operator<<(std::ostream& out, const DaemonHealthMetric& m) {
     return out << daemon_metric_name(m.get_type()) << "("
               << m.get_n() << "|(" << m.get_n1() << "," << m.get_n2() << "))";
index 430911f6f57f12a47afc260525862d3203064df9..15cd9f0e7e02e0b3e46555b2a57b5ece2a80c118 100644 (file)
@@ -2639,8 +2639,6 @@ void DaemonServer::send_report()
                 << std::dec << dendl;
             continue;
           }
-         dout(20) << " + " << state->key << " "
-                  << metric << dendl;
           tie(acc, std::ignore) = accumulated.emplace(metric.get_type(),
               std::move(collector));
         }
index d853fbf8204ce6b4a07c7d1075fda6c33ffdb6e8..2d8c8cbaba762cb0498432777650f7080773bfd7 100644 (file)
@@ -112,3 +112,4 @@ class BaseMgrModule(object):
     def _ceph_unregister_client(self, addrs: str) -> None: ...
     def _ceph_register_client(self, addrs: str) -> None: ...
     def _ceph_is_authorized(self, arguments: Dict[str, str]) -> bool: ...
+    def _ceph_get_daemon_health_metrics(self) -> Dict[str, List[Dict[str, Any]]]: ...
index 89629527a57245a6618634c21affa6e563975fe4..d1b6e7dc96e77a255a9bdad55aaf40ed88d9eeef 100644 (file)
@@ -1963,6 +1963,13 @@ class MgrModule(ceph_module.BaseMgrModule, MgrModuleLoggingMixin):
         """
         return self._ceph_get_mds_perf_counters(query_id)
 
+    def get_daemon_health_metrics(self) -> Dict[str, List[Dict[str, Any]]]:
+        """
+        Get the list of health metrics per daemon. This includes SLOW_OPS health metrics
+        in MON and OSD daemons, and PENDING_CREATING_PGS health metrics for OSDs.
+
+        Returns a mapping of daemon key (e.g. ``"osd.1"``) to a list of
+        ``{"type": <metric name>, "value": <int>}`` entries.
+        """
+        return self._ceph_get_daemon_health_metrics()
+
     def is_authorized(self, arguments: Dict[str, str]) -> bool:
         """
         Verifies that the current session caps permit executing the py service
index dd225ad03bb5640814501af3372ce5473ff36609..0a76779a20949b5793639b833eaf60741134787c 100644 (file)
@@ -1564,6 +1564,21 @@ class Module(MgrModule):
             self.metrics[path].set(stats['stat_sum']['num_objects_repaired'],
                                    labelvalues=(stats['poolid'],))
 
+    def get_all_daemon_health_metrics(self) -> None:
+        daemon_metrics = self.get_daemon_health_metrics()
+        self.log.debug('metrics jeje %s' % (daemon_metrics))
+        for daemon_name, health_metrics in daemon_metrics.items():
+            for health_metric in health_metrics:
+                path = f'daemon_health_metrics{daemon_name}{health_metric["type"]}'
+                self.metrics[path] = Metric(
+                    'counter',
+                    'daemon_health_metrics',
+                    'Health metrics for Ceph daemons',
+                    ('type', 'ceph_daemon',)
+                )
+                self.metrics[path].set(health_metric['value'], labelvalues=(
+                    health_metric['type'], daemon_name,))
+
     @profile_method(True)
     def collect(self) -> str:
         # Clear the metrics before scraping
@@ -1581,6 +1596,7 @@ class Module(MgrModule):
         self.get_pg_status()
         self.get_pg_repaired_objects()
         self.get_num_objects()
+        self.get_all_daemon_health_metrics()
 
         for daemon, counters in self.get_all_perf_counters().items():
             for path, counter_info in counters.items():