From: Dmitriy Rabotjagov
Date: Fri, 10 May 2019 12:18:58 +0000 (+0300)
Subject: mgr/zabbix: Fix raw_bytes_used key name
X-Git-Tag: v15.1.0~2634^2
X-Git-Url: http://git-server-git.apps.pok.os.sepia.ceph.com/?a=commitdiff_plain;h=c336882cf696b56b82134de009cbd38929c026fc;p=ceph.git

mgr/zabbix: Fix raw_bytes_used key name

This patch fixes the raw_bytes_used key, which was renamed to stored_raw.
It also adds the percent_used key and fixes the Zabbix template to be
fully compatible with Zabbix 3.0.

Fixes: https://tracker.ceph.com/issues/39644
Signed-off-by: Dmitriy Rabotjagov
---

diff --git a/PendingReleaseNotes b/PendingReleaseNotes
index 6941d48843b4..c1cff457546e 100644
--- a/PendingReleaseNotes
+++ b/PendingReleaseNotes
@@ -65,3 +65,8 @@
   was 'wait_backfill' and the correct name is 'backfill_wait'.
   Update your Zabbix template accordingly so that it accepts the new key
   being send to Zabbix.
+
+* The zabbix plugin for the ceph manager now includes OSD and pool
+  discovery. An update of zabbix_template.xml is needed to receive
+  per-pool (read/write throughput, disk space usage) and per-OSD
+  (latency, status, PGs) statistics.

diff --git a/src/pybind/mgr/zabbix/module.py b/src/pybind/mgr/zabbix/module.py
index 531018188726..3fbe86ec7d47 100644
--- a/src/pybind/mgr/zabbix/module.py
+++ b/src/pybind/mgr/zabbix/module.py
@@ -206,7 +206,8 @@ class Module(MgrModule):
             data['[{0},rd_ops]'.format(pool['name'])] = pool['stats']['rd']
             data['[{0},wr_ops]'.format(pool['name'])] = pool['stats']['wr']
             data['[{0},bytes_used]'.format(pool['name'])] = pool['stats']['bytes_used']
-            data['[{0},raw_bytes_used]'.format(pool['name'])] = pool['stats']['raw_bytes_used']
+            data['[{0},stored_raw]'.format(pool['name'])] = pool['stats']['stored_raw']
+            data['[{0},percent_used]'.format(pool['name'])] = pool['stats']['percent_used']

         data['wr_ops'] = wr_ops
         data['rd_ops'] = rd_ops

diff --git a/src/pybind/mgr/zabbix/zabbix_template.xml b/src/pybind/mgr/zabbix/zabbix_template.xml
index e2a8344ec81f..17bff78234c0 100644
--- a/src/pybind/mgr/zabbix/zabbix_template.xml
+++ b/src/pybind/mgr/zabbix/zabbix_template.xml
[zabbix_template.xml hunks not shown; the template diff renames the
 "[{#POOL}] Pool RAW Used" item key from ceph.[{#POOL},raw_bytes_used] to
 ceph.[{#POOL},stored_raw], adds a new "[{#POOL}] Pool Percent Used" item
 (key ceph.[{#POOL},percent_used], units %), and updates the OSD and pool
 discovery items, triggers, and graph references for Zabbix 3.0 compatibility.]
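
For readers adapting their own monitoring scripts or templates, here is a
minimal sketch of how the patched per-pool loop shapes the Zabbix item keys
after this change. The pool_items helper and the sample pool entry are
illustrative only and not part of the module; the stats field names
(stored_raw, percent_used) follow the `ceph df` JSON output of Nautilus and
later, and the "ceph." prefix matches the item keys in the shipped template.

# Illustrative sketch, not the module's actual code: map "ceph df" pool stats
# onto the Zabbix item keys used by the template after this patch.
def pool_items(df_pools):
    data = {}
    for pool in df_pools:
        name = pool['name']
        stats = pool['stats']
        data['[{0},rd_ops]'.format(name)] = stats['rd']
        data['[{0},wr_ops]'.format(name)] = stats['wr']
        data['[{0},bytes_used]'.format(name)] = stats['bytes_used']
        # Renamed upstream: raw_bytes_used -> stored_raw
        data['[{0},stored_raw]'.format(name)] = stats['stored_raw']
        # New key exposed by this patch
        data['[{0},percent_used]'.format(name)] = stats['percent_used']
    return data


if __name__ == '__main__':
    # Hypothetical pool entry shaped like one element of "pools" in
    # `ceph df --format json`; the values are made up for the example.
    sample = [{'name': 'rbd',
               'stats': {'rd': 10, 'wr': 20, 'bytes_used': 4096,
                         'stored_raw': 12288, 'percent_used': 0.0005}}]
    for key, value in sorted(pool_items(sample).items()):
        print('ceph.{0} = {1}'.format(key, value))

Any template or script still referencing ceph.[{#POOL},raw_bytes_used] stops
receiving data once this change is deployed, which is why zabbix_template.xml
is updated in the same commit.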