From: xie xingguo Date: Tue, 18 Jun 2019 12:05:31 +0000 (+0800) Subject: mgr, osd: 'ceph osd df' by pool X-Git-Tag: v15.1.0~2422^2 X-Git-Url: http://git-server-git.apps.pok.os.sepia.ceph.com/?a=commitdiff_plain;h=refs%2Fpull%2F28629%2Fhead;p=ceph.git mgr, osd: 'ceph osd df' by pool Our test admin has been asking for this for the past few years:-) Besides, this is also useful for operating on large Ceph clusters with multiple storage pools possibly spanning over all osds. Signed-off-by: xie xingguo --- diff --git a/qa/standalone/mon/osd-df.sh b/qa/standalone/mon/osd-df.sh new file mode 100755 index 000000000000..962909fdbd65 --- /dev/null +++ b/qa/standalone/mon/osd-df.sh @@ -0,0 +1,97 @@ +#!/bin/bash + +source $CEPH_ROOT/qa/standalone/ceph-helpers.sh + +function run() { + local dir=$1 + shift + + export CEPH_MON="127.0.0.1:7113" # git grep '\<7113\>' : there must be only one + export CEPH_ARGS + CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none " + CEPH_ARGS+="--mon-host=$CEPH_MON " + + local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')} + for func in $funcs ; do + setup $dir || return 1 + $func $dir || return 1 + teardown $dir || return 1 + done +} + +function TEST_osd_df() { + local dir=$1 + setup $dir || return 1 + + run_mon $dir a || return 1 + run_mgr $dir x || return 1 + run_osd $dir 0 || return 1 + run_osd $dir 1 || return 1 + run_osd $dir 2 || return 1 + run_osd $dir 3 || return 1 + run_osd $dir 4 || return 1 + run_osd $dir 5 || return 1 + + # normal case + ceph osd df --f json-pretty | grep osd.0 || return 1 + ceph osd df --f json-pretty | grep osd.1 || return 1 + ceph osd df --f json-pretty | grep osd.2 || return 1 + ceph osd df --f json-pretty | grep osd.3 || return 1 + ceph osd df --f json-pretty | grep osd.4 || return 1 + ceph osd df --f json-pretty | grep osd.5 || return 1 + + # filter by device class + osd_class=$(ceph osd crush get-device-class 0) + ceph osd df class $osd_class --f json-pretty | grep 'osd.0' || return 1 + # 
post-nautilus, the filter-type keyword is no longer required + ceph osd df $osd_class --f json-pretty | grep 'osd.0' || return 1 + ceph osd crush rm-device-class 0 || return 1 + ceph osd crush set-device-class aaa 0 || return 1 + ceph osd df aaa --f json-pretty | grep 'osd.0' || return 1 + ceph osd df aaa --f json-pretty | grep 'osd.1' && return 1 + # reset osd.0's device class + ceph osd crush rm-device-class 0 || return 1 + ceph osd crush set-device-class $osd_class 0 || return 1 + + # filter by crush node + ceph osd df osd.0 --f json-pretty | grep osd.0 || return 1 + ceph osd df osd.0 --f json-pretty | grep osd.1 && return 1 + ceph osd crush move osd.0 root=default host=foo || return 1 + ceph osd crush move osd.1 root=default host=foo || return 1 + ceph osd crush move osd.2 root=default host=foo || return 1 + ceph osd crush move osd.3 root=default host=bar || return 1 + ceph osd crush move osd.4 root=default host=bar || return 1 + ceph osd crush move osd.5 root=default host=bar || return 1 + ceph osd df tree foo --f json-pretty | grep foo || return 1 + ceph osd df tree foo --f json-pretty | grep bar && return 1 + ceph osd df foo --f json-pretty | grep osd.0 || return 1 + ceph osd df foo --f json-pretty | grep osd.1 || return 1 + ceph osd df foo --f json-pretty | grep osd.2 || return 1 + ceph osd df foo --f json-pretty | grep osd.3 && return 1 + ceph osd df foo --f json-pretty | grep osd.4 && return 1 + ceph osd df foo --f json-pretty | grep osd.5 && return 1 + ceph osd df tree bar --f json-pretty | grep bar || return 1 + ceph osd df tree bar --f json-pretty | grep foo && return 1 + ceph osd df bar --f json-pretty | grep osd.0 && return 1 + ceph osd df bar --f json-pretty | grep osd.1 && return 1 + ceph osd df bar --f json-pretty | grep osd.2 && return 1 + ceph osd df bar --f json-pretty | grep osd.3 || return 1 + ceph osd df bar --f json-pretty | grep osd.4 || return 1 + ceph osd df bar --f json-pretty | grep osd.5 || return 1 + + # filter by pool + ceph osd crush 
rm-device-class all || return 1 + ceph osd crush set-device-class nvme 0 1 3 4 || return 1 + ceph osd crush rule create-replicated nvme-rule default host nvme || return 1 + ceph osd pool create nvme-pool 12 12 nvme-rule || return 1 + ceph osd df nvme-pool --f json-pretty | grep osd.0 || return 1 + ceph osd df nvme-pool --f json-pretty | grep osd.1 || return 1 + ceph osd df nvme-pool --f json-pretty | grep osd.2 && return 1 + ceph osd df nvme-pool --f json-pretty | grep osd.3 || return 1 + ceph osd df nvme-pool --f json-pretty | grep osd.4 || return 1 + ceph osd df nvme-pool --f json-pretty | grep osd.5 && return 1 + + teardown $dir || return 1 +} + +main osd-df "$@" diff --git a/qa/workunits/cephtool/test.sh b/qa/workunits/cephtool/test.sh index 7585d03024b8..ad6aa6c1bcdb 100755 --- a/qa/workunits/cephtool/test.sh +++ b/qa/workunits/cephtool/test.sh @@ -723,15 +723,6 @@ function test_mon_misc() ceph --concise osd dump | grep '^epoch' ceph osd df | grep 'MIN/MAX VAR' - osd_class=$(ceph osd crush get-device-class 0) - ceph osd df tree class $osd_class | grep 'osd.0' - ceph osd crush rm-device-class 0 - # create class first in case old device class may - # have already been automatically destroyed - ceph osd crush class create $osd_class - ceph osd df tree class $osd_class | expect_false grep 'osd.0' - ceph osd crush set-device-class $osd_class 0 - ceph osd df tree name osd.0 | grep 'osd.0' # df ceph df > $TMPFILE diff --git a/src/mgr/DaemonServer.cc b/src/mgr/DaemonServer.cc index 6a63aca14240..1f075795c581 100644 --- a/src/mgr/DaemonServer.cc +++ b/src/mgr/DaemonServer.cc @@ -1216,30 +1216,18 @@ bool DaemonServer::_handle_command( return true; } } else if (prefix == "osd df") { - string method; + string method, filter; cmd_getval(g_ceph_context, cmdctx->cmdmap, "output_method", method); - string filter_by; - string filter; - cmd_getval(g_ceph_context, cmdctx->cmdmap, "filter_by", filter_by); cmd_getval(g_ceph_context, cmdctx->cmdmap, "filter", filter); - if 
(filter_by.empty() != filter.empty()) { - cmdctx->reply(-EINVAL, "you must specify both 'filter_by' and 'filter'"); - return true; - } stringstream rs; r = cluster_state.with_osdmap_and_pgmap([&](const OSDMap& osdmap, const PGMap& pgmap) { // sanity check filter(s) - if (filter_by == "class") { - if (!osdmap.crush->class_exists(filter)) { - rs << "specified class '" << filter << "' does not exist"; - return -EINVAL; - } - } - if (filter_by == "name") { - if (!osdmap.crush->name_exists(filter)) { - rs << "specified name '" << filter << "' does not exist"; - return -EINVAL; - } + if (!filter.empty() && + osdmap.lookup_pg_pool_name(filter) < 0 && + !osdmap.crush->class_exists(filter) && + !osdmap.crush->name_exists(filter)) { + rs << "'" << filter << "' not a pool, crush node or device class name"; + return -EINVAL; } print_osd_utilization(osdmap, pgmap, ss, f.get(), method == "tree", filter); diff --git a/src/osd/OSDMap.cc b/src/osd/OSDMap.cc index e276534708fa..2f6b2bbc3a82 100644 --- a/src/osd/OSDMap.cc +++ b/src/osd/OSDMap.cc @@ -4991,6 +4991,15 @@ public: } else if (osdmap->crush->class_exists(filter)) { // filter by device class class_id = osdmap->crush->get_class_id(filter); + } else if (auto pool_id = osdmap->lookup_pg_pool_name(filter); + pool_id >= 0) { + // filter by pool + auto crush_rule = osdmap->get_pool_crush_rule(pool_id); + set<int> roots; + osdmap->crush->find_takes_by_rule(crush_rule, &roots); + allowed = roots; + for (auto r : roots) + osdmap->crush->get_all_children(r, &allowed); } average_util = average_utilization(); } diff --git a/src/osd/OSDMap.h b/src/osd/OSDMap.h index fd227a99a0c9..37094119e559 100644 --- a/src/osd/OSDMap.h +++ b/src/osd/OSDMap.h @@ -1332,6 +1332,12 @@ public: ceph_assert(p != pools.end()); return p->second.get_type(); } + int get_pool_crush_rule(int64_t pool_id) const { + auto pool = get_pg_pool(pool_id); + if (!pool) + return -ENOENT; + return pool->get_crush_rule(); + } pg_t raw_pg_to_pg(pg_t pg) const {