From: Joao Eduardo Luis
Date: Mon, 30 Jun 2014 16:51:47 +0000 (+0100)
Subject: mon: OSDMonitor: 'osd pool' - if we can set it, we must be able to get it
X-Git-Tag: v0.84~159^2
X-Git-Url: http://git-server-git.apps.pok.os.sepia.ceph.com/?a=commitdiff_plain;h=ddc04c83ff6842ca0b2f804b46099ea729b9fb6b;p=ceph.git

mon: OSDMonitor: 'osd pool' - if we can set it, we must be able to get it

Add support to get the values for the following variables:
 - target_max_objects
 - target_max_bytes
 - cache_target_dirty_ratio
 - cache_target_full_ratio
 - cache_min_flush_age
 - cache_min_evict_age

Signed-off-by: Joao Eduardo Luis
---

diff --git a/qa/workunits/cephtool/test.sh b/qa/workunits/cephtool/test.sh
index 3c1387af2b3a..69034de48092 100755
--- a/qa/workunits/cephtool/test.sh
+++ b/qa/workunits/cephtool/test.sh
@@ -644,17 +644,30 @@ function test_mon_osd_pool_set()
   ceph osd pool get rbd hit_set_fpp | grep "hit_set_fpp: 0.01"
 
   ceph osd pool set rbd target_max_objects 123
+  ceph osd pool get rbd target_max_objects | \
+    grep 'target_max_objects:[ \t]\+123'
   ceph osd pool set rbd target_max_bytes 123456
+  ceph osd pool get rbd target_max_bytes | \
+    grep 'target_max_bytes:[ \t]\+123456'
 
   ceph osd pool set rbd cache_target_dirty_ratio .123
+  ceph osd pool get rbd cache_target_dirty_ratio | \
+    grep 'cache_target_dirty_ratio:[ \t]\+0.123'
   expect_false ceph osd pool set rbd cache_target_dirty_ratio -.2
   expect_false ceph osd pool set rbd cache_target_dirty_ratio 1.1
   ceph osd pool set rbd cache_target_full_ratio .123
+  ceph osd pool get rbd cache_target_full_ratio | \
+    grep 'cache_target_full_ratio:[ \t]\+0.123'
   ceph osd dump -f json-pretty | grep '"cache_target_full_ratio_micro": 123000'
   ceph osd pool set rbd cache_target_full_ratio 1.0
   ceph osd pool set rbd cache_target_full_ratio 0
   expect_false ceph osd pool set rbd cache_target_full_ratio 1.1
   ceph osd pool set rbd cache_min_flush_age 123
+  ceph osd pool get rbd cache_min_flush_age | \
+    grep 'cache_min_flush_age:[ \t]\+123'
   ceph osd pool set rbd cache_min_evict_age 234
+  ceph osd pool get rbd cache_min_evict_age | \
+    grep 'cache_min_evict_age:[ \t]\+234'
 
+  ceph osd pool get rbd crush_ruleset | grep 'crush_ruleset: 0'
 }
diff --git a/src/mon/MonCommands.h b/src/mon/MonCommands.h
index 993589360ebd..b9f092ce1417 100644
--- a/src/mon/MonCommands.h
+++ b/src/mon/MonCommands.h
@@ -557,7 +557,7 @@ COMMAND("osd pool rename " \
 	"rename <srcpool> to <destpool>", "osd", "rw", "cli,rest")
 COMMAND("osd pool get " \
 	"name=pool,type=CephPoolname " \
-	"name=var,type=CephChoices,strings=size|min_size|crash_replay_interval|pg_num|pgp_num|crush_ruleset|hit_set_type|hit_set_period|hit_set_count|hit_set_fpp|auid", \
+	"name=var,type=CephChoices,strings=size|min_size|crash_replay_interval|pg_num|pgp_num|crush_ruleset|hit_set_type|hit_set_period|hit_set_count|hit_set_fpp|auid|target_max_objects|target_max_bytes|cache_target_dirty_ratio|cache_target_full_ratio|cache_min_flush_age|cache_min_evict_age", \
 	"get pool parameter <var>", "osd", "r", "cli,rest")
 COMMAND("osd pool set " \
 	"name=pool,type=CephPoolname " \
diff --git a/src/mon/OSDMonitor.cc b/src/mon/OSDMonitor.cc
index 16c281cc48b0..7c17a14b0e8d 100644
--- a/src/mon/OSDMonitor.cc
+++ b/src/mon/OSDMonitor.cc
@@ -2505,6 +2505,24 @@ bool OSDMonitor::preprocess_command(MMonCommand *m)
           BloomHitSet::Params *bloomp = static_cast<BloomHitSet::Params*>(p->hit_set_params.impl.get());
           f->dump_float("hit_set_fpp", bloomp->get_fpp());
         }
+      } else if (var == "target_max_objects") {
+        f->dump_unsigned("target_max_objects", p->target_max_objects);
+      } else if (var == "target_max_bytes") {
+        f->dump_unsigned("target_max_bytes", p->target_max_bytes);
+      } else if (var == "cache_target_dirty_ratio") {
+        f->dump_unsigned("cache_target_dirty_ratio_micro",
+                         p->cache_target_dirty_ratio_micro);
+        f->dump_float("cache_target_dirty_ratio",
+                      ((float)p->cache_target_dirty_ratio_micro/1000000));
+      } else if (var == "cache_target_full_ratio") {
+        f->dump_unsigned("cache_target_full_ratio_micro",
+                         p->cache_target_full_ratio_micro);
+        f->dump_float("cache_target_full_ratio",
+                      ((float)p->cache_target_full_ratio_micro/1000000));
+      } else if (var == "cache_min_flush_age") {
+        f->dump_unsigned("cache_min_flush_age", p->cache_min_flush_age);
+      } else if (var == "cache_min_evict_age") {
+        f->dump_unsigned("cache_min_evict_age", p->cache_min_evict_age);
       }
 
       f->close_section();
@@ -2538,7 +2556,22 @@ bool OSDMonitor::preprocess_command(MMonCommand *m)
         }
         BloomHitSet::Params *bloomp = static_cast<BloomHitSet::Params*>(p->hit_set_params.impl.get());
         ss << "hit_set_fpp: " << bloomp->get_fpp();
+      } else if (var == "target_max_objects") {
+        ss << "target_max_objects: " << p->target_max_objects;
+      } else if (var == "target_max_bytes") {
+        ss << "target_max_bytes: " << p->target_max_bytes;
+      } else if (var == "cache_target_dirty_ratio") {
+        ss << "cache_target_dirty_ratio: "
+           << ((float)p->cache_target_dirty_ratio_micro/1000000);
+      } else if (var == "cache_target_full_ratio") {
+        ss << "cache_target_full_ratio: "
+           << ((float)p->cache_target_full_ratio_micro/1000000);
+      } else if (var == "cache_min_flush_age") {
+        ss << "cache_min_flush_age: " << p->cache_min_flush_age;
+      } else if (var == "cache_min_evict_age") {
+        ss << "cache_min_evict_age: " << p->cache_min_evict_age;
       }
+
       rdata.append(ss);
       ss.str("");
     }
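
With this change, each cache-tiering variable that 'ceph osd pool set' accepts
can also be read back with 'ceph osd pool get'. A minimal usage sketch of the
resulting behaviour, assuming a pool named "rbd" as in the test above:

    ceph osd pool set rbd cache_target_dirty_ratio .4
    ceph osd pool get rbd cache_target_dirty_ratio
    # expected output: cache_target_dirty_ratio: 0.4

    ceph osd pool set rbd target_max_bytes 123456
    ceph osd pool get rbd target_max_bytes
    # expected output: target_max_bytes: 123456

When a formatter is requested (e.g. '-f json', as used elsewhere in the test),
the ratio variables are additionally reported as their raw *_micro values
alongside the derived float, as shown in the OSDMonitor.cc hunks above.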