mon: OSDMonitor: 'osd pool' - if we can set it, we must be able to get it  [2046/head]
author     Joao Eduardo Luis <joao.luis@inktank.com>
           Mon, 30 Jun 2014 16:51:47 +0000 (17:51 +0100)
committer  Joao Eduardo Luis <joao.luis@inktank.com>
           Wed, 2 Jul 2014 18:35:31 +0000 (19:35 +0100)
Add support for getting the values of the following pool variables via 'osd pool get':
 - target_max_objects
 - target_max_bytes
 - cache_target_dirty_ratio
 - cache_target_full_ratio
 - cache_min_flush_age
 - cache_min_evict_age

Signed-off-by: Joao Eduardo Luis <joao.luis@inktank.com>
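
For a quick sense of the round trip this enables, a sketch (pool name and
values mirror the workunit below; the reply of 'osd pool set' is omitted):

  $ ceph osd pool set rbd target_max_bytes 123456
  $ ceph osd pool get rbd target_max_bytes
  target_max_bytes: 123456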
qa/workunits/cephtool/test.sh
src/mon/MonCommands.h
src/mon/OSDMonitor.cc

diff --git a/qa/workunits/cephtool/test.sh b/qa/workunits/cephtool/test.sh
index 3c1387af2b3a96ed57adaf9f1ef6a1d2b59f51fd..69034de480920865c97c90e97e3297289df73930 100755 (executable)
@@ -644,17 +644,30 @@ function test_mon_osd_pool_set()
   ceph osd pool get rbd hit_set_fpp | grep "hit_set_fpp: 0.01"
 
   ceph osd pool set rbd target_max_objects 123
+  ceph osd pool get rbd target_max_objects | \
+    grep 'target_max_objects:[ \t]\+123'
   ceph osd pool set rbd target_max_bytes 123456
+  ceph osd pool get rbd target_max_bytes | \
+    grep 'target_max_bytes:[ \t]\+123456'
   ceph osd pool set rbd cache_target_dirty_ratio .123
+  ceph osd pool get rbd cache_target_dirty_ratio | \
+    grep 'cache_target_dirty_ratio:[ \t]\+0.123'
   expect_false ceph osd pool set rbd cache_target_dirty_ratio -.2
   expect_false ceph osd pool set rbd cache_target_dirty_ratio 1.1
   ceph osd pool set rbd cache_target_full_ratio .123
+  ceph osd pool get rbd cache_target_full_ratio | \
+    grep 'cache_target_full_ratio:[ \t]\+0.123'
   ceph osd dump -f json-pretty | grep '"cache_target_full_ratio_micro": 123000'
   ceph osd pool set rbd cache_target_full_ratio 1.0
   ceph osd pool set rbd cache_target_full_ratio 0
   expect_false ceph osd pool set rbd cache_target_full_ratio 1.1
   ceph osd pool set rbd cache_min_flush_age 123
+  ceph osd pool get rbd cache_min_flush_age | \
+    grep 'cache_min_flush_age:[ \t]\+123'
   ceph osd pool set rbd cache_min_evict_age 234
+  ceph osd pool get rbd cache_min_evict_age | \
+    grep 'cache_min_evict_age:[ \t]\+234'
+
 
   ceph osd pool get rbd crush_ruleset | grep 'crush_ruleset: 0'
 }
diff --git a/src/mon/MonCommands.h b/src/mon/MonCommands.h
index 993589360ebd2de4884468c16b54e213cb3c04cc..b9f092ce1417794e9eedcbdde3165ebab8b5e213 100644 (file)
@@ -557,7 +557,7 @@ COMMAND("osd pool rename " \
        "rename <srcpool> to <destpool>", "osd", "rw", "cli,rest")
 COMMAND("osd pool get " \
        "name=pool,type=CephPoolname " \
-       "name=var,type=CephChoices,strings=size|min_size|crash_replay_interval|pg_num|pgp_num|crush_ruleset|hit_set_type|hit_set_period|hit_set_count|hit_set_fpp|auid", \
+       "name=var,type=CephChoices,strings=size|min_size|crash_replay_interval|pg_num|pgp_num|crush_ruleset|hit_set_type|hit_set_period|hit_set_count|hit_set_fpp|auid|target_max_objects|target_max_bytes|cache_target_dirty_ratio|cache_target_full_ratio|cache_min_flush_age|cache_min_evict_age", \
        "get pool parameter <var>", "osd", "r", "cli,rest")
 COMMAND("osd pool set " \
        "name=pool,type=CephPoolname " \
diff --git a/src/mon/OSDMonitor.cc b/src/mon/OSDMonitor.cc
index 16c281cc48b049421d9d4129b08e49748a685b99..7c17a14b0e8d712eb8f0a4d55a3c660df7210306 100644 (file)
@@ -2505,6 +2505,24 @@ bool OSDMonitor::preprocess_command(MMonCommand *m)
          BloomHitSet::Params *bloomp = static_cast<BloomHitSet::Params*>(p->hit_set_params.impl.get());
          f->dump_float("hit_set_fpp", bloomp->get_fpp());
        }
+      } else if (var == "target_max_objects") {
+        f->dump_unsigned("target_max_objects", p->target_max_objects);
+      } else if (var == "target_max_bytes") {
+        f->dump_unsigned("target_max_bytes", p->target_max_bytes);
+      } else if (var == "cache_target_dirty_ratio") {
+        f->dump_unsigned("cache_target_dirty_ratio_micro",
+                         p->cache_target_dirty_ratio_micro);
+        f->dump_float("cache_target_dirty_ratio",
+                      ((float)p->cache_target_dirty_ratio_micro/1000000));
+      } else if (var == "cache_target_full_ratio") {
+        f->dump_unsigned("cache_target_full_ratio_micro",
+                         p->cache_target_full_ratio_micro);
+        f->dump_float("cache_target_full_ratio",
+                      ((float)p->cache_target_full_ratio_micro/1000000));
+      } else if (var == "cache_min_flush_age") {
+        f->dump_unsigned("cache_min_flush_age", p->cache_min_flush_age);
+      } else if (var == "cache_min_evict_age") {
+        f->dump_unsigned("cache_min_evict_age", p->cache_min_evict_age);
       }
 
       f->close_section();
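
With a formatter in play, the new branches would emit JSON roughly like this
(a sketch; the enclosing section and its other fields are outside this hunk,
and passing the generic formatter flag to 'osd pool get' is assumed):

  $ ceph osd pool get rbd cache_target_dirty_ratio -f json-pretty
  { "cache_target_dirty_ratio_micro": 123000,
    "cache_target_dirty_ratio": 0.123}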
@@ -2538,7 +2556,22 @@ bool OSDMonitor::preprocess_command(MMonCommand *m)
        }
        BloomHitSet::Params *bloomp = static_cast<BloomHitSet::Params*>(p->hit_set_params.impl.get());
        ss << "hit_set_fpp: " << bloomp->get_fpp();
+      } else if (var == "target_max_objects") {
+        ss << "target_max_objects: " << p->target_max_objects;
+      } else if (var == "target_max_bytes") {
+        ss << "target_max_bytes: " << p->target_max_bytes;
+      } else if (var == "cache_target_dirty_ratio") {
+        ss << "cache_target_dirty_ratio: "
+          << ((float)p->cache_target_dirty_ratio_micro/1000000);
+      } else if (var == "cache_target_full_ratio") {
+        ss << "cache_target_full_ratio: "
+          << ((float)p->cache_target_full_ratio_micro/1000000);
+      } else if (var == "cache_min_flush_age") {
+        ss << "cache_min_flush_age: " << p->cache_min_flush_age;
+      } else if (var == "cache_min_evict_age") {
+        ss << "cache_min_evict_age: " << p->cache_min_evict_age;
       }
+
       rdata.append(ss);
       ss.str("");
     }
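
The plain-text branch above would produce the one-line replies the workunit
greps for, with the *_micro values scaled back down to a ratio, e.g.:

  $ ceph osd pool get rbd cache_target_dirty_ratio
  cache_target_dirty_ratio: 0.123
  $ ceph osd pool get rbd cache_min_evict_age
  cache_min_evict_age: 234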