test/store_test: test coverage for anti-spillover framework. 29687/head
author    Igor Fedotov <ifedotov@suse.com>
          Fri, 30 Aug 2019 15:04:21 +0000 (18:04 +0300)
committer Igor Fedotov <ifedotov@suse.com>
          Tue, 26 Nov 2019 19:51:17 +0000 (22:51 +0300)
Signed-off-by: Igor Fedotov <ifedotov@suse.com>
src/os/bluestore/BlueFS.h
src/os/bluestore/BlueStore.h
src/test/objectstore/store_test.cc
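
For orientation, the three tests added below all perform the same check: after generating enough omap/xattr metadata in RocksDB to provoke (or avoid) spillover from the 3 GiB DB volume, they read BlueFS's slow-device usage through the accessors introduced by this commit. A minimal sketch of that check, assuming a test build with the Ceph headers below; the helper name is illustrative and not part of the commit:

    // Sketch: how many BlueFS bytes ended up on the slow (main) device.
    // Uses the accessors added in this commit; bluefs_spillover_bytes()
    // is a hypothetical helper for illustration only.
    #include "os/bluestore/BlueStore.h"
    #include "os/bluestore/BlueFS.h"

    static uint64_t bluefs_spillover_bytes(ObjectStore* store) {
      BlueStore* bstore = dynamic_cast<BlueStore*>(store);
      ceph_assert(bstore);
      const PerfCounters* logger = bstore->get_bluefs_perf_counters();
      // l_bluefs_slow_used_bytes counts BlueFS data placed on the slow device
      // instead of the dedicated DB device, i.e. the spillover amount.
      return logger->get(l_bluefs_slow_used_bytes);
    }

The three tests differ only in the volume selection settings they apply and in the bound they assert on this value (>= 300 MB, == 0, and <= 300 MB respectively).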

src/os/bluestore/BlueFS.h
index 9bca340fed69c51c6e74c0130337834d31ecd8e5..3ae173f80ffbf37e81e1b7dd8c23d1cff099196c 100644
@@ -585,6 +585,9 @@ public:
 
   /// test purpose methods
   void debug_inject_duplicate_gift(unsigned bdev, uint64_t offset, uint64_t len);
+  const PerfCounters* get_perf_counters() const {
+    return logger;
+  }
 };
 
 class OriginalVolumeSelector : public BlueFSVolumeSelector {
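
The PerfCounters object returned by the new BlueFS::get_perf_counters() carries the l_bluefs_* perf counters defined for BlueFS, so code holding a BlueFS instance can inspect per-device usage directly, without going through BlueStore. A brief sketch under that assumption (print_bluefs_usage() is illustrative only; the exact counter set is taken from BlueFS of this era):

    #include <ostream>
    #include "os/bluestore/BlueFS.h"

    // Sketch: report BlueFS usage per device class via the accessor added above.
    static void print_bluefs_usage(BlueFS& bluefs, std::ostream& out) {
      const PerfCounters* l = bluefs.get_perf_counters();
      out << "db used:   " << l->get(l_bluefs_db_used_bytes) << "\n"
          << "wal used:  " << l->get(l_bluefs_wal_used_bytes) << "\n"
          << "slow used: " << l->get(l_bluefs_slow_used_bytes) << std::endl;
    }
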
src/os/bluestore/BlueStore.h
index 6b97d454d73541551f580788308b988b285acc0e..5b308cc55073899cdb09d190eec5649503570245 100644
@@ -2806,6 +2806,9 @@ public:
   const PerfCounters* get_perf_counters() const override {
     return logger;
   }
+  const PerfCounters* get_bluefs_perf_counters() const {
+    return bluefs->get_perf_counters();
+  }
 
   int queue_transactions(
     CollectionHandle& ch,
src/test/objectstore/store_test.cc
index 0835ffb2d3db3807843a5a30915bf38d0f306f6f..6a460a3ab10bf699446bff20fe9ee1b948bfa955 100644
 #include "os/filestore/FileStore.h"
 #if defined(WITH_BLUESTORE)
 #include "os/bluestore/BlueStore.h"
+#include "os/bluestore/BlueFS.h"
 #endif
 #include "include/Context.h"
 #include "common/ceph_argparse.h"
+#include "common/admin_socket.h"
 #include "global/global_init.h"
 #include "common/ceph_mutex.h"
 #include "common/Cond.h"
@@ -7988,6 +7990,119 @@ TEST_P(StoreTestSpecificAUSize, ReproNoBlobMultiTest) {
   }
 }
 
+void doManySetAttr(ObjectStore* store,
+  std::function<void(ObjectStore*)> do_check_fn)
+{
+  MixedGenerator gen(447);
+  gen_type rng(time(NULL));
+  coll_t cid(spg_t(pg_t(0, 447), shard_id_t::NO_SHARD));
+
+  SyntheticWorkloadState test_obj(store, &gen, &rng, cid, 40 * 1024, 4 * 1024, 0);
+  test_obj.init();
+  for (int i = 0; i < 1500; ++i) {
+    if (!(i % 10)) cerr << "seeding object " << i << std::endl;
+    test_obj.touch();
+  }
+  for (int i = 0; i < 10000; ++i) {
+    if (!(i % 100)) {
+      cerr << "Op " << i << std::endl;
+      test_obj.print_internal_state();
+    }
+    boost::uniform_int<> true_false(0, 99);
+    test_obj.setattrs();
+  }
+  test_obj.wait_for_done();
+
+  AdminSocket* admin_socket = g_ceph_context->get_admin_socket();
+  ceph_assert(admin_socket);
+
+  ceph::bufferlist in, out;
+  ostringstream err;
+
+  bool b = admin_socket->execute_command(
+    { "{\"prefix\": \"bluestore bluefs stats\"}" },
+    in, err, &out);
+  if (!b) {
+    cerr << "failure querying " << std::endl;
+  }
+  std::cout << std::string(out.c_str(), out.length()) << std::endl;
+  do_check_fn(store);
+  test_obj.shutdown();
+}
+
+TEST_P(StoreTestSpecificAUSize, SpilloverTest) {
+  if (string(GetParam()) != "bluestore")
+    return;
+
+  SetVal(g_conf(), "bluestore_block_db_create", "true");
+  SetVal(g_conf(), "bluestore_block_db_size", "3221225472");
+  SetVal(g_conf(), "bluestore_volume_selection_policy", "rocksdb_original");
+
+  g_conf().apply_changes(nullptr);
+
+  StartDeferred(65536);
+  doManySetAttr(store.get(),
+    [&](ObjectStore* _store) {
+
+      BlueStore* bstore = dynamic_cast<BlueStore*> (_store);
+      ceph_assert(bstore);
+      const PerfCounters* logger = bstore->get_bluefs_perf_counters();
+      // experimentally this case was found to produce 400+ MB of spillover;
+      // use a lower 300 MB threshold to stay on the safe side
+      ASSERT_GE(logger->get(l_bluefs_slow_used_bytes), 300 * 1024 * 1024);
+
+    }
+  );
+}
+
+TEST_P(StoreTestSpecificAUSize, SpilloverFixedTest) {
+  if (string(GetParam()) != "bluestore")
+    return;
+
+  SetVal(g_conf(), "bluestore_block_db_create", "true");
+  SetVal(g_conf(), "bluestore_block_db_size", "3221225472");
+  SetVal(g_conf(), "bluestore_volume_selection_policy", "use_some_extra");
+  SetVal(g_conf(), "bluestore_volume_selection_reserved", "1"); // just use non-zero to enable
+
+  g_conf().apply_changes(nullptr);
+
+  StartDeferred(65536);
+  doManySetAttr(store.get(),
+    [&](ObjectStore* _store) {
+
+      BlueStore* bstore = dynamic_cast<BlueStore*> (_store);
+      ceph_assert(bstore);
+      const PerfCounters* logger = bstore->get_bluefs_perf_counters();
+      ASSERT_EQ(0, logger->get(l_bluefs_slow_used_bytes));
+    }
+  );
+}
+
+TEST_P(StoreTestSpecificAUSize, SpilloverFixed2Test) {
+  if (string(GetParam()) != "bluestore")
+    return;
+
+  SetVal(g_conf(), "bluestore_block_db_create", "true");
+  SetVal(g_conf(), "bluestore_block_db_size", "3221225472");
+  SetVal(g_conf(), "bluestore_volume_selection_policy", "use_some_extra");
+  // the default factor of 2.0 sets the threshold too high; use a smaller value
+  // that still produces some, though less, spillover.
+  SetVal(g_conf(), "bluestore_volume_selection_reserved_factor", "0.5");
+
+  g_conf().apply_changes(nullptr);
+
+  StartDeferred(65536);
+  doManySetAttr(store.get(),
+    [&](ObjectStore* _store) {
+
+      BlueStore* bstore = dynamic_cast<BlueStore*> (_store);
+      ceph_assert(bstore);
+      const PerfCounters* logger = bstore->get_bluefs_perf_counters();
+      ASSERT_LE(logger->get(l_bluefs_slow_used_bytes), 300 * 1024 * 1024); // see SpilloverTest for 300MB choice rationale
+    }
+  );
+}
+
 #endif  // WITH_BLUESTORE
 
 int main(int argc, char **argv) {