git-server-git.apps.pok.os.sepia.ceph.com Git - ceph.git/commitdiff
tools/bluestore: Add command 'show-sharding' to ceph-bluestore-tool 39067/head
authorAdam Kupczyk <akupczyk@redhat.com>
Tue, 19 Jan 2021 14:07:16 +0000 (15:07 +0100)
committerNeha Ojha <nojha@redhat.com>
Mon, 25 Jan 2021 22:05:47 +0000 (22:05 +0000)
Add command 'show-sharding' to ceph-bluestore-tool.

Signed-off-by: Adam Kupczyk <akupczyk@redhat.com>
(cherry picked from commit 882714e0c90e7c2492af47adf885141e57aee783)

doc/man/8/ceph-bluestore-tool.rst
src/kv/RocksDBStore.cc
src/kv/RocksDBStore.h
src/os/bluestore/bluestore_tool.cc

index 2a1813e69683a0792335545ab96892b9949c6056..bb67ccc71e7e02e7181c7dfca7eeb4d135d02a0e 100644 (file)
@@ -24,6 +24,7 @@ Synopsis
 | **ceph-bluestore-tool** bluefs-bdev-migrate --path *osd path* --dev-target *new-device* --devs-source *device1* [--devs-source *device2*]
 | **ceph-bluestore-tool** free-dump|free-score --path *osd path* [ --allocator block/bluefs-wal/bluefs-db/bluefs-slow ]
 | **ceph-bluestore-tool** reshard --path *osd path* --sharding *new sharding* [ --sharding-ctrl *control string* ]
+| **ceph-bluestore-tool** show-sharding --path *osd path*
 
 
 Description
@@ -109,6 +110,10 @@ Commands
    Interrupted resharding does not corrupt data. It is always possible to continue previous resharding,
    or select any other sharding scheme, including reverting to original one.
 
+:command:`show-sharding` --path *osd path*
+
+   Show sharding that is currently applied to BlueStore's RocksDB.
+
 Options
 =======
 
index c121734efafa486552637cba9d8b5268753f32f4..b7fa258638f41705bc3bb55d0a5104dc337b51b9 100644 (file)
@@ -3330,3 +3330,22 @@ int RocksDBStore::reshard(const std::string& new_sharding, const RocksDBStore::r
 
   return r;
 }
+
+bool RocksDBStore::get_sharding(std::string& sharding) {
+  rocksdb::Status status;
+  std::string stored_sharding_text;
+  bool result = false;
+  sharding.clear();
+
+  status = env->FileExists(sharding_def_file);
+  if (status.ok()) {
+    status = rocksdb::ReadFileToString(env,
+                                      sharding_def_file,
+                                      &stored_sharding_text);
+    if(status.ok()) {
+      result = true;
+      sharding = stored_sharding_text;
+    }
+  }
+  return result;
+}
index 969fe5bfe46d07195a8790222c0aba66c1067824..0735e115c36489d1acd0d0f1dbc60bb501774126 100644 (file)
@@ -516,7 +516,7 @@ public:
     bool   unittest_fail_after_successful_processing = false;
   };
   int reshard(const std::string& new_sharding, const resharding_ctrl* ctrl = nullptr);
-
+  bool get_sharding(std::string& sharding);
 
 };
 
index 275f73c4c5f6f05c7ca3196cab0bb35bc57fffaa..12293fdfd1b2c3bacdce6eb7cf98964850a24bd2 100644 (file)
@@ -275,7 +275,8 @@ int main(int argc, char **argv)
         "free-dump, "
         "free-score, "
         "bluefs-stats, "
-        "reshard")
+        "reshard, "
+        "show-sharding")
     ;
   po::options_description po_all("All options");
   po_all.add(po_options).add(po_positional);
@@ -942,12 +943,8 @@ int main(int argc, char **argv)
       cerr << "error preparing db environment: " << cpp_strerror(r) << std::endl;
       exit(EXIT_FAILURE);
     }
-    if (r < 0) {
-      cerr << "error starting k-v inside bluestore: " << cpp_strerror(r) << std::endl;
-      exit(EXIT_FAILURE);
-    }
-    RocksDBStore* rocks_db = dynamic_cast<RocksDBStore*>(db_ptr);
     ceph_assert(db_ptr);
+    RocksDBStore* rocks_db = dynamic_cast<RocksDBStore*>(db_ptr);
     ceph_assert(rocks_db);
     r = rocks_db->reshard(new_sharding, &ctrl);
     if (r < 0) {
@@ -956,6 +953,25 @@ int main(int argc, char **argv)
       cout << "reshard success" << std::endl;
     }
     bluestore.close_db_environment();
+  } else if (action == "show-sharding") {
+    BlueStore bluestore(cct.get(), path);
+    KeyValueDB *db_ptr;
+    int r = bluestore.open_db_environment(&db_ptr, false);
+    if (r < 0) {
+      cerr << "error preparing db environment: " << cpp_strerror(r) << std::endl;
+      exit(EXIT_FAILURE);
+    }
+    ceph_assert(db_ptr);
+    RocksDBStore* rocks_db = dynamic_cast<RocksDBStore*>(db_ptr);
+    ceph_assert(rocks_db);
+    std::string sharding;
+    bool res = rocks_db->get_sharding(sharding);
+    bluestore.close_db_environment();
+    if (!res) {
+      cerr << "failed to retrieve sharding def" << std::endl;
+      exit(EXIT_FAILURE);
+    }
+    cout << sharding << std::endl;
   } else {
     cerr << "unrecognized action " << action << std::endl;
     return 1;