os/bluestore: simplify multithreaded shallow fsck.
author Igor Fedotov <ifedotov@suse.com>
Thu, 7 Nov 2019 20:14:32 +0000 (23:14 +0300)
committer Igor Fedotov <ifedotov@suse.com>
Thu, 7 Nov 2019 20:25:35 +0000 (23:25 +0300)
This patch gets rid of passing expecting_shards when doing
multithreaded fsck. Currently the latter is used for shallow fsck only,
which doesn't need such a container.
Moreover, the current implementation lacks multithreading protection, which
might cause issues in the future.
Hence simplify and drop it from the multithreaded path entirely.

Signed-off-by: Igor Fedotov <ifedotov@suse.com>
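
In effect, expecting_shards becomes a nullable pointer that is only dereferenced
(and asserted) on the non-shallow path, so the multithreaded shallow workers can
pass nullptr and never touch the unprotected shared list. A minimal sketch of
that pattern, with simplified, hypothetical names and types rather than the
actual BlueStore code:

  #include <cassert>
  #include <list>
  #include <string>

  enum FSCKDepth { FSCK_SHALLOW, FSCK_DEEP };

  // Sketch of the new calling convention: expecting_shards may be nullptr.
  // Only a non-shallow check dereferences it, and that path asserts that
  // the caller actually supplied a container.
  void check_object(FSCKDepth depth,
                    std::list<std::string>* expecting_shards)  // nullable
  {
    if (depth != FSCK_SHALLOW) {
      assert(expecting_shards);                 // deep fsck must provide it
      expecting_shards->push_back("shard-key"); // collect expected shard keys
    }
    // Shallow fsck does no shard bookkeeping, so nullptr is fine here and
    // the worker threads never share (or need to lock) the list.
  }

As the hunks below show, the shallow fsck work queue now passes nullptr for this
argument, while the single-threaded deep path continues to pass &expecting_shards.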
src/os/bluestore/BlueStore.cc
src/os/bluestore/BlueStore.h

diff --git a/src/os/bluestore/BlueStore.cc b/src/os/bluestore/BlueStore.cc
index c2b77bb47cfbe6d1c709597d9057f8c2b677226c..dee3844a7e611fbd62527cbf4690edd20958250a 100644
@@ -7184,7 +7184,7 @@ BlueStore::OnodeRef BlueStore::fsck_check_objects_shallow(
   const ghobject_t& oid,
   const string& key,
   const bufferlist& value,
-  mempool::bluestore_fsck::list<string>& expecting_shards,
+  mempool::bluestore_fsck::list<string>* expecting_shards,
   map<BlobRef, bluestore_blob_t::unused_t>* referenced,
   const BlueStore::FSCK_ObjectCtx& ctx)
 {
@@ -7216,11 +7216,12 @@ BlueStore::OnodeRef BlueStore::fsck_check_objects_shallow(
   if (!o->extent_map.shards.empty()) {
     ++num_sharded_objects;
     if (depth != FSCK_SHALLOW) {
+      ceph_assert(expecting_shards);
       for (auto& s : o->extent_map.shards) {
         dout(20) << __func__ << "    shard " << *s.shard_info << dendl;
-        expecting_shards.push_back(string());
+        expecting_shards->push_back(string());
         get_extent_shard_key(o->key, s.shard_info->offset,
-          &expecting_shards.back());
+          &expecting_shards->back());
         if (s.shard_info->offset >= o->onode.size) {
           derr << "fsck error: " << oid << " shard 0x" << std::hex
             << s.shard_info->offset << " past EOF at 0x" << o->onode.size
@@ -7412,7 +7413,6 @@ public:
     size_t batchCount;
     BlueStore* store = nullptr;
 
-    mempool::bluestore_fsck::list<string>* expecting_shards = nullptr;
     ceph::mutex* sb_info_lock = nullptr;
     BlueStore::sb_info_map_t* sb_info = nullptr;
     BlueStoreRepairer* repairer = nullptr;
@@ -7424,14 +7424,12 @@ public:
     FSCKWorkQueue(std::string n,
                   size_t _batchCount,
                   BlueStore* _store,
-                  mempool::bluestore_fsck::list<string>& _expecting_shards,
                   ceph::mutex* _sb_info_lock,
                   BlueStore::sb_info_map_t& _sb_info,
                   BlueStoreRepairer* _repairer) :
       WorkQueue_(n, time_t(), time_t()),
       batchCount(_batchCount),
       store(_store),
-      expecting_shards(&_expecting_shards),
       sb_info_lock(_sb_info_lock),
       sb_info(&_sb_info),
       repairer(_repairer)
@@ -7502,7 +7500,7 @@ public:
           entry.oid,
           entry.key,
           entry.value,
-          *expecting_shards,
+          nullptr, // expecting_shards - this will need a protection if passed
           nullptr, // referenced
           ctx);
       }
@@ -7640,7 +7638,6 @@ void BlueStore::_fsck_check_objects(FSCKDepth depth,
         "FSCKWorkQueue",
         (thread_count ? : 1) * 32,
         this,
-        expecting_shards,
         sb_info_lock,
         sb_info,
         repairer));
@@ -7767,7 +7764,7 @@ void BlueStore::_fsck_check_objects(FSCKDepth depth,
           oid,
           it->key(),
           it->value(),
-          expecting_shards,
+          &expecting_shards,
           &referenced,
           ctx);
       }
diff --git a/src/os/bluestore/BlueStore.h b/src/os/bluestore/BlueStore.h
index 9baeca02188298d260f21d8f8a1460a62ddf74a3..f80de7528cc0ddc4ab47d42c7021d854a17e140a 100644
@@ -3260,7 +3260,7 @@ public:
     const ghobject_t& oid,
     const string& key,
     const bufferlist& value,
-    mempool::bluestore_fsck::list<string>& expecting_shards,
+    mempool::bluestore_fsck::list<string>* expecting_shards,
     map<BlobRef, bluestore_blob_t::unused_t>* referenced,
     const BlueStore::FSCK_ObjectCtx& ctx);