rgw/multisite: add datalog entry when transitioning to 'DELETED' log layout
author    Shilpa Jagannath <smanjara@redhat.com>
Fri, 12 Jan 2024 17:59:57 +0000 (12:59 -0500)
committer Shilpa Jagannath <smanjara@redhat.com>
Fri, 4 Apr 2025 17:16:53 +0000 (13:16 -0400)
Signed-off-by: Shilpa Jagannath <smanjara@redhat.com>
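
What the diff does, in short: when RGWBucketInstanceMetadataHandler::put_prepare() sees a deleted bucket instance, it now appends a log generation of type 'Deleted' (gen+1) and writes a datalog entry for every shard of the previous in-index log generation, presumably so that peer zones observe the layout transition through the data changes log; a datalog write failure is only warned about, not treated as fatal. The self-contained sketch below models that fan-out pattern only; LogGeneration, datalog_add_entry() and transition_to_deleted() are hypothetical simplifications for illustration, not RGW's actual types or API.

// Minimal sketch of the shard fan-out performed on the 'Deleted' transition.
// Hypothetical simplified types; the real code uses RGWBucketInfo,
// rgw::bucket_log_layout_generation and the datalog service's add_entry().
#include <cstdint>
#include <iostream>
#include <vector>

enum class LogType { InIndex, Deleted };

struct LogGeneration {
  uint64_t gen = 0;
  LogType type = LogType::InIndex;
  uint32_t num_shards = 0;   // shard count of the in-index log
};

// stand-in for the datalog service; returns 0 on success, <0 on error
int datalog_add_entry(uint64_t gen, uint32_t shard) {
  std::cout << "datalog entry: gen=" << gen << " shard=" << shard << '\n';
  return 0;
}

// On bucket-instance deletion: append a 'Deleted' log generation, then write
// one datalog entry per shard of the previous in-index generation. Datalog
// failures are warned about but not fatal, matching the patch.
void transition_to_deleted(std::vector<LogGeneration>& logs) {
  const LogGeneration prev = logs.back();
  if (prev.type == LogType::Deleted) {
    return;  // already transitioned
  }
  logs.push_back({prev.gen + 1, LogType::Deleted, 0});
  for (uint32_t shard = 0; shard < prev.num_shards; ++shard) {
    if (datalog_add_entry(prev.gen, shard) < 0) {
      std::cerr << "WARNING: failed writing data log for shard " << shard << '\n';
    }
  }
}

int main() {
  std::vector<LogGeneration> logs = {{0, LogType::InIndex, 11}};
  transition_to_deleted(logs);
}
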
src/rgw/driver/rados/rgw_bucket.cc
src/rgw/driver/rados/rgw_trim_bilog.cc
src/rgw/rgw_bucket_layout.h

diff --git a/src/rgw/driver/rados/rgw_bucket.cc b/src/rgw/driver/rados/rgw_bucket.cc
index 9d02f21377a0e13b989d0d50c65953b8f6227ea3..634747c141a2cfa5009b11665788fce843e8680d 100644
--- a/src/rgw/driver/rados/rgw_bucket.cc
+++ b/src/rgw/driver/rados/rgw_bucket.cc
@@ -2879,8 +2879,20 @@ int RGWBucketInstanceMetadataHandler::put_prepare(
     //bucket instance cleanup in multisite setup
     const auto& log = bci.info.layout.logs.back();
     if (bci.info.bucket_deleted() && log.layout.type != rgw::BucketLogType::Deleted) {
-      bci.info.layout.logs.push_back({0, {rgw::BucketLogType::Deleted}});
+      const auto index_log = bci.info.layout.logs.back();
+      const int shards_num = rgw::num_shards(index_log.layout.in_index);
+      bci.info.layout.logs.push_back({log.gen+1, {rgw::BucketLogType::Deleted}});
       ldpp_dout(dpp, 10) << "store log layout type: " <<  bci.info.layout.logs.back().layout.type << dendl;
+      for (int i = 0; i < shards_num; ++i) {
+        ldpp_dout(dpp, 10) << "adding to data_log shard_id: " << i << " of gen:" << index_log.gen << dendl;
+        ret = bihandler->svc.datalog_rados->add_entry(dpp, bci.info, index_log, i,
+                                                    null_yield);
+        if (ret < 0) {
+          ldpp_dout(dpp, 1) << "WARNING: failed writing data log for bucket="
+              << bci.info.bucket << ", shard_id=" << i << " of generation="
+              << index_log.gen << dendl;
+        } // datalog error is not fatal
+      }
     }
   }
 
diff --git a/src/rgw/driver/rados/rgw_trim_bilog.cc b/src/rgw/driver/rados/rgw_trim_bilog.cc
index edc157cb6236fee10467347a36b43e1d840e7ca0..7d7dc08621f58851cd9be649484661dedebb271f 100644
--- a/src/rgw/driver/rados/rgw_trim_bilog.cc
+++ b/src/rgw/driver/rados/rgw_trim_bilog.cc
@@ -559,13 +559,16 @@ int take_min_status(
   const uint64_t min_generation,
   std::vector<BucketTrimInstanceCR::StatusShards>::const_iterator first,
   std::vector<BucketTrimInstanceCR::StatusShards>::const_iterator last,
-  std::vector<std::string> *status) {
+  std::vector<std::string> *status, const DoutPrefixProvider *dpp) {
   for (auto peer = first; peer != last; ++peer) {
     // Peers on later generations don't get a say in the matter
     if (peer->generation > min_generation) {
       continue;
     }
     if (peer->shards.size() != status->size()) {
+      ldpp_dout(dpp, 5) << __PRETTY_FUNCTION__ << ": ERROR: shards don't match."
+          << " peer shards:" << peer->shards.size() << " my shards:" << status->size()
+          << " for generation:" << peer->generation << dendl;
       // all peers must agree on the number of shards
       return -EINVAL;
     }
@@ -794,7 +797,7 @@ int BucketTrimInstanceCR::operate(const DoutPrefixProvider *dpp)
 
 
       retcode = take_min_status(cct, totrim.gen, peer_status.cbegin(),
-                               peer_status.cend(), &min_markers);
+                               peer_status.cend(), &min_markers, dpp);
       if (retcode < 0) {
        ldpp_dout(dpp, 4) << "failed to correlate bucket sync status from peers" << dendl;
        return set_cr_error(retcode);
diff --git a/src/rgw/rgw_bucket_layout.h b/src/rgw/rgw_bucket_layout.h
index 647ec5cd2e9a0e79612e210261e2c30a04edda67..e9973f5020191cab08c83a852205ea4a84eaac85 100644
--- a/src/rgw/rgw_bucket_layout.h
+++ b/src/rgw/rgw_bucket_layout.h
@@ -218,7 +218,6 @@ inline auto matches_gen(uint64_t gen)
 
 inline bucket_index_layout_generation log_to_index_layout(const bucket_log_layout_generation& log_layout)
 {
-  ceph_assert(log_layout.layout.type == BucketLogType::InIndex);
   bucket_index_layout_generation index;
   index.gen = log_layout.layout.in_index.gen;
   index.layout.normal = log_layout.layout.in_index.layout;