From: Shilpa Jagannath
Date: Fri, 12 Jan 2024 17:59:57 +0000 (-0500)
Subject: rgw/multisite: add datalog entry when transitioning to 'DELETED' log layout
X-Git-Tag: v20.3.0~161^2~16
X-Git-Url: http://git-server-git.apps.pok.os.sepia.ceph.com/?a=commitdiff_plain;h=2e4a7db3a4da86bcc64aedf7995e073f1d29d30c;p=ceph.git

rgw/multisite: add datalog entry when transitioning to 'DELETED' log layout

Signed-off-by: Shilpa Jagannath
---

diff --git a/src/rgw/driver/rados/rgw_bucket.cc b/src/rgw/driver/rados/rgw_bucket.cc
index 9d02f21377a0..634747c141a2 100644
--- a/src/rgw/driver/rados/rgw_bucket.cc
+++ b/src/rgw/driver/rados/rgw_bucket.cc
@@ -2879,8 +2879,20 @@ int RGWBucketInstanceMetadataHandler::put_prepare(
     //bucket instance cleanup in multisite setup
     const auto& log = bci.info.layout.logs.back();
     if (bci.info.bucket_deleted() && log.layout.type != rgw::BucketLogType::Deleted) {
-      bci.info.layout.logs.push_back({0, {rgw::BucketLogType::Deleted}});
+      const auto index_log = bci.info.layout.logs.back();
+      const int shards_num = rgw::num_shards(index_log.layout.in_index);
+      bci.info.layout.logs.push_back({log.gen + 1, {rgw::BucketLogType::Deleted}});
       ldpp_dout(dpp, 10) << "store log layout type: " << bci.info.layout.logs.back().layout.type << dendl;
+      for (int i = 0; i < shards_num; ++i) {
+        ldpp_dout(dpp, 10) << "adding to data_log shard_id: " << i << " of gen: " << index_log.gen << dendl;
+        ret = bihandler->svc.datalog_rados->add_entry(dpp, bci.info, index_log, i,
+                                                      null_yield);
+        if (ret < 0) {
+          ldpp_dout(dpp, 1) << "WARNING: failed writing data log for bucket="
+              << bci.info.bucket << ", shard_id=" << i << " of generation="
+              << index_log.gen << dendl;
+        } // datalog error is not fatal
+      }
     }
   }
 
diff --git a/src/rgw/driver/rados/rgw_trim_bilog.cc b/src/rgw/driver/rados/rgw_trim_bilog.cc
index edc157cb6236..7d7dc08621f5 100644
--- a/src/rgw/driver/rados/rgw_trim_bilog.cc
+++ b/src/rgw/driver/rados/rgw_trim_bilog.cc
@@ -559,13 +559,16 @@ int take_min_status(
   const uint64_t min_generation,
   std::vector<StatusShards>::const_iterator first,
   std::vector<StatusShards>::const_iterator last,
-  std::vector<std::string> *status) {
+  std::vector<std::string> *status, const DoutPrefixProvider *dpp) {
   for (auto peer = first; peer != last; ++peer) {
     // Peers on later generations don't get a say in the matter
     if (peer->generation > min_generation) {
       continue;
     }
     if (peer->shards.size() != status->size()) {
+      ldpp_dout(dpp, 5) << __PRETTY_FUNCTION__ << ": ERROR: shards don't match."
+          << " peer shards: " << peer->shards.size() << " my shards: " << status->size()
+          << " for generation: " << peer->generation << dendl;
       // all peers must agree on the number of shards
       return -EINVAL;
     }
@@ -794,7 +797,7 @@ int BucketTrimInstanceCR::operate(const DoutPrefixProvider *dpp)
     retcode = take_min_status(cct, totrim.gen, peer_status.cbegin(),
-                              peer_status.cend(), &min_markers);
+                              peer_status.cend(), &min_markers, dpp);
     if (retcode < 0) {
       ldpp_dout(dpp, 4) << "failed to correlate bucket sync status from peers" << dendl;
       return set_cr_error(retcode);
diff --git a/src/rgw/rgw_bucket_layout.h b/src/rgw/rgw_bucket_layout.h
index 647ec5cd2e9a..e9973f502019 100644
--- a/src/rgw/rgw_bucket_layout.h
+++ b/src/rgw/rgw_bucket_layout.h
@@ -218,7 +218,6 @@ inline auto matches_gen(uint64_t gen)
 
 inline bucket_index_layout_generation log_to_index_layout(const bucket_log_layout_generation& log_layout)
 {
-  ceph_assert(log_layout.layout.type == BucketLogType::InIndex);
   bucket_index_layout_generation index;
   index.gen = log_layout.layout.in_index.gen;
   index.layout.normal = log_layout.layout.in_index.layout;
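
For context: the 'Deleted' log layout is pushed as generation log.gen+1 rather
than generation 0 because peers walk a bucket's log generations in increasing
order, so the terminal 'Deleted' entry must follow the last InIndex generation.
A minimal standalone sketch of that sequencing, assuming simplified stand-in
types (not the real definitions in rgw_bucket_layout.h):

// Standalone sketch; 'bucket_log_layout_generation' here is a simplified
// stand-in for the real type in rgw_bucket_layout.h.
#include <cstdint>
#include <iostream>
#include <vector>

enum class BucketLogType { InIndex, Deleted };

struct bucket_log_layout_generation {
  uint64_t gen = 0;
  BucketLogType type = BucketLogType::InIndex;
};

int main() {
  // generations accumulated over the bucket's lifetime (e.g. via reshard)
  std::vector<bucket_log_layout_generation> logs = {
    {0, BucketLogType::InIndex},
    {1, BucketLogType::InIndex},
  };

  // transition to 'Deleted': continue the sequence from the latest generation
  const auto& log = logs.back();
  logs.push_back({log.gen + 1, BucketLogType::Deleted});

  for (const auto& l : logs) {
    std::cout << "gen " << l.gen << ": "
              << (l.type == BucketLogType::Deleted ? "Deleted" : "InIndex")
              << '\n';
  }
  return 0;
}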
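
Similarly, a standalone sketch of the contract that take_min_status() enforces
(the one the new error log reports on), using hypothetical simplified names
(PeerStatus, take_min_markers) rather than the real StatusShards plumbing:
peers past the minimum generation are skipped, every remaining peer must agree
on the shard count, and the result is the per-shard minimum marker.

// Simplified model; names and marker representation are illustrative only.
#include <algorithm>
#include <cerrno>
#include <cstdint>
#include <iostream>
#include <string>
#include <vector>

struct PeerStatus {
  uint64_t generation = 0;
  std::vector<std::string> shards;  // one sync marker per shard
};

int take_min_markers(uint64_t min_generation,
                     const std::vector<PeerStatus>& peers,
                     std::vector<std::string>* status) {
  for (const auto& peer : peers) {
    // peers on later generations don't get a say in the matter
    if (peer.generation > min_generation) {
      continue;
    }
    // all peers must agree on the number of shards
    if (peer.shards.size() != status->size()) {
      return -EINVAL;
    }
    for (size_t i = 0; i < status->size(); ++i) {
      // keep the least-advanced marker per shard; trimming past it is unsafe
      (*status)[i] = std::min((*status)[i], peer.shards[i]);
    }
  }
  return 0;
}

int main() {
  std::vector<PeerStatus> peers = {
    {1, {"0005", "0003"}},  // peer on generation 1
    {2, {"0009", "0008"}},  // later generation: ignored
  };
  std::vector<std::string> markers = {"0007", "0004"};  // local status
  if (take_min_markers(1, peers, &markers) == 0) {
    std::cout << markers[0] << " " << markers[1] << '\n';  // prints "0005 0003"
  }
  return 0;
}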