From: Shraddha Agrawal Date: Thu, 19 Mar 2026 08:01:28 +0000 (+0530) Subject: crimson/osd/pg_recovery: call MOSDPGRecoveryDelete instead of MOSDPGBackfillRemove X-Git-Url: http://git-server-git.apps.pok.os.sepia.ceph.com/?a=commitdiff_plain;h=refs%2Fpull%2F67925%2Fhead;p=ceph.git crimson/osd/pg_recovery: call MOSDPGRecoveryDelete instead of MOSDPGBackfillRemove This commit fixes the abort in Recovered::Recovered. There is a race to acquire the OBC lock between backfill and client delete for the same object. When the lock is acquired first by the backfill, the object is recovered first, and then deleted by the client delete request. When recovering the object, the corresponding peer_missing entry is cleared and we are able to transition to Recovered state successfully. When the lock is acquired first by the client delete request, the object is deleted. Then backfill tries to recover the object, finds it deleted and exits early. The stale peer_missing entry is not cleared. In Recovered::Recovered, needs_recovery() sees this stale peer_missing entry and calls abort. The issue is fixed by sending MOSDPGRecoveryDelete from the client path to peers and waiting for MOSDPGRecoveryDeleteReply in recover_object. 
Fixes: https://tracker.ceph.com/issues/70501 Signed-off-by: Shraddha Agrawal --- diff --git a/src/crimson/osd/backfill_state.cc b/src/crimson/osd/backfill_state.cc index 2e39d2f6629a..411b009d4891 100644 --- a/src/crimson/osd/backfill_state.cc +++ b/src/crimson/osd/backfill_state.cc @@ -747,10 +747,7 @@ void BackfillState::enqueue_standalone_delete( const eversion_t &v, const std::vector &peers) { - progress_tracker->enqueue_drop(obj); - for (auto bt : peers) { - backfill_machine.backfill_listener.enqueue_drop(bt, obj, v); - } + backfill_machine.backfill_listener.send_recovery_deletes(obj, peers); } std::ostream &operator<<(std::ostream &out, const BackfillState::PGFacade &pg) { diff --git a/src/crimson/osd/backfill_state.h b/src/crimson/osd/backfill_state.h index 5ca001b7dae0..bd30ad9778e2 100644 --- a/src/crimson/osd/backfill_state.h +++ b/src/crimson/osd/backfill_state.h @@ -374,6 +374,10 @@ struct BackfillState::BackfillListener { const hobject_t& obj, const eversion_t& v) = 0; + virtual void send_recovery_deletes( + const hobject_t& obj, + const std::vector& peers) = 0; + virtual void maybe_flush() = 0; virtual void update_peers_last_backfill( diff --git a/src/crimson/osd/pg_recovery.cc b/src/crimson/osd/pg_recovery.cc index 7e1ed5858cf2..e8828d7e84c6 100644 --- a/src/crimson/osd/pg_recovery.cc +++ b/src/crimson/osd/pg_recovery.cc @@ -15,6 +15,7 @@ #include "crimson/osd/pg_backend.h" #include "crimson/osd/pg_recovery.h" +#include "messages/MOSDPGRecoveryDelete.h" #include "osd/osd_types.h" #include "osd/PeeringState.h" @@ -574,6 +575,33 @@ void PGRecovery::enqueue_drop( req->ls.emplace_back(obj, v); } +void PGRecovery::send_recovery_deletes( + const hobject_t& obj, + const std::vector& peers) +{ + LOG_PREFIX(PGRecovery::send_recovery_deletes); + DEBUGDPP("obj={}", *pg->get_dpp(), obj); + if (!pg->get_recovery_backend()->is_recovering(obj)) { + DEBUGDPP("obj={} is not recovering, exiting early", *pg->get_dpp(), obj); + return; + } + auto& peering_state = 
pg->get_peering_state(); + epoch_t min_epoch = pg->get_last_peering_reset(); + auto& recovering = pg->get_recovery_backend()->get_recovering(obj); + for (const auto& peer : peers) { + pg_missing_item item; + if (peering_state.get_peer_missing(peer).is_missing(obj, &item)) { + std::ignore = recovering.wait_for_pushes(peer); + spg_t target_pg(pg->get_pgid().pgid, peer.shard); + auto msg = crimson::make_message( + pg->get_pg_whoami(), target_pg, pg->get_osdmap_epoch(), min_epoch); + msg->objects.push_back(std::make_pair(obj, item.need)); + std::ignore = pg->get_shard_services().send_to_osd( + peer.osd, std::move(msg), pg->get_osdmap_epoch()); + } + } +} + void PGRecovery::maybe_flush() { for (auto& [target, req] : backfill_drop_requests) { diff --git a/src/crimson/osd/pg_recovery.h b/src/crimson/osd/pg_recovery.h index 32976dfcea6d..f3736d4cbee3 100644 --- a/src/crimson/osd/pg_recovery.h +++ b/src/crimson/osd/pg_recovery.h @@ -136,6 +136,9 @@ private: const pg_shard_t& target, const hobject_t& obj, const eversion_t& v) final; + void send_recovery_deletes( + const hobject_t& obj, + const std::vector& peers) final; void maybe_flush() final; void update_peers_last_backfill( const hobject_t& new_last_backfill) final; diff --git a/src/crimson/osd/recovery_backend.h b/src/crimson/osd/recovery_backend.h index e13e349e6c8d..5e164384e436 100644 --- a/src/crimson/osd/recovery_backend.h +++ b/src/crimson/osd/recovery_backend.h @@ -189,6 +189,15 @@ public: seastar::future<> wait_for_pushes(pg_shard_t shard) { return pushes[shard].get_shared_future(); } + bool has_pushes() const { + return !pushes.empty(); + } + seastar::future<> wait_for_all_pushes() { + return seastar::parallel_for_each(pushes, + [](auto& entry) { + return entry.second.get_shared_future(); + }); + } seastar::future<> wait_for_recovered() { if (!recovered) { recovered = seastar::shared_promise<>(); diff --git a/src/crimson/osd/replicated_recovery_backend.cc b/src/crimson/osd/replicated_recovery_backend.cc index 
0711ec430ed9..a1f2f99caabf 100644 --- a/src/crimson/osd/replicated_recovery_backend.cc +++ b/src/crimson/osd/replicated_recovery_backend.cc @@ -43,15 +43,30 @@ ReplicatedRecoveryBackend::recover_object( DEBUGDPP("loading obc: {}", pg, soid); return pg.obc_loader.with_obc(soid, [FNAME, this, soid, need](auto head, auto obc) { - if (!obc->obs.exists) { - // XXX: this recovery must be triggered by backfills and the corresponding - // object must have been deleted by some client request after the object - // is enqueued for push but before the lock is acquired by the recovery. - // - // Abort the recovery in this case, a "recover_delete" must have been - // added for this object by the client request that deleted it. - return interruptor::now(); - } + if (!obc->obs.exists) { + // XXX: this recovery must be triggered by backfills and the corresponding + // object must have been deleted by some client request after the object + // is enqueued for push but before the lock is acquired by the recovery. + // + // Abort the recovery in this case. A MOSDPGRecoveryDelete must have been + // sent, for this object to peers, by the client request that deleted it. + DEBUGDPP("obj={}, v={} not found on primary, aborting backfill", pg, soid, need); + + // if client delete request sent MOSDPGRecoveryDelete, we need to wait + // for MOSDPGRecoveryDeleteReply from peers. 
+ auto& recovery_waiter = get_recovering(soid); + if (recovery_waiter.has_pushes()) { + DEBUGDPP("obj={}, v={} waiting for pushes", pg, soid, need); + return interruptor::make_interruptible( + recovery_waiter.wait_for_all_pushes() + ).then_interruptible([this, soid] { + object_stat_sum_t stat_diff; + stat_diff.num_objects_recovered = 1; + pg.get_recovery_handler()->on_global_recover(soid, stat_diff, true); + }); + } + return interruptor::now(); + } DEBUGDPP("loaded obc: {}", pg, obc->obs.oi.soid); auto& recovery_waiter = get_recovering(soid); recovery_waiter.obc = obc; diff --git a/src/test/crimson/test_backfill.cc b/src/test/crimson/test_backfill.cc index 8a486570b85b..2200d2810dee 100644 --- a/src/test/crimson/test_backfill.cc +++ b/src/test/crimson/test_backfill.cc @@ -166,6 +166,10 @@ class BackfillFixture : public crimson::osd::BackfillState::BackfillListener { const hobject_t& obj, const eversion_t& v) override; + void send_recovery_deletes( + const hobject_t& obj, + const std::vector& peers) override; + void maybe_flush() override; void update_peers_last_backfill( @@ -399,6 +403,13 @@ void BackfillFixture::enqueue_drop( enqueued_drops[target].emplace_back(obj, v); } +void BackfillFixture::send_recovery_deletes( + const hobject_t& obj, + const std::vector& peers) +{ + // no-op in test mock +} + void BackfillFixture::maybe_flush() { for (const auto& [target, versioned_objs] : enqueued_drops) {