backfill_machine.backfill_listener.enqueue_push(obj, v, peers);
}
+// Queue a standalone delete of `obj` during backfill: first record the
+// drop in the progress tracker, then tell the backfill listener to
+// propagate the delete (stamped with version `v`) to every backfill
+// peer in `peers`.  Mirrors enqueue_standalone_push() above.
+void BackfillState::enqueue_standalone_delete(
+ const hobject_t &obj,
+ const eversion_t &v,
+ const std::vector<pg_shard_t> &peers)
+{
+ progress_tracker->enqueue_drop(obj);
+ for (auto bt : peers) {
+ backfill_machine.backfill_listener.enqueue_drop(bt, obj, v);
+ }
+}
+
+// Stream-insertion operator for PGFacade; delegates to the facade's
+// virtual print() so derived facades control their own formatting.
std::ostream &operator<<(std::ostream &out, const BackfillState::PGFacade &pg) {
  return pg.print(out);
}
backfill_state->enqueue_standalone_push(obj, v, peers);
}
+// PG-level entry point for scheduling a delete of `obj` (at version `v`)
+// on the backfill peers in `peers`.  Requires an active recovery handler
+// with a live backfill state (asserted below); forwards to
+// BackfillState::enqueue_standalone_delete().  Counterpart of the push
+// path used by enqueue_push_for_backfill() above.
+void PG::enqueue_delete_for_backfill(
+ const hobject_t &obj,
+ const eversion_t &v,
+ const std::vector<pg_shard_t> &peers)
+{
+ assert(recovery_handler);
+ assert(recovery_handler->backfill_state);
+ auto backfill_state = recovery_handler->backfill_state.get();
+ backfill_state->enqueue_standalone_delete(obj, v, peers);
+}
+
PG::interruptible_future<
std::tuple<PG::interruptible_future<>,
PG::interruptible_future<>>>
bufferlist encoded_txn;
encode(txn, encoded_txn);
+ bool is_delete = false;
for (auto &le : log_entries) {
le.mark_unrollbackable();
+ if (le.is_delete()) {
+ is_delete = true;
+ }
}
std::vector<pg_shard_t> to_push_clone;
+ std::vector<pg_shard_t> to_push_delete;
auto sends = std::make_unique<std::vector<seastar::future<>>>();
for (auto &pg_shard : pg_shards) {
if (pg_shard == whoami) {
m = new_repop_msg(
pg_shard, hoid, encoded_txn, osd_op_p,
min_epoch, map_epoch, log_entries, false, tid);
- if (_new_clone && pg.is_missing_on_peer(pg_shard, hoid)) {
- // The head is in the push queue but hasn't been pushed yet.
- // We need to ensure that the newly created clone will be
- // pushed as well, otherwise we might skip it.
- // See: https://tracker.ceph.com/issues/68808
- to_push_clone.push_back(pg_shard);
+ if (pg.is_missing_on_peer(pg_shard, hoid)) {
+ if (_new_clone) {
+ // The head is in the push queue but hasn't been pushed yet.
+ // We need to ensure that the newly created clone will be
+ // pushed as well, otherwise we might skip it.
+ // See: https://tracker.ceph.com/issues/68808
+ to_push_clone.push_back(pg_shard);
+ }
+ if (is_delete) {
+ to_push_delete.push_back(pg_shard);
+ }
}
}
pending_txn->second.acked_peers.push_back({pg_shard, eversion_t{}});
return seastar::now();
}
return peers->all_committed.get_shared_future();
- }).then_interruptible([pending_txn, this, _new_clone,
+ }).then_interruptible([pending_txn, this, _new_clone, &hoid,
+ to_push_delete=std::move(to_push_delete),
to_push_clone=std::move(to_push_clone)] {
auto acked_peers = std::move(pending_txn->second.acked_peers);
pending_trans.erase(pending_txn);
_new_clone->obs.oi.version,
to_push_clone);
}
+ if (!to_push_delete.empty()) {
+ pg.enqueue_delete_for_backfill(hoid, {}, to_push_delete);
+ }
return seastar::make_ready_future<
crimson::osd::acked_peers_t>(std::move(acked_peers));
});