backfill_machine.process_event(*std::move(evt));
}
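+ // Expose the backfill progress marker so PG::is_degraded_or_backfilling_object()
+ // can tell whether an object lies inside the interval being backfilled.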
+ hobject_t get_last_backfill_started() const {
+ return last_backfill_started;
+ }
private:
hobject_t last_backfill_started;
BackfillInterval backfill_info;
).then([this, &pg, pgref] {
eversion_t ver;
const hobject_t& soid = m->get_hobj();
- if (pg.is_unreadable_object(soid, &ver)) {
- auto [op, fut] = osd.get_shard_services().start_operation<UrgentRecovery>(
- soid, ver, pgref, osd.get_shard_services(), m->get_min_epoch());
- return std::move(fut);
+ logger().debug("{} check for recovery, {}", *this, soid);
+ if (pg.is_unreadable_object(soid, &ver) ||
+ pg.is_degraded_or_backfilling_object(soid)) {
+ logger().debug("{} need to wait for recovery, {}", *this, soid);
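+ // If a recovery for this object is already in flight, chain onto it;
+ // otherwise start an UrgentRecovery operation for it ourselves.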
+ if (pg.get_recovery_backend()->is_recovering(soid)) {
+ return pg.get_recovery_backend()->get_recovering(soid).wait_for_recovered();
+ } else {
+ auto [op, fut] = osd.get_shard_services().start_operation<UrgentRecovery>(
+ soid, ver, pgref, osd.get_shard_services(), pg.get_osdmap_epoch());
+ return std::move(fut);
+ }
}
return seastar::now();
}).then([this, &pg] {
< peering_state.get_info().history.same_primary_since, false);
}
+bool PG::is_degraded_or_backfilling_object(const hobject_t& soid) const {
+ /* The conditions below may clear in on_local_recover (before the
+ * transaction is queued), while the degraded waiters are only requeued
+ * in on_global_recover, after the transaction completes.
+ */
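+ // Degraded if the primary itself is still missing the object.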
+ if (peering_state.get_pg_log().get_missing().get_items().count(soid))
+ return true;
+ ceph_assert(!get_acting_recovery_backfill().empty());
+ for (auto& peer : get_acting_recovery_backfill()) {
+ if (peer == get_primary()) continue;
+ auto peer_missing_entry = peering_state.get_peer_missing().find(peer);
+ // Degraded if the object is missing on any non-primary peer. (Classic
+ // OSD skips async_recovery_targets here so the op is not blocked and
+ // the object is recovered asynchronously later; that check has not
+ // been ported to crimson yet.)
+ if (peer_missing_entry != peering_state.get_peer_missing().end() &&
+ peer_missing_entry->second.get_items().count(soid)) {
+ return true;
+ }
+ // Degraded if the object sorts after the peer's last_backfill but
+ // at/before last_backfill_started, i.e. it is being backfilled right now.
+ if (is_backfill_target(peer) &&
+ peering_state.get_peer_info(peer).last_backfill <= soid &&
+ recovery_handler->backfill_state->get_last_backfill_started() >= soid &&
+ recovery_backend->is_recovering(soid)) {
+ return true;
+ }
+ }
+ return false;
+}
+
}
const set<pg_shard_t> &get_acting_recovery_backfill() const {
return peering_state.get_acting_recovery_backfill();
}
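+ // Thin wrapper so recovery/backfill code does not have to reach into
+ // peering_state directly.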
+ bool is_backfill_target(pg_shard_t osd) const {
+ return peering_state.is_backfill_target(osd);
+ }
void begin_peer_recover(pg_shard_t peer, const hobject_t oid) {
peering_state.begin_peer_recover(peer, oid);
}
!peering_state.get_missing_loc().readable_with_acting(
oid, get_actingset(), v);
}
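+ // True if soid is missing on the primary or a replica, or is inside the
+ // interval currently being backfilled; such ops must wait for recovery.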
+ bool is_degraded_or_backfilling_object(const hobject_t& soid) const;
const set<pg_shard_t> &get_actingset() const {
return peering_state.get_actingset();
}
bool budget_available() const final;
void backfilled() final;
friend crimson::osd::BackfillState::PGFacade;
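+ // Let PG::is_degraded_or_backfilling_object() read backfill progress
+ // via backfill_state.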
+ friend crimson::osd::PG;
// backfill end
};
{
logger().debug("{}", __func__);
// Check that the sender (m.from) is one of our backfill targets
- ceph_assert(pg.get_peering_state().is_backfill_target(m.from));
+ ceph_assert(pg.is_backfill_target(m.from));
BackfillInterval bi;
bi.begin = m.begin;