pg_id);
}
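+// PeeringListener notification: called when the Active peering state is
+// exited; drop any in-flight backfill reservation flag and stop the
+// cache-tier agent.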
+void PG::on_active_exit()
+{
+ backfill_reserving = false;
+ agent_stop();
+}
+
void PG::on_active_advmap(const OSDMapRef &osdmap)
{
if (osdmap->require_osd_release >= CEPH_RELEASE_MIMIC) {
queue_recovery();
}
}
+
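+// Called when the backfill reservations have been granted: clear the
+// in-flight reservation flag and queue recovery so backfill can begin.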
+void PG::on_backfill_reserved()
+{
+ backfill_reserving = false;
+ queue_recovery();
+}
+
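+// Called when backfill is cancelled: clear the set of backfill targets we
+// were still waiting on and release the recovery-op slot held for backfill
+// (hobject_t::get_max() is the sentinel used to account for it).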
+void PG::on_backfill_canceled()
+{
+ if (!waiting_on_backfill.empty()) {
+ waiting_on_backfill.clear();
+ finish_recovery_op(hobject_t::get_max());
+ }
+}
+
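+// Called once the recovery reservations are in place: queue the PG for
+// recovery.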
+void PG::on_recovery_reserved()
+{
+ queue_recovery();
+}
+
void PG::do_replica_scrub_map(OpRequestRef op)
{
const MOSDRepScrubMap *m = static_cast<const MOSDRepScrubMap*>(op->get_req());
PGPeeringEventRef on_preempt) override;
void cancel_remote_recovery_reservation() override;
+ void on_active_exit() override;
void on_active_actmap() override;
void on_active_advmap(const OSDMapRef &osdmap) override;
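+ // recovery reservation notifications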
+ void on_backfill_reserved() override;
+ void on_backfill_canceled() override;
+ void on_recovery_reserved() override;
+
bool is_forced_recovery_or_backfill() const {
return recovery_state.is_forced_recovery_or_backfill();
}
DECLARE_LOCALS
ps->backfill_reserved = true;
- pg->queue_recovery();
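+ // reservation granted; let the listener (PG) queue recovery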
+ pl->on_backfill_reserved();
ps->state_clear(PG_STATE_BACKFILL_TOOFULL);
ps->state_clear(PG_STATE_BACKFILL_WAIT);
ps->state_set(PG_STATE_BACKFILLING);
{
DECLARE_LOCALS
backfill_release_reservations();
- if (!pg->waiting_on_backfill.empty()) {
- pg->waiting_on_backfill.clear();
- pg->finish_recovery_op(hobject_t::get_max());
- }
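+ // the listener clears waiting_on_backfill and finishes the backfill op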
+ pl->on_backfill_canceled();
}
boost::statechart::result
context< PeeringMachine >().log_exit(state_name, enter_time);
DECLARE_LOCALS
ps->backfill_reserved = false;
- pg->backfill_reserving = false;
ps->state_clear(PG_STATE_BACKFILLING);
ps->state_clear(PG_STATE_FORCED_BACKFILL | PG_STATE_FORCED_RECOVERY);
utime_t dur = ceph_clock_now() - enter_time;
ps->state_clear(PG_STATE_RECOVERY_WAIT);
ps->state_clear(PG_STATE_RECOVERY_TOOFULL);
ps->state_set(PG_STATE_RECOVERING);
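+ // reservations are held; let the listener queue recovery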
+ pl->on_recovery_reserved();
ceph_assert(!ps->state_test(PG_STATE_ACTIVATING));
pl->publish_stats_to_osd();
- pg->queue_recovery();
}
void PeeringState::Recovering::release_reservations(bool cancel)
pg->waiting_for_flush.swap(pg->waiting_for_peered);
}
- pg->on_activate();
+ pl->on_activate();
return discard_event();
}
ps->blocked_by.clear();
ps->backfill_reserved = false;
- pg->backfill_reserving = false;
ps->state_clear(PG_STATE_ACTIVATING);
ps->state_clear(PG_STATE_DEGRADED);
ps->state_clear(PG_STATE_UNDERSIZED);
ps->state_clear(PG_STATE_RECOVERY_TOOFULL);
utime_t dur = ceph_clock_now() - enter_time;
pl->get_peering_perf().tinc(rs_active_latency, dur);
- pg->agent_stop();
+ pl->on_active_exit();
}
/*------ReplicaActive-----*/
virtual void check_blacklisted_watchers() = 0;
virtual void clear_primary_state() = 0;
+ // Event notification
virtual void on_pool_change() = 0;
virtual void on_role_change() = 0;
virtual void on_change(ObjectStore::Transaction *t) = 0;
virtual void on_activate() = 0;
virtual void on_new_interval() = 0;
+ virtual void on_active_exit() = 0;
// active map notifications
virtual void on_active_actmap() = 0;
virtual void on_active_advmap(const OSDMapRef &osdmap) = 0;
+
+ // recovery reservation notifications
+ virtual void on_backfill_reserved() = 0;
+ virtual void on_backfill_canceled() = 0;
+ virtual void on_recovery_reserved() = 0;
+
virtual epoch_t oldest_stored_osdmap() = 0;
virtual LogChannel &get_clog() = 0;