get_osdmap()->get_epoch());
}
+// Schedule a retry of backfill after the cluster is no longer too full:
+// under backfill_request_lock, arm the OSD's backfill_request_timer to fire
+// after osd_backfill_retry_interval seconds, at which point the
+// QueuePeeringEvt callback re-queues a RequestBackfill peering event for
+// this PG at the osdmap epoch captured here.
+void PG::schedule_backfill_full_retry()
+{
+ Mutex::Locker lock(osd->backfill_request_lock);
+ osd->backfill_request_timer.add_event_after(
+ g_conf->osd_backfill_retry_interval,
+ new QueuePeeringEvt<RequestBackfill>(
+ this, get_osdmap()->get_epoch(),
+ RequestBackfill()));
+}
+
// Release this PG's scrub reservation state (context hunk, unchanged by the
// patch): drop the PG from the OSD's scrub work queue.
void PG::clear_scrub_reserved()
{
osd->scrub_wq.dequeue(this);
// NOTE(review): this line looks like a bad merge/extraction artifact --
// inside a PG member function 'pg' is not in scope ('this' is the PG), and
// clearing PG_STATE_BACKFILL is unrelated to scrub reservations. TODO:
// confirm against the upstream PG::clear_scrub_reserved() before applying.
pg->state_clear(PG_STATE_BACKFILL);
}
// Review note: this hunk deletes the file-scope QueuePeeringEvt helper from
// the .cc; an identical definition is re-added as a member of the PG class
// later in this patch so that PG::schedule_backfill_full_retry() and the
// recovery states can share it. Deleted lines are kept byte-for-byte below.
-template <class EVT>
-struct QueuePeeringEvt : Context {
- boost::intrusive_ptr<PG> pg;
- epoch_t epoch;
- EVT evt;
- QueuePeeringEvt(PG *pg, epoch_t epoch, EVT evt) :
- pg(pg), epoch(epoch), evt(evt) {}
- void finish(int r) {
- pg->lock();
- pg->queue_peering_event(PG::CephPeeringEvtRef(
- new PG::CephPeeringEvt(
- epoch,
- epoch,
- evt)));
- pg->unlock();
- }
-};
-
/*--WaitRemoteBackfillReserved--*/
// NOTE(review): this chunk appears spliced from two functions -- a state
// constructor signature is immediately followed by statement lines and a
// transit<NotBackfilling>() return (which only makes sense in a react()
// handler, presumably the too-full / reservation-rejected path). TODO:
// verify the hunk boundaries against the original file.
PG::RecoveryState::WaitRemoteBackfillReserved::WaitRemoteBackfillReserved(my_context ctx)
pg->state_clear(PG_STATE_BACKFILL_WAIT);
pg->state_set(PG_STATE_BACKFILL_TOOFULL);
// The actual change: replace the inline timer-arming code with the new
// PG::schedule_backfill_full_retry() helper (same lock, timer, interval,
// and RequestBackfill event -- behavior preserved, duplication removed).
- Mutex::Locker lock(pg->osd->backfill_request_lock);
- pg->osd->backfill_request_timer.add_event_after(
- g_conf->osd_backfill_retry_interval,
- new QueuePeeringEvt<RequestBackfill>(
- pg, pg->get_osdmap()->get_epoch(),
- RequestBackfill()));
+ pg->schedule_backfill_full_retry();
return transit<NotBackfilling>();
}
void sub_op_scrub_stop(OpRequestRef op);
void reject_reservation();
+ // Arm a timer to re-issue RequestBackfill for this PG after
+ // osd_backfill_retry_interval (definition added in PG.cc by this patch).
+ void schedule_backfill_full_retry();
// -- recovery state --
+ // Timer-completion context: when the timer fires, finish() takes the PG
+ // lock and queues EVT as a peering event, with both epoch_sent and
+ // epoch_requested set to the epoch captured at construction. Holds a
+ // boost::intrusive_ptr so the PG stays alive while the event is pending.
+ // Moved here from PG.cc file scope so PG methods can use it too.
+ template <class EVT>
+ struct QueuePeeringEvt : Context {
+ boost::intrusive_ptr<PG> pg;
+ epoch_t epoch;
+ EVT evt;
+ QueuePeeringEvt(PG *pg, epoch_t epoch, EVT evt) :
+ pg(pg), epoch(epoch), evt(evt) {}
+ void finish(int r) {
+ pg->lock();
+ pg->queue_peering_event(PG::CephPeeringEvtRef(
+ new PG::CephPeeringEvt(
+ epoch,
+ epoch,
+ evt)));
+ pg->unlock();
+ }
+ };
+
class CephPeeringEvt {
epoch_t epoch_sent;
epoch_t epoch_requested;