}
pg->handle_initialize(rctx);
- pg->handle_activate_map(rctx);
+ pg->handle_activate_map(rctx, startmap->get_epoch());
dispatch_context(rctx, pg.get(), osdmap, nullptr);
OSDMapRef lastmap = pg->get_osdmap();
set<PGRef> new_pgs; // any split children
bool ret = true;
+ auto first_new_epoch = pg->get_osdmap_epoch() + 1;
unsigned old_pg_num = lastmap->have_pg_pool(pg->pg_id.pool()) ?
lastmap->get_pg_num(pg->pg_id.pool()) : 0;
- for (epoch_t next_epoch = pg->get_osdmap_epoch() + 1;
+ for (epoch_t next_epoch = first_new_epoch;
next_epoch <= osd_epoch;
++next_epoch) {
OSDMapRef nextmap = service.try_get_map(next_epoch);
old_pg_num = new_pg_num;
handle.reset_tp_timeout();
}
- pg->handle_activate_map(rctx);
+ pg->handle_activate_map(rctx, first_new_epoch);
ret = true;
out:
rctx);
}
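A minimal sketch of the contract the hunk above introduces, using hypothetical stand-ins (ToyPG, advance_to(), a bare epoch_t alias) rather than the real PG/OSDMap machinery: the caller notes the first epoch it is about to consume, advances one epoch at a time, and then issues a single activation call covering the whole range.

// Sketch only; ToyPG and advance_to() are hypothetical stand-ins, not Ceph code.
#include <cassert>

using epoch_t = unsigned;

struct ToyPG {
  epoch_t map_epoch = 10;

  // Mirrors the new signature: one call covers [range_starts_at, map_epoch].
  void handle_activate_map(epoch_t range_starts_at) {
    assert(range_starts_at <= map_epoch + 1);
  }
};

void advance_to(ToyPG& pg, epoch_t osd_epoch) {
  const epoch_t first_new_epoch = pg.map_epoch + 1;
  for (epoch_t e = first_new_epoch; e <= osd_epoch; ++e) {
    pg.map_epoch = e;  // stands in for one handle_advance_map() per epoch
  }
  // A single activation reports the start of the consumed range; the end of
  // the range is implicit in the PG's current map epoch.
  pg.handle_activate_map(first_new_epoch);
}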
-void PG::handle_activate_map(PeeringCtx &rctx)
+void PG::handle_activate_map(PeeringCtx &rctx, epoch_t range_starts_at)
{
- dout(10) << __func__ << ": " << get_osdmap()->get_epoch()
- << dendl;
+ dout(10) << fmt::format("{}: epoch range: {}..{}", __func__, range_starts_at,
+ get_osdmap()->get_epoch())
+ << dendl;
recovery_state.activate_map(rctx);
-
requeue_map_waiters();
- // pool options affecting scrub may have changed
- on_scrub_schedule_input_change();
+ // If pool.info changed during this sequence of map updates, invoke
+ // on_scrub_schedule_input_change() as pool.info contains scrub scheduling
+ // parameters.
+ if (pool.info.last_change >= range_starts_at) {
+ on_scrub_schedule_input_change();
+ }
}
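The guard above works because pg_pool_t::last_change records the epoch at which the pool was most recently modified, so a change lands inside exactly one advanced range and the scrub-scheduling hook fires once per actual change rather than once per activation. A hedged sketch of the test (pool_changed_in_range() is a made-up name for illustration, not a Ceph function):

// Sketch only; pool_changed_in_range() isolates the comparison used above.
using epoch_t = unsigned;

inline bool pool_changed_in_range(epoch_t pool_last_change,
                                  epoch_t range_starts_at) {
  // True iff the most recent pool modification falls at or after the first
  // epoch of the range just processed; earlier changes were already handled
  // by a previous activation.
  return pool_last_change >= range_starts_at;
}

// Example: maps 11..15 were just advanced and the pool last changed at 13:
//   pool_changed_in_range(13, 11) == true   // reschedule scrubs
// The next activation covers 16..20 with no further pool change:
//   pool_changed_in_range(13, 16) == false  // nothing to redo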
void PG::handle_initialize(PeeringCtx &rctx)
std::vector<int>& newup, int up_primary,
std::vector<int>& newacting, int acting_primary,
PeeringCtx &rctx);
- void handle_activate_map(PeeringCtx &rctx);
+
+ /**
+ * \note: handle_activate_map() is not called once per epoch; a single
+ * call may cover a range of consecutive map epochs. We therefore supply
+ * it with the first epoch of that range (the end of the range is the
+ * PG's current map epoch).
+ */
+ void handle_activate_map(PeeringCtx &rctx, epoch_t range_starts_at);
+
void handle_initialize(PeeringCtx &rctx);
void handle_query_state(ceph::Formatter *f);