return pool_scrub_priority > 0 ? pool_scrub_priority : cct->_conf->osd_scrub_priority;
}
-void PG::try_mark_clean()
-{
- if (actingset.size() == get_osdmap()->get_pg_size(info.pgid.pgid)) {
- state_clear(PG_STATE_FORCED_BACKFILL | PG_STATE_FORCED_RECOVERY);
- state_set(PG_STATE_CLEAN);
- info.history.last_epoch_clean = get_osdmap_epoch();
- info.history.last_interval_clean = info.history.same_interval_since;
- past_intervals.clear();
- dirty_big_info = true;
- dirty_info = true;
- }
-
- if (is_active()) {
- kick_snap_trim();
- } else if (is_peered()) {
- if (is_clean()) {
- bool target;
- if (pool.info.is_pending_merge(info.pgid.pgid, &target)) {
- if (target) {
- ldout(cct, 10) << "ready to merge (target)" << dendl;
- osd->set_ready_to_merge_target(this,
- info.last_update,
- info.history.last_epoch_started,
- info.history.last_epoch_clean);
- } else {
- ldout(cct, 10) << "ready to merge (source)" << dendl;
- osd->set_ready_to_merge_source(this, info.last_update);
- }
- }
- } else {
- ldout(cct, 10) << "not clean, not ready to merge" << dendl;
- // we should have notified OSD in Active state entry point
- }
- }
-
- state_clear(PG_STATE_FORCED_RECOVERY | PG_STATE_FORCED_BACKFILL);
-
- recovery_state.share_pg_info();
- publish_stats_to_osd();
- requeue_ops(waiting_for_clean_to_primary_repair);
-}
-
Context *PG::finish_recovery()
{
dout(10) << "finish_recovery" << dendl;
void on_active_exit() override;
Context *on_clean() override {
- try_mark_clean();
+ if (is_active()) {
+ kick_snap_trim();
+ }
+ requeue_ops(waiting_for_clean_to_primary_repair);
return finish_recovery();
}
friend class TestOpsSocketHook;
void publish_stats_to_osd() override;
- void try_mark_clean(); ///< mark an active pg clean
-
bool needs_recovery() const {
return recovery_state.needs_recovery();
}
}
}
+// Attempt to mark this PG clean and publish the result.
+//
+// Moved from PG::try_mark_clean(); the snap-trim kick and the requeue of
+// waiting_for_clean_to_primary_repair stay behind in PG::on_clean().
+// OSD-facing calls now go through the PeeringListener (pl) instead of
+// the osd pointer, and logging uses psdout rather than ldout.
+void PeeringState::try_mark_clean()
+{
+  // Only fully replicated PGs become CLEAN: the acting set must contain
+  // as many shards as the pool's configured PG size.
+  if (actingset.size() == get_osdmap()->get_pg_size(info.pgid.pgid)) {
+    state_clear(PG_STATE_FORCED_BACKFILL | PG_STATE_FORCED_RECOVERY);
+    state_set(PG_STATE_CLEAN);
+    // Record when (epoch/interval) we last became clean, and drop
+    // past_intervals -- a clean PG no longer needs that history.
+    info.history.last_epoch_clean = get_osdmap_epoch();
+    info.history.last_interval_clean = info.history.same_interval_since;
+    past_intervals.clear();
+    // Both info and the (big) past_intervals record changed; schedule
+    // them for persistence.
+    dirty_big_info = true;
+    dirty_info = true;
+  }
+
+  // Peered-but-not-active PGs check merge readiness here; active PGs
+  // are handled elsewhere (see the Active state entry point, per the
+  // note below).  Equivalent to the old "if (is_active()) ... else if
+  // (is_peered())" shape minus the active branch.
+  if (!is_active() && is_peered()) {
+    if (is_clean()) {
+      bool target;
+      // A pending pool merge involves a source PG and a target PG;
+      // notify the listener with the role-appropriate callback.
+      if (pool.info.is_pending_merge(info.pgid.pgid, &target)) {
+        if (target) {
+          psdout(10) << "ready to merge (target)" << dendl;
+          pl->set_ready_to_merge_target(
+            info.last_update,
+            info.history.last_epoch_started,
+            info.history.last_epoch_clean);
+        } else {
+          psdout(10) << "ready to merge (source)" << dendl;
+          pl->set_ready_to_merge_source(info.last_update);
+        }
+      }
+    } else {
+      psdout(10) << "not clean, not ready to merge" << dendl;
+      // we should have notified OSD in Active state entry point
+    }
+  }
+
+  // Clear forced-recovery flags even when we did not go clean above
+  // (redundant with the clear in the clean branch; kept from the
+  // original PG::try_mark_clean()).
+  state_clear(PG_STATE_FORCED_RECOVERY | PG_STATE_FORCED_BACKFILL);
+
+  // Propagate the updated pg_info to peers and publish fresh stats.
+  share_pg_info();
+  pl->publish_stats_to_osd();
+}
+
/*------------ Peering State Machine----------------*/
#undef dout_prefix
#define dout_prefix (context< PeeringMachine >().dpp->gen_prefix(*_dout) \
if (ps->info.last_complete != ps->info.last_update) {
ceph_abort();
}
+
+ ps->try_mark_clean();
+
context< PeeringMachine >().get_cur_transaction()->register_on_commit(
pl->on_clean());
}
pg_shard_t from, const pg_query_t &query, epoch_t query_epoch);
void fulfill_query(const MQuery& q, PeeringCtx *rctx);
+ void try_mark_clean();
+
public:
PeeringState(
CephContext *cct,