state_clear(PG_STATE_FORCED_RECOVERY | PG_STATE_FORCED_BACKFILL);
- share_pg_info();
+ recovery_state.share_pg_info();
publish_stats_to_osd();
requeue_ops(waiting_for_clean_to_primary_repair);
}
scrub_unreserve_replicas();
if (is_active() && is_primary()) {
- share_pg_info();
- }
-}
-
-void PG::share_pg_info()
-{
- dout(10) << "share_pg_info" << dendl;
-
- // share new pg_info_t with replicas
- ceph_assert(!acting_recovery_backfill.empty());
- for (set<pg_shard_t>::iterator i = acting_recovery_backfill.begin();
- i != acting_recovery_backfill.end();
- ++i) {
- if (*i == pg_whoami) continue;
- auto pg_shard = *i;
- auto peer = peer_info.find(pg_shard);
- if (peer != peer_info.end()) {
- peer->second.last_epoch_started = info.last_epoch_started;
- peer->second.last_interval_started = info.last_interval_started;
- peer->second.history.merge(info.history);
- }
- MOSDPGInfo *m = new MOSDPGInfo(get_osdmap_epoch());
- m->pg_list.emplace_back(
- pg_notify_t(
- pg_shard.shard, pg_whoami.shard,
- get_osdmap_epoch(),
- get_osdmap_epoch(),
- info),
- past_intervals);
- osd->send_message_osd_cluster(pg_shard.osd, m, get_osdmap_epoch());
+ recovery_state.share_pg_info();
}
}
bool queue_scrub();
unsigned get_scrub_priority();
- /// share pg info after a pg is active
- void share_pg_info();
-
-
bool append_log_entries_update_missing(
const mempool::osd_pglog::list<pg_log_entry_t> &entries,
ObjectStore::Transaction &t,
}
}
+// Broadcast our current pg_info_t to every shard in the
+// acting/recovery/backfill set (skipping ourselves), so replicas pick up
+// the updated last_epoch_started / last_interval_started / history.
+// Moved here from PG::share_pg_info; messaging now goes through the
+// PeeringListener (pl) so PeeringState stays decoupled from OSDService.
+void PeeringState::share_pg_info()
+{
+ psdout(10) << "share_pg_info" << dendl;
+
+ // share new pg_info_t with replicas
+ ceph_assert(!acting_recovery_backfill.empty());
+ for (set<pg_shard_t>::iterator i = acting_recovery_backfill.begin();
+ i != acting_recovery_backfill.end();
+ ++i) {
+ if (*i == pg_whoami) continue;
+ auto pg_shard = *i;
+ auto peer = peer_info.find(pg_shard);
+ if (peer != peer_info.end()) {
+ // Keep our cached copy of this peer's info consistent with what we
+ // are about to send, so later peering decisions see the same epochs.
+ peer->second.last_epoch_started = info.last_epoch_started;
+ peer->second.last_interval_started = info.last_interval_started;
+ peer->second.history.merge(info.history);
+ }
+ // One MOSDPGInfo per peer; both epoch fields use the current osdmap
+ // epoch, and past_intervals rides along with the notify payload.
+ MOSDPGInfo *m = new MOSDPGInfo(get_osdmap_epoch());
+ m->pg_list.emplace_back(
+ pg_notify_t(
+ pg_shard.shard, pg_whoami.shard,
+ get_osdmap_epoch(),
+ get_osdmap_epoch(),
+ info),
+ past_intervals);
+ pl->send_cluster_message(pg_shard.osd, m, get_osdmap_epoch());
+ }
+}
+
+
/*------------ Peering State Machine----------------*/
#undef dout_prefix
#define dout_prefix (context< PeeringMachine >().dpp->gen_prefix(*_dout) \
// purged snaps and (b) perhaps share more snaps that we have purged
// but didn't fit in pg_stat_t.
need_publish = true;
- pg->share_pg_info();
+ ps->share_pg_info();
}
for (size_t i = 0; i < ps->want_acting.size(); i++) {
ps->info.history.last_interval_started = ps->info.last_interval_started;
ps->dirty_info = true;
- pg->share_pg_info();
+ ps->share_pg_info();
pl->publish_stats_to_osd();
pl->on_activate_complete();
map<int, map<spg_t,pg_query_t> >& query_map,
map<int, vector<pair<pg_notify_t, PastIntervals> > > *activator_map,
PeeringCtx *ctx);
+ void share_pg_info();
public:
PeeringState(
info.stats.pin_stats_invalid = false;
info.stats.manifest_stats_invalid = false;
publish_stats_to_osd();
- share_pg_info();
+ recovery_state.share_pg_info();
}
}
// Clear object context cache to get repair information
int tr = pg->osd->store->queue_transaction(pg->ch, std::move(t), NULL);
ceph_assert(tr == 0);
- pg->share_pg_info();
+ pg->recovery_state.share_pg_info();
post_event(KickTrim());
return transit< NotTrimming >();
}