osd/: move share_pg_info to PeeringState
author Samuel Just <sjust@redhat.com>
Fri, 12 Apr 2019 22:33:29 +0000 (15:33 -0700)
committer sjust@redhat.com <sjust@redhat.com>
Wed, 1 May 2019 18:22:22 +0000 (11:22 -0700)
Signed-off-by: Samuel Just <sjust@redhat.com>
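Note on the change: this commit moves the body of share_pg_info() from PG into PeeringState. Besides relocating the code, the diff below routes the outgoing MOSDPGInfo through the PeeringState listener (pl->send_cluster_message) instead of calling the OSD service directly (osd->send_message_osd_cluster), presumably to keep PeeringState decoupled from OSD internals. A rough sketch of that indirection, using abbreviated stand-in names rather than the actual Ceph declarations:

    // Sketch only: abbreviated stand-ins, not the real Ceph headers.
    // PeeringState talks to the outside world through a listener
    // interface instead of holding an OSDService pointer itself.
    struct PeeringListener {
      virtual void send_cluster_message(int osd, Message *m, epoch_t epoch) = 0;
      virtual ~PeeringListener() {}
    };

    // PG, which owns the PeeringState as recovery_state, can implement
    // the listener by forwarding to the OSD service it already holds:
    void PG::send_cluster_message(int target_osd, Message *m, epoch_t epoch)
    {
      osd->send_message_osd_cluster(target_osd, m, epoch);
    }

Call sites that previously used PG::share_pg_info() now go through recovery_state.share_pg_info(), as the hunks below show.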
src/osd/PG.cc
src/osd/PG.h
src/osd/PeeringState.cc
src/osd/PeeringState.h
src/osd/PrimaryLogPG.cc

diff --git a/src/osd/PG.cc b/src/osd/PG.cc
index ddf5cf20f21f0fe399ea68a6883261a40c3dc840..dcb6331470f043ff48acb249da8aad32d03e373b 100644
--- a/src/osd/PG.cc
+++ b/src/osd/PG.cc
@@ -597,7 +597,7 @@ void PG::try_mark_clean()
 
   state_clear(PG_STATE_FORCED_RECOVERY | PG_STATE_FORCED_BACKFILL);
 
-  share_pg_info();
+  recovery_state.share_pg_info();
   publish_stats_to_osd();
   requeue_ops(waiting_for_clean_to_primary_repair);
 }
@@ -4188,36 +4188,7 @@ void PG::scrub_finish()
   scrub_unreserve_replicas();
 
   if (is_active() && is_primary()) {
-    share_pg_info();
-  }
-}
-
-void PG::share_pg_info()
-{
-  dout(10) << "share_pg_info" << dendl;
-
-  // share new pg_info_t with replicas
-  ceph_assert(!acting_recovery_backfill.empty());
-  for (set<pg_shard_t>::iterator i = acting_recovery_backfill.begin();
-       i != acting_recovery_backfill.end();
-       ++i) {
-    if (*i == pg_whoami) continue;
-    auto pg_shard = *i;
-    auto peer = peer_info.find(pg_shard);
-    if (peer != peer_info.end()) {
-      peer->second.last_epoch_started = info.last_epoch_started;
-      peer->second.last_interval_started = info.last_interval_started;
-      peer->second.history.merge(info.history);
-    }
-    MOSDPGInfo *m = new MOSDPGInfo(get_osdmap_epoch());
-    m->pg_list.emplace_back(
-       pg_notify_t(
-         pg_shard.shard, pg_whoami.shard,
-         get_osdmap_epoch(),
-         get_osdmap_epoch(),
-         info),
-       past_intervals);
-    osd->send_message_osd_cluster(pg_shard.osd, m, get_osdmap_epoch());
+    recovery_state.share_pg_info();
   }
 }
 
diff --git a/src/osd/PG.h b/src/osd/PG.h
index 53e6f73521c1d5d249595c51675b25ede9ecba9b..2c7a0f986cc7b345edc257ed7593b6460930a2a8 100644
--- a/src/osd/PG.h
+++ b/src/osd/PG.h
@@ -1531,10 +1531,6 @@ protected:
   bool queue_scrub();
   unsigned get_scrub_priority();
 
-  /// share pg info after a pg is active
-  void share_pg_info();
-
-
   bool append_log_entries_update_missing(
     const mempool::osd_pglog::list<pg_log_entry_t> &entries,
     ObjectStore::Transaction &t,
diff --git a/src/osd/PeeringState.cc b/src/osd/PeeringState.cc
index d66d83613b741db9e847efeac65c87dcea84a528..0166912c77e8e1749b81cc74de32e774824f1e78 100644
--- a/src/osd/PeeringState.cc
+++ b/src/osd/PeeringState.cc
@@ -2334,6 +2334,35 @@ void PeeringState::activate(
   }
 }
 
+void PeeringState::share_pg_info()
+{
+  psdout(10) << "share_pg_info" << dendl;
+
+  // share new pg_info_t with replicas
+  ceph_assert(!acting_recovery_backfill.empty());
+  for (set<pg_shard_t>::iterator i = acting_recovery_backfill.begin();
+       i != acting_recovery_backfill.end();
+       ++i) {
+    if (*i == pg_whoami) continue;
+    auto pg_shard = *i;
+    auto peer = peer_info.find(pg_shard);
+    if (peer != peer_info.end()) {
+      peer->second.last_epoch_started = info.last_epoch_started;
+      peer->second.last_interval_started = info.last_interval_started;
+      peer->second.history.merge(info.history);
+    }
+    MOSDPGInfo *m = new MOSDPGInfo(get_osdmap_epoch());
+    m->pg_list.emplace_back(
+      pg_notify_t(
+       pg_shard.shard, pg_whoami.shard,
+       get_osdmap_epoch(),
+       get_osdmap_epoch(),
+       info),
+      past_intervals);
+    pl->send_cluster_message(pg_shard.osd, m, get_osdmap_epoch());
+  }
+}
+
 /*------------ Peering State Machine----------------*/
 #undef dout_prefix
 #define dout_prefix (context< PeeringMachine >().dpp->gen_prefix(*_dout) \
@@ -3689,7 +3718,7 @@ boost::statechart::result PeeringState::Active::react(const AdvMap& advmap)
     // purged snaps and (b) perhaps share more snaps that we have purged
     // but didn't fit in pg_stat_t.
     need_publish = true;
-    pg->share_pg_info();
+    ps->share_pg_info();
   }
 
   for (size_t i = 0; i < ps->want_acting.size(); i++) {
@@ -3938,7 +3967,7 @@ boost::statechart::result PeeringState::Active::react(const AllReplicasActivated
   ps->info.history.last_interval_started = ps->info.last_interval_started;
   ps->dirty_info = true;
 
-  pg->share_pg_info();
+  ps->share_pg_info();
   pl->publish_stats_to_osd();
 
   pl->on_activate_complete();
diff --git a/src/osd/PeeringState.h b/src/osd/PeeringState.h
index 6313136dffa30eca1c649439fcc18281a57b14fb..1549628b6427e8f1744300317d72c6d65d0b4a8e 100644
--- a/src/osd/PeeringState.h
+++ b/src/osd/PeeringState.h
@@ -1346,6 +1346,7 @@ public:
     map<int, map<spg_t,pg_query_t> >& query_map,
     map<int, vector<pair<pg_notify_t, PastIntervals> > > *activator_map,
     PeeringCtx *ctx);
+  void share_pg_info();
 
 public:
   PeeringState(
diff --git a/src/osd/PrimaryLogPG.cc b/src/osd/PrimaryLogPG.cc
index b24b2ec31b82d809813bcb43c8d642d59a5c3cbb..b2b9c914ea19ce5b6ba3e50ccdf0c9455e58a2f4 100644
--- a/src/osd/PrimaryLogPG.cc
+++ b/src/osd/PrimaryLogPG.cc
@@ -15180,7 +15180,7 @@ void PrimaryLogPG::_scrub_finish()
       info.stats.pin_stats_invalid = false;
       info.stats.manifest_stats_invalid = false;
       publish_stats_to_osd();
-      share_pg_info();
+      recovery_state.share_pg_info();
     }
   }
   // Clear object context cache to get repair information
@@ -15373,7 +15373,7 @@ boost::statechart::result PrimaryLogPG::AwaitAsyncWork::react(const DoSnapWork&)
     int tr = pg->osd->store->queue_transaction(pg->ch, std::move(t), NULL);
     ceph_assert(tr == 0);
 
-    pg->share_pg_info();
+    pg->recovery_state.share_pg_info();
     post_event(KickTrim());
     return transit< NotTrimming >();
   }