git.apps.os.sepia.ceph.com Git - ceph-ci.git/commitdiff
osd/: move last_..._to_applied, backfill_targets, async_recovery_targets to PeeringState
author Samuel Just <sjust@redhat.com>
Sat, 6 Apr 2019 00:24:23 +0000 (17:24 -0700)
committer sjust@redhat.com <sjust@redhat.com>
Wed, 1 May 2019 18:22:26 +0000 (11:22 -0700)
Signed-off-by: Samuel Just <sjust@redhat.com>
src/osd/PG.cc
src/osd/PG.h
src/osd/PeeringState.cc
src/osd/PeeringState.h
src/osd/PrimaryLogPG.cc
src/osd/PrimaryLogPG.h
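
The commit consolidates recovery-target bookkeeping (backfill_targets, async_recovery_targets, last_rollback_info_trimmed_to_applied) inside PeeringState: PG drops its reference members, readers go through const accessors, and a new PeeringState::clear_recovery_state() clears both target sets. The code below is only a minimal illustrative sketch of that surface, not the real Ceph headers; pg_shard_t and eversion_t are simplified placeholders.

    #include <set>

    // Sketch only: simplified stand-ins for Ceph's pg_shard_t / eversion_t.
    struct pg_shard_t {
      int osd = -1;
      bool operator<(const pg_shard_t &o) const { return osd < o.osd; }
    };
    struct eversion_t {};

    class PeeringState {
      std::set<pg_shard_t> backfill_targets;
      std::set<pg_shard_t> async_recovery_targets;
      eversion_t last_rollback_info_trimmed_to_applied;

    public:
      bool is_backfill_target(pg_shard_t osd) const {
        return backfill_targets.count(osd);
      }
      const std::set<pg_shard_t> &get_backfill_targets() const {
        return backfill_targets;
      }
      bool is_async_recovery_target(pg_shard_t peer) const {
        return async_recovery_targets.count(peer);
      }
      const std::set<pg_shard_t> &get_async_recovery_targets() const {
        return async_recovery_targets;
      }
      eversion_t get_last_rollback_info_trimmed_to_applied() const {
        return last_rollback_info_trimmed_to_applied;
      }
      // Both target sets are cleared together whenever recovery state is reset
      // (from clear_primary_state() and try_mark_clean() in the real code).
      void clear_recovery_state() {
        async_recovery_targets.clear();
        backfill_targets.clear();
      }
    };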

index 26d5693802ceb70b45630f0f838c17c8353be98d..5358a4746d830091be0aa3383392b1a1a51f166b 100644 (file)
@@ -196,7 +196,6 @@ PG::PG(OSDService *o, OSDMapRef curmap,
   last_update_ondisk(recovery_state.last_update_ondisk),
   last_complete_ondisk(recovery_state.last_complete_ondisk),
   last_update_applied(recovery_state.last_update_applied),
-  last_rollback_info_trimmed_to_applied(recovery_state.last_rollback_info_trimmed_to_applied),
   peer_info(recovery_state.peer_info),
   peer_missing(recovery_state.peer_missing),
   peer_log_requested(recovery_state.peer_log_requested),
@@ -204,8 +203,6 @@ PG::PG(OSDService *o, OSDMapRef curmap,
   peer_last_complete_ondisk(recovery_state.peer_last_complete_ondisk),
   min_last_complete_ondisk(recovery_state.min_last_complete_ondisk),
   pg_trim_to(recovery_state.pg_trim_to),
-  backfill_targets(recovery_state.backfill_targets),
-  async_recovery_targets(recovery_state.async_recovery_targets),
   might_have_unfound(recovery_state.might_have_unfound),
   missing_loc(recovery_state.missing_loc),
   pg_id(p),
@@ -746,7 +743,6 @@ void PG::clear_recovery_state()
 {
   dout(10) << "clear_recovery_state" << dendl;
 
-  pg_log.reset_recovery_pointers();
   finish_sync_event = 0;
 
   hobject_t soid;
@@ -757,8 +753,6 @@ void PG::clear_recovery_state()
     finish_recovery_op(soid, true);
   }
 
-  async_recovery_targets.clear();
-  backfill_targets.clear();
   backfill_info.clear();
   peer_backfill_info.clear();
   waiting_on_backfill.clear();
@@ -2116,7 +2110,7 @@ void PG::clear_scrub_reserved()
 
 void PG::scrub_reserve_replicas()
 {
-  ceph_assert(backfill_targets.empty());
+  ceph_assert(recovery_state.get_backfill_targets().empty());
   for (set<pg_shard_t>::iterator i = acting_recovery_backfill.begin();
        i != acting_recovery_backfill.end();
        ++i) {
@@ -2133,7 +2127,7 @@ void PG::scrub_reserve_replicas()
 
 void PG::scrub_unreserve_replicas()
 {
-  ceph_assert(backfill_targets.empty());
+  ceph_assert(recovery_state.get_backfill_targets().empty());
   for (set<pg_shard_t>::iterator i = acting_recovery_backfill.begin();
        i != acting_recovery_backfill.end();
        ++i) {
@@ -2151,7 +2145,7 @@ void PG::scrub_unreserve_replicas()
 void PG::_scan_rollback_obs(const vector<ghobject_t> &rollback_obs)
 {
   ObjectStore::Transaction t;
-  eversion_t trimmed_to = last_rollback_info_trimmed_to_applied;
+  eversion_t trimmed_to = recovery_state.get_last_rollback_info_trimmed_to_applied();
   for (vector<ghobject_t>::const_iterator i = rollback_obs.begin();
        i != rollback_obs.end();
        ++i) {
@@ -2567,7 +2561,7 @@ void PG::scrub(epoch_t queued, ThreadPool::TPHandle &handle)
   }
 
   if (!scrubber.active) {
-    ceph_assert(backfill_targets.empty());
+    ceph_assert(recovery_state.get_backfill_targets().empty());
 
     scrubber.deep = state_test(PG_STATE_DEEP_SCRUB);
 
index ab58099816435d05b32059ea40348e9d85420b66..2c20d80c18a55458892aa7c126c4a24a69210c2b 100644 (file)
@@ -193,7 +193,6 @@ protected:
   eversion_t &last_update_ondisk;
   eversion_t &last_complete_ondisk;
   eversion_t &last_update_applied;
-  eversion_t &last_rollback_info_trimmed_to_applied;
   map<pg_shard_t, pg_info_t> &peer_info;
   map<pg_shard_t, pg_missing_t> &peer_missing;
   set<pg_shard_t> &peer_log_requested;
@@ -201,8 +200,6 @@ protected:
   map<pg_shard_t,eversion_t> &peer_last_complete_ondisk;
   eversion_t &min_last_complete_ondisk;
   eversion_t &pg_trim_to;
-  set<pg_shard_t> &backfill_targets;
-  set<pg_shard_t> &async_recovery_targets;
   set<pg_shard_t> &might_have_unfound;
   MissingLoc &missing_loc;
 
@@ -807,10 +804,6 @@ protected:
   std::atomic<int64_t> local_num_bytes = 0;
 
 public:
-  bool is_backfill_targets(pg_shard_t osd) {
-    return recovery_state.is_backfill_targets(osd);
-  }
-
   // Space reserved for backfill is primary_num_bytes - local_num_bytes
   // Don't care that difference itself isn't atomic
   uint64_t get_reserved_num_bytes() {
index 73868cdd4fcf1870a45ce141536d70e2feaaf008..76bb83110124ef84d5a73791568f09fcd00d2569 100644 (file)
@@ -769,6 +769,12 @@ void PeeringState::init_primary_up_acting(
   ceph_assert(primary.osd == new_acting_primary);
 }
 
+void PeeringState::clear_recovery_state()
+{
+  async_recovery_targets.clear();
+  backfill_targets.clear();
+}
+
 void PeeringState::clear_primary_state()
 {
   psdout(10) << "clear_primary_state" << dendl;
@@ -788,6 +794,9 @@ void PeeringState::clear_primary_state()
   need_up_thru = false;
   missing_loc.clear();
   pg_log.reset_recovery_pointers();
+
+  clear_recovery_state();
+
   pl->clear_primary_state();
 }
 
@@ -2623,6 +2632,7 @@ void PeeringState::try_mark_clean()
 
   share_pg_info();
   pl->publish_stats_to_osd();
+  clear_recovery_state();
 }
 
 void PeeringState::split_into(
@@ -3001,7 +3011,7 @@ void PeeringState::update_calc_stats()
       int64_t peer_num_objects = peer.second.stats.stats.sum.num_objects;
       // Backfill targets always track num_objects accurately
       // all other peers track missing accurately.
-      if (is_backfill_targets(peer.first)) {
+      if (is_backfill_target(peer.first)) {
         missing = std::max((int64_t)0, num_objects - peer_num_objects);
       } else {
         if (peer_missing.count(peer.first)) {
@@ -4917,6 +4927,7 @@ PeeringState::Clean::Clean(my_context ctx)
     ceph_abort();
   }
 
+
   ps->try_mark_clean();
 
   context< PeeringMachine >().get_cur_transaction()->register_on_commit(
index b8f2b0ea58d6776a31927b609060acddbdad2bd4..696e10b4d3f6c7a349abd4ccbcdcb9a8d35ae055 100644 (file)
@@ -1288,6 +1288,7 @@ public:
     const vector<int> &newacting,
     int new_up_primary,
     int new_acting_primary);
+  void clear_recovery_state();
   void clear_primary_state();
   void check_past_interval_bounds() const;
   bool set_force_recovery(bool b);
@@ -1547,6 +1548,10 @@ public:
     return last_peering_reset;
   }
 
+  eversion_t get_last_rollback_info_trimmed_to_applied() const {
+    return last_rollback_info_trimmed_to_applied;
+  }
+
   /// Returns stable reference to internal pool structure
   const PGPool &get_pool() const {
     return pool;
@@ -1661,9 +1666,18 @@ public:
     return up_primary.osd;
   }
 
-  bool is_backfill_targets(pg_shard_t osd) {
+  bool is_backfill_target(pg_shard_t osd) const {
     return backfill_targets.count(osd);
   }
+  const set<pg_shard_t> &get_backfill_targets() const {
+    return backfill_targets;
+  }
+  bool is_async_recovery_target(pg_shard_t peer) const {
+    return async_recovery_targets.count(peer);
+  }
+  const set<pg_shard_t> &get_async_recovery_targets() const {
+    return async_recovery_targets;
+  }
 
   bool state_test(uint64_t m) const { return (state & m) != 0; }
   void state_set(uint64_t m) { state |= m; }
index 75ce5e4c37b0005695998e78ae4acccab3dee3fd..dc866d8d0ac188b46e97238f889292cab55126ec 100644 (file)
@@ -553,7 +553,7 @@ bool PrimaryLogPG::should_send_op(
       hoid <= last_backfill_started ||
       hoid <= peer_info[peer].last_backfill;
   if (!should_send) {
-    ceph_assert(is_backfill_targets(peer));
+    ceph_assert(is_backfill_target(peer));
     dout(10) << __func__ << " issue_repop shipping empty opt to osd." << peer
              << ", object " << hoid
              << " beyond std::max(last_backfill_started "
@@ -561,7 +561,7 @@ bool PrimaryLogPG::should_send_op(
              << peer_info[peer].last_backfill << ")" << dendl;
     return should_send;
   }
-  if (async_recovery_targets.count(peer) && peer_missing[peer].is_missing(hoid)) {
+  if (is_async_recovery_target(peer) && peer_missing[peer].is_missing(hoid)) {
     should_send = false;
     dout(10) << __func__ << " issue_repop shipping empty opt to osd." << peer
              << ", object " << hoid
@@ -648,14 +648,14 @@ bool PrimaryLogPG::is_degraded_or_backfilling_object(const hobject_t& soid)
     // This will not block the op and the object is async recovered later.
     if (peer_missing_entry != peer_missing.end() &&
        peer_missing_entry->second.get_items().count(soid)) {
-      if (async_recovery_targets.count(peer))
+      if (is_async_recovery_target(peer))
        continue;
       else
        return true;
     }
     // Object is degraded if after last_backfill AND
     // we are backfilling it
-    if (is_backfill_targets(peer) &&
+    if (is_backfill_target(peer) &&
        peer_info[peer].last_backfill <= soid &&
        last_backfill_started >= soid &&
        backfills_in_flight.count(soid))
@@ -666,7 +666,7 @@ bool PrimaryLogPG::is_degraded_or_backfilling_object(const hobject_t& soid)
 
 bool PrimaryLogPG::is_degraded_on_async_recovery_target(const hobject_t& soid)
 {
-  for (auto &i: async_recovery_targets) {
+  for (auto &i: get_async_recovery_targets()) {
     auto peer_missing_entry = peer_missing.find(i);
     if (peer_missing_entry != peer_missing.end() &&
         peer_missing_entry->second.get_items().count(soid)) {
@@ -1870,8 +1870,8 @@ void PrimaryLogPG::do_request(
 hobject_t PrimaryLogPG::earliest_backfill() const
 {
   hobject_t e = hobject_t::get_max();
-  for (set<pg_shard_t>::iterator i = backfill_targets.begin();
-       i != backfill_targets.end();
+  for (set<pg_shard_t>::const_iterator i = get_backfill_targets().begin();
+       i != get_backfill_targets().end();
        ++i) {
     pg_shard_t bt = *i;
     map<pg_shard_t, pg_info_t>::const_iterator iter = peer_info.find(bt);
@@ -4190,7 +4190,7 @@ void PrimaryLogPG::do_scan(
       pg_shard_t from = m->from;
 
       // Check that from is in backfill_targets vector
-      ceph_assert(is_backfill_targets(from));
+      ceph_assert(is_backfill_target(from));
 
       BackfillInterval& bi = peer_backfill_info[from];
       bi.begin = m->begin;
@@ -4203,7 +4203,9 @@ void PrimaryLogPG::do_scan(
 
       if (waiting_on_backfill.erase(from)) {
        if (waiting_on_backfill.empty()) {
-         ceph_assert(peer_backfill_info.size() == backfill_targets.size());
+         ceph_assert(
+           peer_backfill_info.size() ==
+           get_backfill_targets().size());
          finish_recovery_op(hobject_t::get_max());
        }
       } else {
@@ -8541,8 +8543,8 @@ void PrimaryLogPG::apply_stats(
   info.stats.stats.add(delta_stats);
   info.stats.stats.floor(0);
 
-  for (set<pg_shard_t>::iterator i = backfill_targets.begin();
-       i != backfill_targets.end();
+  for (set<pg_shard_t>::const_iterator i = get_backfill_targets().begin();
+       i != get_backfill_targets().end();
        ++i) {
     pg_shard_t bt = *i;
     pg_info_t& pinfo = peer_info[bt];
@@ -10479,8 +10481,8 @@ void PrimaryLogPG::issue_repop(RepGather *repop, OpContext *ctx)
   }
 
   bool requires_missing_loc = false;
-  for (set<pg_shard_t>::iterator i = async_recovery_targets.begin();
-       i != async_recovery_targets.end();
+  for (set<pg_shard_t>::iterator i = get_async_recovery_targets().begin();
+       i != get_async_recovery_targets().end();
        ++i) {
     if (*i == get_primary() || !peer_missing[*i].is_missing(soid)) continue;
     requires_missing_loc = true;
@@ -12087,14 +12089,14 @@ void PrimaryLogPG::on_activate_complete()
 
   publish_stats_to_osd();
 
-  if (!backfill_targets.empty()) {
+  if (get_backfill_targets().size()) {
     last_backfill_started = earliest_backfill();
     new_backfill = true;
     ceph_assert(!last_backfill_started.is_max());
-    dout(5) << __func__ << ": bft=" << backfill_targets
+    dout(5) << __func__ << ": bft=" << get_backfill_targets()
           << " from " << last_backfill_started << dendl;
-    for (set<pg_shard_t>::iterator i = backfill_targets.begin();
-        i != backfill_targets.end();
+    for (set<pg_shard_t>::const_iterator i = get_backfill_targets().begin();
+        i != get_backfill_targets().end();
         ++i) {
       dout(5) << "target shard " << *i
             << " from " << peer_info[*i].last_backfill
@@ -12370,7 +12372,7 @@ bool PrimaryLogPG::start_recovery_ops(
   bool deferred_backfill = false;
   if (recovering.empty() &&
       state_test(PG_STATE_BACKFILLING) &&
-      !backfill_targets.empty() && started < max &&
+      !get_backfill_targets().empty() && started < max &&
       missing.num_missing() == 0 &&
       waiting_on_backfill.empty()) {
     if (get_osdmap()->test_flag(CEPH_OSDMAP_NOBACKFILL)) {
@@ -12768,7 +12770,7 @@ uint64_t PrimaryLogPG::recover_replicas(uint64_t max, ThreadPool::TPHandle &hand
     ceph_assert(pm != peer_missing.end());
     auto nm = pm->second.num_missing();
     if (nm != 0) {
-      if (async_recovery_targets.count(p)) {
+      if (is_async_recovery_target(p)) {
         async_by_num_missing.push_back(make_pair(nm, p));
       } else {
         replicas_by_num_missing.push_back(make_pair(nm, p));
@@ -12857,8 +12859,8 @@ uint64_t PrimaryLogPG::recover_replicas(uint64_t max, ThreadPool::TPHandle &hand
 hobject_t PrimaryLogPG::earliest_peer_backfill() const
 {
   hobject_t e = hobject_t::get_max();
-  for (set<pg_shard_t>::const_iterator i = backfill_targets.begin();
-       i != backfill_targets.end();
+  for (set<pg_shard_t>::const_iterator i = get_backfill_targets().begin();
+       i != get_backfill_targets().end();
        ++i) {
     pg_shard_t peer = *i;
     map<pg_shard_t, BackfillInterval>::const_iterator iter =
@@ -12875,8 +12877,8 @@ bool PrimaryLogPG::all_peer_done() const
   // Primary hasn't got any more objects
   ceph_assert(backfill_info.empty());
 
-  for (set<pg_shard_t>::const_iterator i = backfill_targets.begin();
-       i != backfill_targets.end();
+  for (set<pg_shard_t>::const_iterator i = get_backfill_targets().begin();
+       i != get_backfill_targets().end();
        ++i) {
     pg_shard_t bt = *i;
     map<pg_shard_t, BackfillInterval>::const_iterator piter =
@@ -12923,11 +12925,11 @@ uint64_t PrimaryLogPG::recover_backfill(
   ThreadPool::TPHandle &handle, bool *work_started)
 {
   dout(10) << __func__ << " (" << max << ")"
-           << " bft=" << backfill_targets
+           << " bft=" << get_backfill_targets()
           << " last_backfill_started " << last_backfill_started
           << (new_backfill ? " new_backfill":"")
           << dendl;
-  ceph_assert(!backfill_targets.empty());
+  ceph_assert(!get_backfill_targets().empty());
 
   // Initialize from prior backfill state
   if (new_backfill) {
@@ -12936,8 +12938,8 @@ uint64_t PrimaryLogPG::recover_backfill(
     new_backfill = false;
 
     // initialize BackfillIntervals
-    for (set<pg_shard_t>::iterator i = backfill_targets.begin();
-        i != backfill_targets.end();
+    for (set<pg_shard_t>::const_iterator i = get_backfill_targets().begin();
+        i != get_backfill_targets().end();
         ++i) {
       peer_backfill_info[*i].reset(peer_info[*i].last_backfill);
     }
@@ -12947,8 +12949,8 @@ uint64_t PrimaryLogPG::recover_backfill(
     pending_backfill_updates.clear();
   }
 
-  for (set<pg_shard_t>::iterator i = backfill_targets.begin();
-       i != backfill_targets.end();
+  for (set<pg_shard_t>::const_iterator i = get_backfill_targets().begin();
+       i != get_backfill_targets().end();
        ++i) {
     dout(10) << "peer osd." << *i
           << " info " << peer_info[*i]
@@ -12966,8 +12968,8 @@ uint64_t PrimaryLogPG::recover_backfill(
   vector<boost::tuple<hobject_t, eversion_t, pg_shard_t> > to_remove;
   set<hobject_t> add_to_stat;
 
-  for (set<pg_shard_t>::iterator i = backfill_targets.begin();
-       i != backfill_targets.end();
+  for (set<pg_shard_t>::const_iterator i = get_backfill_targets().begin();
+       i != get_backfill_targets().end();
        ++i) {
     peer_backfill_info[*i].trim_to(
       std::max(peer_info[*i].last_backfill, last_backfill_started));
@@ -12988,8 +12990,8 @@ uint64_t PrimaryLogPG::recover_backfill(
     dout(20) << "   my backfill interval " << backfill_info << dendl;
 
     bool sent_scan = false;
-    for (set<pg_shard_t>::iterator i = backfill_targets.begin();
-        i != backfill_targets.end();
+    for (set<pg_shard_t>::const_iterator i = get_backfill_targets().begin();
+        i != get_backfill_targets().end();
         ++i) {
       pg_shard_t bt = *i;
       BackfillInterval& pbi = peer_backfill_info[bt];
@@ -13029,8 +13031,8 @@ uint64_t PrimaryLogPG::recover_backfill(
     if (check < backfill_info.begin) {
 
       set<pg_shard_t> check_targets;
-      for (set<pg_shard_t>::iterator i = backfill_targets.begin();
-          i != backfill_targets.end();
+      for (set<pg_shard_t>::const_iterator i = get_backfill_targets().begin();
+          i != get_backfill_targets().end();
           ++i) {
         pg_shard_t bt = *i;
         BackfillInterval& pbi = peer_backfill_info[bt];
@@ -13062,8 +13064,8 @@ uint64_t PrimaryLogPG::recover_backfill(
       eversion_t& obj_v = backfill_info.objects.begin()->second;
 
       vector<pg_shard_t> need_ver_targs, missing_targs, keep_ver_targs, skip_targs;
-      for (set<pg_shard_t>::iterator i = backfill_targets.begin();
-          i != backfill_targets.end();
+      for (set<pg_shard_t>::const_iterator i = get_backfill_targets().begin();
+          i != get_backfill_targets().end();
           ++i) {
        pg_shard_t bt = *i;
        BackfillInterval& pbi = peer_backfill_info[bt];
@@ -13128,7 +13130,7 @@ uint64_t PrimaryLogPG::recover_backfill(
       }
       dout(20) << "need_ver_targs=" << need_ver_targs
               << " keep_ver_targs=" << keep_ver_targs << dendl;
-      dout(20) << "backfill_targets=" << backfill_targets
+      dout(20) << "backfill_targets=" << get_backfill_targets()
               << " missing_targs=" << missing_targs
               << " skip_targs=" << skip_targs << dendl;
 
@@ -13204,8 +13206,8 @@ uint64_t PrimaryLogPG::recover_backfill(
        pending_backfill_updates.erase(i++)) {
     dout(20) << " pending_backfill_update " << i->first << dendl;
     ceph_assert(i->first > new_last_backfill);
-    for (set<pg_shard_t>::iterator j = backfill_targets.begin();
-        j != backfill_targets.end();
+    for (set<pg_shard_t>::const_iterator j = get_backfill_targets().begin();
+        j != get_backfill_targets().end();
         ++j) {
       pg_shard_t bt = *j;
       pg_info_t& pinfo = peer_info[bt];
@@ -13230,8 +13232,8 @@ uint64_t PrimaryLogPG::recover_backfill(
   // If new_last_backfill == MAX, then we will send OP_BACKFILL_FINISH to
   // all the backfill targets.  Otherwise, we will move last_backfill up on
   // those targets need it and send OP_BACKFILL_PROGRESS to them.
-  for (set<pg_shard_t>::iterator i = backfill_targets.begin();
-       i != backfill_targets.end();
+  for (set<pg_shard_t>::const_iterator i = get_backfill_targets().begin();
+       i != get_backfill_targets().end();
        ++i) {
     pg_shard_t bt = *i;
     pg_info_t& pinfo = peer_info[bt];
@@ -13669,8 +13671,8 @@ void PrimaryLogPG::hit_set_persist()
   // look just at that.  This is necessary because our transactions
   // may include a modify of the new hit_set *and* a delete of the
   // old one, and this may span the backfill boundary.
-  for (set<pg_shard_t>::iterator p = backfill_targets.begin();
-       p != backfill_targets.end();
+  for (set<pg_shard_t>::const_iterator p = get_backfill_targets().begin();
+       p != get_backfill_targets().end();
        ++p) {
     ceph_assert(peer_info.count(*p));
     const pg_info_t& pi = peer_info[*p];
index a596658661b1e11f98e9328f088ee9a665af97a6..ad5876f7445d1fe26cce364955c13a530bb52d88 100644 (file)
@@ -339,7 +339,7 @@ public:
     return actingset;
   }
   const set<pg_shard_t> &get_backfill_shards() const override {
-    return backfill_targets;
+    return get_backfill_targets();
   }
 
   std::ostream& gen_dbg_prefix(std::ostream& out) const override {
@@ -1832,6 +1832,18 @@ public:
   void wait_for_unreadable_object(const hobject_t& oid, OpRequestRef op);
   void wait_for_all_missing(OpRequestRef op);
 
+  bool is_backfill_target(pg_shard_t osd) const {
+    return recovery_state.is_backfill_target(osd);
+  }
+  const set<pg_shard_t> &get_backfill_targets() const {
+    return recovery_state.get_backfill_targets();
+  }
+  bool is_async_recovery_target(pg_shard_t peer) const {
+    return recovery_state.is_async_recovery_target(peer);
+  }
+  const set<pg_shard_t> &get_async_recovery_targets() const {
+    return recovery_state.get_async_recovery_targets();
+  }
   bool is_degraded_or_backfilling_object(const hobject_t& oid);
   bool is_degraded_on_async_recovery_target(const hobject_t& soid);
   void wait_for_degraded_object(const hobject_t& oid, OpRequestRef op);
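
With the thin wrappers above, PrimaryLogPG call sites keep their old shape while the data itself lives in PeeringState, and the const-reference accessors rule out accidental mutation outside peering code. A small usage sketch, reusing the placeholder PeeringState class from the sketch after the file list; report_targets is a hypothetical illustration, not a Ceph function.

    #include <cstdio>

    // Usage sketch only: callers read the sets through the accessors instead
    // of holding references to PeeringState's members.
    void report_targets(const PeeringState &ps, pg_shard_t peer) {
      if (ps.is_backfill_target(peer))
        std::printf("osd.%d is a backfill target\n", peer.osd);
      // The accessor returns a const reference, so iteration is copy-free
      // while the underlying set stays read-only at the call site.
      for (const pg_shard_t &bt : ps.get_backfill_targets())
        std::printf("backfill target osd.%d\n", bt.osd);
      std::printf("%zu async recovery targets\n",
                  ps.get_async_recovery_targets().size());
    }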