osd/OSDMap: split clean_pg_upmaps into smaller helpers
author     xie xingguo <xie.xingguo@zte.com.cn>
           Sat, 1 Jun 2019 11:46:25 +0000 (19:46 +0800)
committer  xie xingguo <xie.xingguo@zte.com.cn>
           Tue, 18 Jun 2019 02:15:41 +0000 (10:15 +0800)
- improves readability.
- the part that updates pending_inc should be made independent,
  since it would be racy if run in parallel (see the sketch below).
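
The split lets the read-only checking phase run concurrently while the
pending_inc update stays single-threaded. Below is a minimal, self-contained
sketch of that pattern; all names (Pending, is_stale, nshards) are
illustrative stand-ins, not the OSDMap helpers:

  // Illustrative only: read-only checks run in parallel; the shared
  // "pending" state (a stand-in for pending_inc) is mutated serially after.
  #include <iostream>
  #include <thread>
  #include <vector>

  struct Pending { std::vector<int> to_cancel; };  // stand-in for pending_inc

  // Read-only predicate: safe to evaluate from any thread concurrently.
  static bool is_stale(int pg) { return pg % 3 == 0; }

  int main() {
    std::vector<int> to_check(30);
    for (int i = 0; i < 30; ++i) to_check[i] = i;

    const int nshards = 4;
    std::vector<std::vector<int>> shard_results(nshards);
    std::vector<std::thread> workers;

    // Phase 1: check in parallel; each shard writes only its own result
    // vector, so there is no shared mutable state here.
    for (int s = 0; s < nshards; ++s) {
      workers.emplace_back([&, s] {
        for (size_t i = s; i < to_check.size(); i += nshards)
          if (is_stale(to_check[i]))
            shard_results[s].push_back(to_check[i]);
      });
    }
    for (auto& t : workers) t.join();

    // Phase 2: apply serially -- the only place the shared state is
    // mutated, so no race is possible on it.
    Pending pending;
    for (auto& res : shard_results)
      pending.to_cancel.insert(pending.to_cancel.end(), res.begin(), res.end());

    std::cout << "stale entries: " << pending.to_cancel.size() << "\n";
    return 0;
  }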

Signed-off-by: xie xingguo <xie.xingguo@zte.com.cn>
(cherry picked from commit 4d5cf1e4173e5151cc571af571edb2eab0bb46a7)

Conflicts:
	slight conflicts with pg-merge

src/osd/OSDMap.cc
src/osd/OSDMap.h

index 0007a36c349964847718b86c551e89e4b1dae43e..fef9fc66f2250c4a6006f0df045ea1d315a16cee 100644 (file)
@@ -1638,25 +1638,27 @@ void OSDMap::clean_temps(CephContext *cct,
   }
 }
 
-bool OSDMap::clean_pg_upmaps(CephContext *cct,
-                             Incremental *pending_inc) const
+void OSDMap::get_upmap_pgs(vector<pg_t> *upmap_pgs) const
 {
-  ldout(cct, 10) << __func__ << dendl;
-  set<pg_t> to_check;
-  set<pg_t> to_cancel;
-  map<int, map<int, float>> rule_weight_map;
-  bool any_change = false;
+  upmap_pgs->reserve(pg_upmap.size() + pg_upmap_items.size());
+  for (auto& p : pg_upmap)
+    upmap_pgs->push_back(p.first);
+  for (auto& p : pg_upmap_items)
+    upmap_pgs->push_back(p.first);
+}
 
-  for (auto& p : pg_upmap) {
-    to_check.insert(p.first);
-  }
-  for (auto& p : pg_upmap_items) {
-    to_check.insert(p.first);
-  }
+bool OSDMap::check_pg_upmaps(
+  CephContext *cct,
+  const vector<pg_t>& to_check,
+  vector<pg_t> *to_cancel,
+  map<pg_t, mempool::osdmap::vector<pair<int,int>>> *to_remap) const
+{
+  bool any_change = false;
+  map<int, map<int, float>> rule_weight_map;
   for (auto& pg : to_check) {
     if (!pg_exists(pg)) {
       ldout(cct, 0) << __func__ << " pg " << pg << " is gone" << dendl;
-      to_cancel.insert(pg);
+      to_cancel->push_back(pg);
       continue;
     }
     vector<int> raw, up;
@@ -1666,7 +1668,7 @@ bool OSDMap::clean_pg_upmaps(CephContext *cct,
       ldout(cct, 10) << " removing redundant pg_upmap "
                      << i->first << " " << i->second
                      << dendl;
-      to_cancel.insert(pg);
+      to_cancel->push_back(pg);
       continue;
     }
     auto j = pg_upmap_items.find(pg);
@@ -1688,14 +1690,14 @@ bool OSDMap::clean_pg_upmaps(CephContext *cct,
         ldout(cct, 10) << " removing no-op pg_upmap_items "
                        << j->first << " " << j->second
                        << dendl;
-        to_cancel.insert(pg);
+        to_cancel->push_back(pg);
         continue;
       } else if (newmap != j->second) {
         ldout(cct, 10) << " simplifying partially no-op pg_upmap_items "
                        << j->first << " " << j->second
                        << " -> " << newmap
                        << dendl;
-        pending_inc->new_pg_upmap_items[pg] = newmap;
+        to_remap->insert({pg, newmap});
         any_change = true;
         continue;
       }
@@ -1709,7 +1711,7 @@ bool OSDMap::clean_pg_upmaps(CephContext *cct,
       ldout(cct, 0) << __func__ << " verify_upmap of pg " << pg
                     << " returning " << r
                     << dendl;
-      to_cancel.insert(pg);
+      to_cancel->push_back(pg);
       continue;
     }
     // below we check against crush-topology changing..
@@ -1719,7 +1721,8 @@ bool OSDMap::clean_pg_upmaps(CephContext *cct,
       auto r = crush->get_rule_weight_osd_map(crush_rule, &weight_map);
       if (r < 0) {
         lderr(cct) << __func__ << " unable to get crush weight_map for "
-                   << "crush_rule " << crush_rule << dendl;
+                   << "crush_rule " << crush_rule
+                   << dendl;
         continue;
       }
       rule_weight_map[crush_rule] = weight_map;
@@ -1733,17 +1736,27 @@ bool OSDMap::clean_pg_upmaps(CephContext *cct,
       auto it = weight_map.find(osd);
       if (it == weight_map.end()) {
         // osd is gone or has been moved out of the specific crush-tree
-        to_cancel.insert(pg);
+        to_cancel->push_back(pg);
         break;
       }
       auto adjusted_weight = get_weightf(it->first) * it->second;
       if (adjusted_weight == 0) {
         // osd is out/crush-out
-        to_cancel.insert(pg);
+        to_cancel->push_back(pg);
         break;
       }
     }
   }
+  any_change = any_change || !to_cancel->empty();
+  return any_change;
+}
+
+void OSDMap::clean_pg_upmaps(
+  CephContext *cct,
+  Incremental *pending_inc,
+  const vector<pg_t>& to_cancel,
+  const map<pg_t, mempool::osdmap::vector<pair<int,int>>>& to_remap) const
+{
   for (auto &pg: to_cancel) {
     auto i = pending_inc->new_pg_upmap.find(pg);
     if (i != pending_inc->new_pg_upmap.end()) {
@@ -1777,7 +1790,22 @@ bool OSDMap::clean_pg_upmaps(CephContext *cct,
       pending_inc->old_pg_upmap_items.insert(pg);
     }
   }
-  any_change = any_change || !to_cancel.empty();
+  for (auto& i : to_remap)
+    pending_inc->new_pg_upmap_items[i.first] = i.second;
+}
+
+bool OSDMap::clean_pg_upmaps(
+  CephContext *cct,
+  Incremental *pending_inc) const
+{
+  ldout(cct, 10) << __func__ << dendl;
+  vector<pg_t> to_check;
+  vector<pg_t> to_cancel;
+  map<pg_t, mempool::osdmap::vector<pair<int,int>>> to_remap;
+
+  get_upmap_pgs(&to_check);
+  auto any_change = check_pg_upmaps(cct, to_check, &to_cancel, &to_remap);
+  clean_pg_upmaps(cct, pending_inc, to_cancel, to_remap);
   return any_change;
 }
 
index 71999968147df4f8e446ac004ca2daa722f54e6e..a93ff97c92218d7397ae42451112f887fd60d28d 100644 (file)
@@ -1009,6 +1009,17 @@ public:
    */
   uint64_t get_up_osd_features() const;
 
+  void get_upmap_pgs(vector<pg_t> *upmap_pgs) const;
+  bool check_pg_upmaps(
+    CephContext *cct,
+    const vector<pg_t>& to_check,
+    vector<pg_t> *to_cancel,
+    map<pg_t, mempool::osdmap::vector<pair<int,int>>> *to_remap) const;
+  void clean_pg_upmaps(
+    CephContext *cct,
+    Incremental *pending_inc,
+    const vector<pg_t>& to_cancel,
+    const map<pg_t, mempool::osdmap::vector<pair<int,int>>>& to_remap) const;
   bool clean_pg_upmaps(CephContext *cct, Incremental *pending_inc) const;
 
   int apply_incremental(const Incremental &inc);
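
For reference, a caller-side sketch of how the new helpers compose, mirroring
the two-argument clean_pg_upmaps above; the surrounding CephContext *cct,
OSDMap osdmap and OSDMap::Incremental pending_inc are assumed to already
exist in the caller:

  // Sketch only; assumes the usual Ceph caller context (cct, osdmap,
  // pending_inc for the next epoch).
  vector<pg_t> to_check, to_cancel;
  map<pg_t, mempool::osdmap::vector<pair<int,int>>> to_remap;

  osdmap.get_upmap_pgs(&to_check);                    // collect pgs with upmaps
  bool any_change = osdmap.check_pg_upmaps(cct, to_check,
                                           &to_cancel, &to_remap);  // read-only
  osdmap.clean_pg_upmaps(cct, &pending_inc,
                         to_cancel, to_remap);        // record changes in pending_inc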