git-server-git.apps.pok.os.sepia.ceph.com Git - ceph.git/commitdiff
mon: osd: switch crush_bucket_mandatory_member to be CRUSH_ITEM_NONE-based
author Greg Farnum <gfarnum@redhat.com>
Thu, 16 Jul 2020 03:08:35 +0000 (03:08 +0000)
committer Greg Farnum <gfarnum@redhat.com>
Tue, 21 Jul 2020 17:59:15 +0000 (17:59 +0000)
Previously we compared it to zero, but we could technically want to require
osd.0 as a member, maybe? In any case we have a "DNE" indicator
in CRUSH_ITEM_NONE, so use it.

Also, for osd_types.h, declare a pg_pool_t::pg_CRUSH_ITEM_NONE to use,
since apparently we can't import crush.h there and hard-coding it is bad.

Signed-off-by: Greg Farnum <gfarnum@redhat.com>
src/mon/OSDMonitor.cc
src/osd/PeeringState.cc
src/osd/osd_types.cc
src/osd/osd_types.h
src/osdc/Objecter.h

index a2b1122d5b8740e4c9308029e5f9938bb9b4c512..28f99ec2953a078317e04dce9dcfd2204507e7fb 100644 (file)
@@ -7936,11 +7936,11 @@ int OSDMonitor::prepare_new_pool(string& name,
     pi->peering_crush_bucket_count = osdmap.stretch_bucket_count;
     pi->peering_crush_bucket_target = osdmap.stretch_bucket_count;
     pi->peering_crush_bucket_barrier = osdmap.stretch_mode_bucket;
-    pi->peering_crush_mandatory_member = 0;
+    pi->peering_crush_mandatory_member = CRUSH_ITEM_NONE;
     if (osdmap.degraded_stretch_mode) {
       pi->peering_crush_bucket_count = osdmap.degraded_stretch_mode;
       pi->peering_crush_bucket_target = osdmap.degraded_stretch_mode;
-      // pi->peering_crush_bucket_mandatory_member = 0;
+      // pi->peering_crush_bucket_mandatory_member = CRUSH_ITEM_NONE;
       // TODO: drat, we don't record this ^ anywhere, though given that it
       // necessarily won't exist elsewhere it likely doesn't matter
       pi->min_size = pi->min_size / 2;
@@ -14315,7 +14315,7 @@ void OSDMonitor::try_enable_stretch_mode(stringstream& ss, bool *okay,
       pool->peering_crush_bucket_count = bucket_count;
       pool->peering_crush_bucket_target = bucket_count;
       pool->peering_crush_bucket_barrier = dividing_id;
-      pool->peering_crush_mandatory_member = 0;
+      pool->peering_crush_mandatory_member = CRUSH_ITEM_NONE;
       pool->size = g_conf().get_val<uint64_t>("mon_stretch_pool_size");
       pool->min_size = g_conf().get_val<uint64_t>("mon_stretch_pool_min_size");
     }
@@ -14472,7 +14472,7 @@ void OSDMonitor::trigger_healthy_stretch_mode()
     if (pgi.second.peering_crush_bucket_count) {
       pg_pool_t newp(pgi.second);
       newp.peering_crush_bucket_count = osdmap.stretch_bucket_count;
-      newp.peering_crush_mandatory_member = 0;
+      newp.peering_crush_mandatory_member = CRUSH_ITEM_NONE;
       newp.min_size = g_conf().get_val<uint64_t>("mon_stretch_pool_min_size");
       newp.last_force_op_resend = pending_inc.epoch;
       pending_inc.new_pools[pgi.first] = newp;
index 93b369d8418eb33b71c88a7621606eda6df07fb5..ba3c7d46150598909542d1b068276ecb6f1fd747 100644 (file)
@@ -2077,7 +2077,7 @@ void PeeringState::calc_replicated_acting_stretch(
    * expect that this and other users should instead check against
    * CRUSH_ITEM_NONE.
    */
-  if (pool.info.peering_crush_mandatory_member != 0) {
+  if (pool.info.peering_crush_mandatory_member != CRUSH_ITEM_NONE) {
     auto aiter = ancestors.find(pool.info.peering_crush_mandatory_member);
     if (aiter != ancestors.end() &&
        aiter->second.get_num_selected()) {
index 8b7c3f088f4fcb3aff467a3f6b0aaf4b04add9f2..198fe89d7822458e81662f3153e8d12cf96cb4a2 100644 (file)
@@ -2036,7 +2036,7 @@ void pg_pool_t::encode(ceph::buffer::list& bl, uint64_t features) const
   if (peering_crush_bucket_barrier != 0 ||
       peering_crush_bucket_target != 0 ||
       peering_crush_bucket_count !=0 ||
-      peering_crush_mandatory_member) {
+      peering_crush_mandatory_member != CRUSH_ITEM_NONE) {
     ceph_assert(v >= 30);
     new_compat = 30;
   }
@@ -2255,7 +2255,7 @@ bool pg_pool_t::stretch_set_can_peer(const set<int>& want, const OSDMap& osdmap,
           << want;
     }
     return false;
-  } else if (peering_crush_mandatory_member &&
+  } else if (peering_crush_mandatory_member != CRUSH_ITEM_NONE &&
             !ancestors.count(peering_crush_mandatory_member)) {
     if (out) {
       *out << __func__ << ": missing mandatory crush bucket member "
index 9dbee932d26733dff1201732b3fb4b370db60765..811075fb3eba54de17178ce366e701be64ef0e0a 100644 (file)
@@ -1175,6 +1175,7 @@ struct pg_pool_t {
     //TYPE_RAID4 = 2,   // raid4 (never implemented)
     TYPE_ERASURE = 3,      // erasure-coded
   };
+  static constexpr uint32_t pg_CRUSH_ITEM_NONE = 0x7fffffff; /* can't import crush.h here */
   static std::string_view get_type_name(int t) {
     switch (t) {
     case TYPE_REPLICATED: return "replicated";
@@ -1387,7 +1388,7 @@ public:
   // of this bucket type...
   uint32_t peering_crush_bucket_barrier = 0;
   // including this one
-  int32_t peering_crush_mandatory_member = 0;
+  int32_t peering_crush_mandatory_member = pg_CRUSH_ITEM_NONE;
   // The per-bucket replica count is calculated with this "target"
   // instead of the above crush_bucket_count. This means we can maintain a
   // target size of 4 without attempting to place them all in 1 DC
@@ -3492,13 +3493,13 @@ PastIntervals::PriorSet::PriorSet(
   // so that we know what they do/do not have explicitly before
   // sending them any new info/logs/whatever.
   for (unsigned i = 0; i < acting.size(); i++) {
-    if (acting[i] != 0x7fffffff /* CRUSH_ITEM_NONE, can't import crush.h here */)
+    if (acting[i] != pg_pool_t::pg_CRUSH_ITEM_NONE)
       probe.insert(pg_shard_t(acting[i], ec_pool ? shard_id_t(i) : shard_id_t::NO_SHARD));
   }
   // It may be possible to exclude the up nodes, but let's keep them in
   // there for now.
   for (unsigned i = 0; i < up.size(); i++) {
-    if (up[i] != 0x7fffffff /* CRUSH_ITEM_NONE, can't import crush.h here */)
+    if (up[i] != pg_pool_t::pg_CRUSH_ITEM_NONE)
       probe.insert(pg_shard_t(up[i], ec_pool ? shard_id_t(i) : shard_id_t::NO_SHARD));
   }
 
index 85e08eab547e8a91233d6cd444bf3cb8a3979b52..8dcbf8177002ba7cea72d75493cfdaf4023f84db 100644 (file)
@@ -1781,7 +1781,7 @@ public:
     uint32_t peering_crush_bucket_count = 0;
     uint32_t peering_crush_bucket_target = 0;
     uint32_t peering_crush_bucket_barrier = 0;
-    int32_t peering_crush_mandatory_member = 0;
+    int32_t peering_crush_mandatory_member = CRUSH_ITEM_NONE;
 
     bool used_replica = false;
     bool paused = false;