pi->peering_crush_bucket_count = osdmap.stretch_bucket_count;
pi->peering_crush_bucket_target = osdmap.stretch_bucket_count;
pi->peering_crush_bucket_barrier = osdmap.stretch_mode_bucket;
- pi->peering_crush_mandatory_member = 0;
+ pi->peering_crush_mandatory_member = CRUSH_ITEM_NONE;
if (osdmap.degraded_stretch_mode) {
pi->peering_crush_bucket_count = osdmap.degraded_stretch_mode;
pi->peering_crush_bucket_target = osdmap.degraded_stretch_mode;
- // pi->peering_crush_bucket_mandatory_member = 0;
+ // pi->peering_crush_mandatory_member = CRUSH_ITEM_NONE;
// TODO: drat, we don't record this ^ anywhere, though given that it
// necessarily won't exist elsewhere it likely doesn't matter
pi->min_size = pi->min_size / 2;
pool->peering_crush_bucket_count = bucket_count;
pool->peering_crush_bucket_target = bucket_count;
pool->peering_crush_bucket_barrier = dividing_id;
- pool->peering_crush_mandatory_member = 0;
+ pool->peering_crush_mandatory_member = CRUSH_ITEM_NONE;
pool->size = g_conf().get_val<uint64_t>("mon_stretch_pool_size");
pool->min_size = g_conf().get_val<uint64_t>("mon_stretch_pool_min_size");
}
if (pgi.second.peering_crush_bucket_count) {
pg_pool_t newp(pgi.second);
newp.peering_crush_bucket_count = osdmap.stretch_bucket_count;
- newp.peering_crush_mandatory_member = 0;
+ newp.peering_crush_mandatory_member = CRUSH_ITEM_NONE;
newp.min_size = g_conf().get_val<uint64_t>("mon_stretch_pool_min_size");
newp.last_force_op_resend = pending_inc.epoch;
pending_inc.new_pools[pgi.first] = newp;
* expect that this and other users should instead check against
* CRUSH_ITEM_NONE.
*/
- if (pool.info.peering_crush_mandatory_member != 0) {
+ if (pool.info.peering_crush_mandatory_member != CRUSH_ITEM_NONE) {
auto aiter = ancestors.find(pool.info.peering_crush_mandatory_member);
if (aiter != ancestors.end() &&
aiter->second.get_num_selected()) {
if (peering_crush_bucket_barrier != 0 ||
peering_crush_bucket_target != 0 ||
peering_crush_bucket_count !=0 ||
- peering_crush_mandatory_member) {
+ peering_crush_mandatory_member != CRUSH_ITEM_NONE) {
ceph_assert(v >= 30);
new_compat = 30;
}
<< want;
}
return false;
- } else if (peering_crush_mandatory_member &&
+ } else if (peering_crush_mandatory_member != CRUSH_ITEM_NONE &&
!ancestors.count(peering_crush_mandatory_member)) {
if (out) {
*out << __func__ << ": missing mandatory crush bucket member "
//TYPE_RAID4 = 2, // raid4 (never implemented)
TYPE_ERASURE = 3, // erasure-coded
};
+ static constexpr uint32_t pg_CRUSH_ITEM_NONE = 0x7fffffff; /* can't import crush.h here */
static std::string_view get_type_name(int t) {
switch (t) {
case TYPE_REPLICATED: return "replicated";
// of this bucket type...
uint32_t peering_crush_bucket_barrier = 0;
// including this one
- int32_t peering_crush_mandatory_member = 0;
+ int32_t peering_crush_mandatory_member = pg_CRUSH_ITEM_NONE;
// The per-bucket replica count is calculated with this "target"
// instead of the above crush_bucket_count. This means we can maintain a
// target size of 4 without attempting to place them all in 1 DC
// so that we know what they do/do not have explicitly before
// sending them any new info/logs/whatever.
for (unsigned i = 0; i < acting.size(); i++) {
- if (acting[i] != 0x7fffffff /* CRUSH_ITEM_NONE, can't import crush.h here */)
+ if (acting[i] != pg_pool_t::pg_CRUSH_ITEM_NONE)
probe.insert(pg_shard_t(acting[i], ec_pool ? shard_id_t(i) : shard_id_t::NO_SHARD));
}
// It may be possible to exclude the up nodes, but let's keep them in
// there for now.
for (unsigned i = 0; i < up.size(); i++) {
- if (up[i] != 0x7fffffff /* CRUSH_ITEM_NONE, can't import crush.h here */)
+ if (up[i] != pg_pool_t::pg_CRUSH_ITEM_NONE)
probe.insert(pg_shard_t(up[i], ec_pool ? shard_id_t(i) : shard_id_t::NO_SHARD));
}
uint32_t peering_crush_bucket_count = 0;
uint32_t peering_crush_bucket_target = 0;
uint32_t peering_crush_bucket_barrier = 0;
- int32_t peering_crush_mandatory_member = 0;
+ int32_t peering_crush_mandatory_member = CRUSH_ITEM_NONE;
bool used_replica = false;
bool paused = false;