From: Kamoltat Sirivadhna
Date: Tue, 5 Nov 2024 20:53:41 +0000 (+0000)
Subject: [3AZ Stretch pool]: Allow user to specify values when unsetting pools
X-Git-Tag: testing/wip-pdonnell-testing-20250324.181635-debug~12^2~2
X-Git-Url: http://git.apps.os.sepia.ceph.com/?a=commitdiff_plain;h=cacc5f5127047383eee8e6e193d7a9997083a310;p=ceph-ci.git

[3AZ Stretch pool]: Allow user to specify values when unsetting pools

Problem:

When we enable stretch mode on a pool, we modify six pool configs:
peering_crush_bucket_count, peering_crush_bucket_target,
peering_crush_bucket_barrier, crush_rule, size, and min_size. Of these,
`osd pool stretch unset` resets only the three peering_crush_bucket_*
configs to 0. The remaining three, crush_rule, size, and min_size, are
not reverted and keep the values that were set by the `osd pool stretch
set` command.

Solution:

The unset command now requires the user to specify `crush_rule`, `size`,
and `min_size`, which are applied to the pool when stretch mode is
unset.

Fixes: https://tracker.ceph.com/issues/68842

Signed-off-by: Kamoltat Sirivadhna
---

diff --git a/src/mon/MonCommands.h b/src/mon/MonCommands.h
index a53d1adce9e..7aa9fb58d74 100644
--- a/src/mon/MonCommands.h
+++ b/src/mon/MonCommands.h
@@ -1234,7 +1234,10 @@ COMMAND("osd pool stretch set "
        "make the pool stretched across the specified number of CRUSH buckets",
        "osd", "rw")
 COMMAND("osd pool stretch unset "
-       "name=pool,type=CephPoolname",
+       "name=pool,type=CephPoolname "
+       "name=crush_rule,type=CephString "
+       "name=size,type=CephInt,range=0 "
+       "name=min_size,type=CephInt,range=0 ",
        "unset the stretch mode for the pool",
        "osd", "rw")
 COMMAND("osd utilization",
diff --git a/src/mon/OSDMonitor.cc b/src/mon/OSDMonitor.cc
index 4807f9dd166..4bff8b3bb4b 100644
--- a/src/mon/OSDMonitor.cc
+++ b/src/mon/OSDMonitor.cc
@@ -9237,11 +9237,44 @@ int OSDMonitor::prepare_command_pool_stretch_unset(const cmdmap_t& cmdmap,
     ss << "pool " << pool_name << " is not a stretch pool";
     return -ENOENT;
   }
+  CrushWrapper& crush = _get_stable_crush();
+  string crush_rule_str;
+  cmd_getval(cmdmap, "crush_rule", crush_rule_str);
+  if (crush_rule_str.empty()) {
+    ss << "crush_rule must be provided";
+    return -EINVAL;
+  }
+
+  int crush_rule = crush.get_rule_id(crush_rule_str);
+  if (crush_rule < 0) {
+    ss << "crush rule " << crush_rule_str << " does not exist";
+    return -ENOENT;
+  }
+
+  if (!crush.rule_valid_for_pool_type(crush_rule, p.get_type())) {
+    ss << "crush rule " << crush_rule << " type does not match pool";
+    return -EINVAL;
+  }
+
+  int64_t pool_size = cmd_getval_or(cmdmap, "size", 0);
+  if (pool_size < 0) {
+    ss << "pool size must be non-negative";
+    return -EINVAL;
+  }
+
+  int64_t pool_min_size = cmd_getval_or(cmdmap, "min_size", 0);
+  if (pool_min_size < 0) {
+    ss << "pool min_size must be non-negative";
+    return -EINVAL;
+  }
 
   // unset stretch values
   p.peering_crush_bucket_count = 0;
   p.peering_crush_bucket_target = 0;
   p.peering_crush_bucket_barrier = 0;
+  p.crush_rule = static_cast<__u8>(crush_rule);
+  p.size = static_cast<__u8>(pool_size);
+  p.min_size = static_cast<__u8>(pool_min_size);
   p.last_change = pending_inc.epoch;
   pending_inc.new_pools[pool] = p;
   ss << "pool " << pool_name
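
For illustration, a minimal invocation of the updated command might look
like the following, based on the argument order declared in MonCommands.h
above (pool, crush_rule, size, min_size); the pool name "mypool", the
rule "replicated_rule", and the size/min_size values 3 and 2 are
placeholders, not values taken from this patch:

    # revert a stretch pool, supplying the rule, size, and min_size
    # to restore (placeholder values, assuming a default replicated setup)
    ceph osd pool stretch unset mypool replicated_rule 3 2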