From: Deepika Upadhyay
Date: Wed, 12 Feb 2020 14:38:29 +0000 (+0530)
Subject: mon/OSDMonitor: add flag `--yes-i-really-mean-it` for setting pool size 1
X-Git-Tag: v16.0.0~61^2
X-Git-Url: http://git-server-git.apps.pok.os.sepia.ceph.com/?a=commitdiff_plain;h=21508bd9ddd461a8fb67c665eb2b47a725ce995f;p=ceph.git

mon/OSDMonitor: add flag `--yes-i-really-mean-it` for setting pool size 1

Adds the option `mon_allow_pool_size_one`, disabled by default, to ensure
pools are not configured without replicas. A user who still wants a pool of
size 1 has to set `mon_allow_pool_size_one` to true and then pass the
`--yes-i-really-mean-it` flag to the CLI command:

Example: `ceph osd pool set test size 1 --yes-i-really-mean-it`

Fixes: https://tracker.ceph.com/issues/44025
Signed-off-by: Deepika Upadhyay
---

diff --git a/PendingReleaseNotes b/PendingReleaseNotes
index c9fd4c794518..cffac983c11e 100644
--- a/PendingReleaseNotes
+++ b/PendingReleaseNotes
@@ -316,3 +316,8 @@
   If the zonegroup is part of a realm, the change must be committed with
   'radosgw-admin period update --commit' - otherwise the change will take
   effect after radosgws are restarted.
+
+* Monitors now have the config option ``mon_allow_pool_size_one``, which is
+  disabled by default. Even when it is enabled, users have to pass the
+  ``--yes-i-really-mean-it`` flag to ``osd pool set size 1`` if they are
+  really sure about configuring a pool of size 1.
diff --git a/qa/standalone/ceph-helpers.sh b/qa/standalone/ceph-helpers.sh
index 19fede194802..1d4aa4e5fb35 100755
--- a/qa/standalone/ceph-helpers.sh
+++ b/qa/standalone/ceph-helpers.sh
@@ -478,6 +478,7 @@ function run_mon() {
         --run-dir=$dir \
         --pid-file=$dir/\$name.pid \
         --mon-allow-pool-delete \
+        --mon-allow-pool-size-one \
         --osd-pool-default-pg-autoscale-mode off \
         --mon-osd-backfillfull-ratio .99 \
         "$@" || return 1
diff --git a/qa/standalone/mon/health-mute.sh b/qa/standalone/mon/health-mute.sh
index a255798b82fb..d8e07ca061bb 100755
--- a/qa/standalone/mon/health-mute.sh
+++ b/qa/standalone/mon/health-mute.sh
@@ -38,7 +38,7 @@ function TEST_mute() {
     ceph -s
     ceph health | grep HEALTH_OK || return 1
     # test warning on setting pool size=1
-    ceph osd pool set foo size 1
+    ceph osd pool set foo size 1 --yes-i-really-mean-it
     ceph -s
     ceph health | grep HEALTH_WARN || return 1
     ceph health detail | grep POOL_NO_REDUNDANCY || return 1
diff --git a/qa/standalone/osd/osd-backfill-prio.sh b/qa/standalone/osd/osd-backfill-prio.sh
index a089696bb9b4..c8c05ce19fb9 100755
--- a/qa/standalone/osd/osd-backfill-prio.sh
+++ b/qa/standalone/osd/osd-backfill-prio.sh
@@ -129,8 +129,8 @@ function TEST_backfill_priority() {
       fi
     done

-    ceph osd pool set $pool2 size 1
-    ceph osd pool set $pool3 size 1
+    ceph osd pool set $pool2 size 1 --yes-i-really-mean-it
+    ceph osd pool set $pool3 size 1 --yes-i-really-mean-it
     wait_for_clean || return 1

     dd if=/dev/urandom of=$dir/data bs=1M count=10
@@ -405,9 +405,9 @@ function TEST_backfill_pool_priority() {
     pool1_prio=$(expr $DEGRADED_PRIO + 1 + $pool1_extra_prio)
     pool2_prio=$(expr $DEGRADED_PRIO + 1 + $pool2_extra_prio)

-    ceph osd pool set $pool1 size 1
+    ceph osd pool set $pool1 size 1 --yes-i-really-mean-it
     ceph osd pool set $pool1 recovery_priority $pool1_extra_prio
-    ceph osd pool set $pool2 size 1
+    ceph osd pool set $pool2 size 1 --yes-i-really-mean-it
     ceph osd pool set $pool2 recovery_priority $pool2_extra_prio
     wait_for_clean || return 1

diff --git a/qa/standalone/osd/osd-backfill-space.sh b/qa/standalone/osd/osd-backfill-space.sh
index 3978668eae23..b2a1a821fdb2 100755
--- a/qa/standalone/osd/osd-backfill-space.sh
+++ b/qa/standalone/osd/osd-backfill-space.sh
@@ -124,7 +124,7 @@ function TEST_backfill_test_simple() {
     for p in $(seq 1 $pools)
     do
       create_pool "${poolprefix}$p" 1 1
-      ceph osd pool set "${poolprefix}$p" size 1
+      ceph osd pool set "${poolprefix}$p" size 1 --yes-i-really-mean-it
     done

     wait_for_clean || return 1
@@ -206,7 +206,7 @@ function TEST_backfill_test_multi() {
     for p in $(seq 1 $pools)
     do
       create_pool "${poolprefix}$p" 1 1
-      ceph osd pool set "${poolprefix}$p" size 1
+      ceph osd pool set "${poolprefix}$p" size 1 --yes-i-really-mean-it
     done

     wait_for_clean || return 1
@@ -364,8 +364,8 @@ function TEST_backfill_test_sametarget() {
       fi
     done

-    ceph osd pool set $pool1 size 1
-    ceph osd pool set $pool2 size 1
+    ceph osd pool set $pool1 size 1 --yes-i-really-mean-it
+    ceph osd pool set $pool2 size 1 --yes-i-really-mean-it

     wait_for_clean || return 1

@@ -444,7 +444,7 @@ function TEST_backfill_multi_partial() {
     ceph osd set-require-min-compat-client luminous
     create_pool fillpool 1 1
-    ceph osd pool set fillpool size 1
+    ceph osd pool set fillpool size 1 --yes-i-really-mean-it
     for p in $(seq 1 $pools)
     do
       create_pool "${poolprefix}$p" 1 1
@@ -639,7 +639,7 @@ function TEST_ec_backfill_simple() {
     ceph osd set-backfillfull-ratio .85
     create_pool fillpool 1 1
-    ceph osd pool set fillpool size 1
+    ceph osd pool set fillpool size 1 --yes-i-really-mean-it

     # Partially fill an osd
     # We have room for 200 18K replicated objects, if we create 13K objects
@@ -770,7 +770,7 @@ function TEST_ec_backfill_multi() {
     ceph osd set-require-min-compat-client luminous
     create_pool fillpool 1 1
-    ceph osd pool set fillpool size 1
+    ceph osd pool set fillpool size 1 --yes-i-really-mean-it

     # Partially fill an osd
     # We have room for 200 18K replicated objects, if we create 9K objects
@@ -888,7 +888,7 @@ function SKIP_TEST_ec_backfill_multi_partial() {
     ceph osd set-require-min-compat-client luminous
     create_pool fillpool 1 1
-    ceph osd pool set fillpool size 1
+    ceph osd pool set fillpool size 1 --yes-i-really-mean-it
     # last osd
     ceph osd pg-upmap 1.0 $lastosd
@@ -1010,7 +1010,7 @@ function SKIP_TEST_ec_backfill_multi_partial() {
     ceph osd set-require-min-compat-client luminous
     create_pool fillpool 1 1
-    ceph osd pool set fillpool size 1
+    ceph osd pool set fillpool size 1 --yes-i-really-mean-it

     # Partially fill an osd
     # We have room for 200 48K ec objects, if we create 4k replicated objects
diff --git a/qa/standalone/osd/osd-backfill-stats.sh b/qa/standalone/osd/osd-backfill-stats.sh
index ea43b00fe462..26335678c659 100755
--- a/qa/standalone/osd/osd-backfill-stats.sh
+++ b/qa/standalone/osd/osd-backfill-stats.sh
@@ -143,7 +143,7 @@ function TEST_backfill_sizeup() {
     run_osd $dir 5 || return 1

     create_pool $poolname 1 1
-    ceph osd pool set $poolname size 1
+    ceph osd pool set $poolname size 1 --yes-i-really-mean-it

     wait_for_clean || return 1

@@ -189,7 +189,7 @@ function TEST_backfill_sizeup_out() {
     run_osd $dir 5 || return 1

     create_pool $poolname 1 1
-    ceph osd pool set $poolname size 1
+    ceph osd pool set $poolname size 1 --yes-i-really-mean-it

     wait_for_clean || return 1

diff --git a/qa/standalone/osd/osd-recovery-prio.sh b/qa/standalone/osd/osd-recovery-prio.sh
index fb386e265ab3..672b407de900 100755
--- a/qa/standalone/osd/osd-recovery-prio.sh
+++ b/qa/standalone/osd/osd-recovery-prio.sh
@@ -125,8 +125,8 @@ function TEST_recovery_priority() {
       fi
     done

-    ceph osd pool set $pool2 size 1
-    ceph osd pool set $pool3 size 1
+    ceph osd pool set $pool2 size 1 --yes-i-really-mean-it
+    ceph osd pool set $pool3 size 1 --yes-i-really-mean-it
     wait_for_clean || return 1

     dd if=/dev/urandom of=$dir/data bs=1M count=10
@@ -401,9 +401,9 @@ function TEST_recovery_pool_priority() {
     pool1_prio=$(expr $NORMAL_PRIO + $pool1_extra_prio)
     pool2_prio=$(expr $NORMAL_PRIO + $pool2_extra_prio)

-    ceph osd pool set $pool1 size 1
+    ceph osd pool set $pool1 size 1 --yes-i-really-mean-it
     ceph osd pool set $pool1 recovery_priority $pool1_extra_prio
-    ceph osd pool set $pool2 size 1
+    ceph osd pool set $pool2 size 1 --yes-i-really-mean-it
     ceph osd pool set $pool2 recovery_priority $pool2_extra_prio
     wait_for_clean || return 1

diff --git a/qa/standalone/osd/osd-recovery-space.sh b/qa/standalone/osd/osd-recovery-space.sh
index 82cdf82e5733..07ed09b4380f 100755
--- a/qa/standalone/osd/osd-recovery-space.sh
+++ b/qa/standalone/osd/osd-recovery-space.sh
@@ -105,7 +105,7 @@ function TEST_recovery_test_simple() {
     for p in $(seq 1 $pools)
    do
       create_pool "${poolprefix}$p" 1 1
-      ceph osd pool set "${poolprefix}$p" size 1
+      ceph osd pool set "${poolprefix}$p" size 1 --yes-i-really-mean-it
     done

     wait_for_clean || return 1
diff --git a/qa/standalone/osd/osd-recovery-stats.sh b/qa/standalone/osd/osd-recovery-stats.sh
index b6955bb4c60c..7d88e98a0c09 100755
--- a/qa/standalone/osd/osd-recovery-stats.sh
+++ b/qa/standalone/osd/osd-recovery-stats.sh
@@ -314,7 +314,7 @@ function TEST_recovery_undersized() {
     done

     create_pool $poolname 1 1
-    ceph osd pool set $poolname size 1
+    ceph osd pool set $poolname size 1 --yes-i-really-mean-it

     wait_for_clean || return 1

diff --git a/qa/tasks/ceph.conf.template b/qa/tasks/ceph.conf.template
index 493eacb32caa..6eff6e339a0c 100644
--- a/qa/tasks/ceph.conf.template
+++ b/qa/tasks/ceph.conf.template
@@ -30,6 +30,7 @@
     mon warn on too few osds = false
     mon_warn_on_pool_pg_num_not_power_of_two = false
     mon_warn_on_pool_no_redundancy = false
+    mon_allow_pool_size_one = true

     osd pool default erasure code profile = "plugin=jerasure technique=reed_sol_van k=2 m=1 ruleset-failure-domain=osd crush-failure-domain=osd"

diff --git a/qa/workunits/ceph-helpers-root.sh b/qa/workunits/ceph-helpers-root.sh
index 934380e5c475..c8fa4a3fe41d 100755
--- a/qa/workunits/ceph-helpers-root.sh
+++ b/qa/workunits/ceph-helpers-root.sh
@@ -108,7 +108,7 @@ function pool_read_write() {
     ceph osd pool delete $test_pool $test_pool --yes-i-really-really-mean-it || return 1
     ceph osd pool create $test_pool 4 || return 1
-    ceph osd pool set $test_pool size $size || return 1
+    ceph osd pool set $test_pool size $size --yes-i-really-mean-it || return 1
     ceph osd pool set $test_pool min_size $size || return 1
     ceph osd pool application enable $test_pool rados

diff --git a/qa/workunits/cephtool/test.sh b/qa/workunits/cephtool/test.sh
index 51d8bd7c7bc3..16d8fa551e48 100755
--- a/qa/workunits/cephtool/test.sh
+++ b/qa/workunits/cephtool/test.sh
@@ -2174,9 +2174,9 @@ function test_mon_osd_pool_set()
   old_size=$(ceph osd pool get $TEST_POOL_GETSET size | sed -e 's/size: //')
   (( new_size = old_size + 1 ))
-  ceph osd pool set $TEST_POOL_GETSET size $new_size
+  ceph osd pool set $TEST_POOL_GETSET size $new_size --yes-i-really-mean-it
   ceph osd pool get $TEST_POOL_GETSET size | grep "size: $new_size"
-  ceph osd pool set $TEST_POOL_GETSET size $old_size
+  ceph osd pool set $TEST_POOL_GETSET size $old_size --yes-i-really-mean-it

   ceph osd pool create pool_erasure 1 1 erasure
   ceph osd pool application enable pool_erasure rados
diff --git a/qa/workunits/mon/pool_ops.sh b/qa/workunits/mon/pool_ops.sh
index 4098795b9892..198a65869c7c 100755
--- a/qa/workunits/mon/pool_ops.sh
+++ b/qa/workunits/mon/pool_ops.sh
@@ -19,7 +19,7 @@ ceph osd pool create foooo 123
 ceph osd pool create foo 123 # idempotent

-ceph osd pool set foo size 1
+ceph osd pool set foo size 1 --yes-i-really-mean-it
 ceph osd pool set foo size 4
 ceph osd pool set foo size 10
 expect_false ceph osd pool set foo size 0
diff --git a/qa/workunits/rados/test_alloc_hint.sh b/qa/workunits/rados/test_alloc_hint.sh
index 2323915fce74..535201ca3c45 100755
--- a/qa/workunits/rados/test_alloc_hint.sh
+++ b/qa/workunits/rados/test_alloc_hint.sh
@@ -109,7 +109,7 @@ setup_osd_data

 POOL="alloc_hint-rep"
 ceph osd pool create "${POOL}" "${NUM_PG}"
-ceph osd pool set "${POOL}" size "${NUM_OSDS}"
+ceph osd pool set "${POOL}" size "${NUM_OSDS}" --yes-i-really-mean-it
 ceph osd pool application enable "${POOL}" rados

 OBJ="foo"
diff --git a/src/common/options.cc b/src/common/options.cc
index 449e5ea585c4..54c70e8b4fee 100644
--- a/src/common/options.cc
+++ b/src/common/options.cc
@@ -1736,6 +1736,11 @@ std::vector