If the zonegroup is part of a realm, the change must be committed with
``radosgw-admin period update --commit``; otherwise, the change will take
effect after the radosgws are restarted.
+
+* Monitors now have a config option ``mon_allow_pool_size_one``, which is
+  disabled by default. Even when it is enabled, users must pass the
+  ``--yes-i-really-mean-it`` flag to ``osd pool set <pool> size 1`` to confirm
+  that they really intend to configure a pool with size 1.
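
A minimal sketch of the resulting operator workflow (``mypool`` is a
placeholder pool name; the option itself can be enabled in ``ceph.conf`` as in
the test configurations further below)::

    # with mon_allow_pool_size_one = true on the monitors, the explicit
    # flag is still required to drop a pool to a single replica
    ceph osd pool set mypool size 1 --yes-i-really-mean-it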
--run-dir=$dir \
--pid-file=$dir/\$name.pid \
--mon-allow-pool-delete \
+ --mon-allow-pool-size-one \
--osd-pool-default-pg-autoscale-mode off \
--mon-osd-backfillfull-ratio .99 \
"$@" || return 1
ceph -s
ceph health | grep HEALTH_OK || return 1
# test warning on setting pool size=1
- ceph osd pool set foo size 1
+ ceph osd pool set foo size 1 --yes-i-really-mean-it
ceph -s
ceph health | grep HEALTH_WARN || return 1
ceph health detail | grep POOL_NO_REDUNDANCY || return 1
fi
done
- ceph osd pool set $pool2 size 1
- ceph osd pool set $pool3 size 1
+ ceph osd pool set $pool2 size 1 --yes-i-really-mean-it
+ ceph osd pool set $pool3 size 1 --yes-i-really-mean-it
wait_for_clean || return 1
dd if=/dev/urandom of=$dir/data bs=1M count=10
pool1_prio=$(expr $DEGRADED_PRIO + 1 + $pool1_extra_prio)
pool2_prio=$(expr $DEGRADED_PRIO + 1 + $pool2_extra_prio)
- ceph osd pool set $pool1 size 1
+ ceph osd pool set $pool1 size 1 --yes-i-really-mean-it
ceph osd pool set $pool1 recovery_priority $pool1_extra_prio
- ceph osd pool set $pool2 size 1
+ ceph osd pool set $pool2 size 1 --yes-i-really-mean-it
ceph osd pool set $pool2 recovery_priority $pool2_extra_prio
wait_for_clean || return 1
for p in $(seq 1 $pools)
do
create_pool "${poolprefix}$p" 1 1
- ceph osd pool set "${poolprefix}$p" size 1
+ ceph osd pool set "${poolprefix}$p" size 1 --yes-i-really-mean-it
done
wait_for_clean || return 1
for p in $(seq 1 $pools)
do
create_pool "${poolprefix}$p" 1 1
- ceph osd pool set "${poolprefix}$p" size 1
+ ceph osd pool set "${poolprefix}$p" size 1 --yes-i-really-mean-it
done
wait_for_clean || return 1
fi
done
- ceph osd pool set $pool1 size 1
- ceph osd pool set $pool2 size 1
+ ceph osd pool set $pool1 size 1 --yes-i-really-mean-it
+ ceph osd pool set $pool2 size 1 --yes-i-really-mean-it
wait_for_clean || return 1
ceph osd set-require-min-compat-client luminous
create_pool fillpool 1 1
- ceph osd pool set fillpool size 1
+ ceph osd pool set fillpool size 1 --yes-i-really-mean-it
for p in $(seq 1 $pools)
do
create_pool "${poolprefix}$p" 1 1
ceph osd set-backfillfull-ratio .85
create_pool fillpool 1 1
- ceph osd pool set fillpool size 1
+ ceph osd pool set fillpool size 1 --yes-i-really-mean-it
# Partially fill an osd
# We have room for 200 18K replicated objects, if we create 13K objects
ceph osd set-require-min-compat-client luminous
create_pool fillpool 1 1
- ceph osd pool set fillpool size 1
+ ceph osd pool set fillpool size 1 --yes-i-really-mean-it
# Partially fill an osd
# We have room for 200 18K replicated objects, if we create 9K objects
ceph osd set-require-min-compat-client luminous
create_pool fillpool 1 1
- ceph osd pool set fillpool size 1
+ ceph osd pool set fillpool size 1 --yes-i-really-mean-it
# last osd
ceph osd pg-upmap 1.0 $lastosd
ceph osd set-require-min-compat-client luminous
create_pool fillpool 1 1
- ceph osd pool set fillpool size 1
+ ceph osd pool set fillpool size 1 --yes-i-really-mean-it
# Partially fill an osd
# We have room for 200 48K ec objects, if we create 4k replicated objects
run_osd $dir 5 || return 1
create_pool $poolname 1 1
- ceph osd pool set $poolname size 1
+ ceph osd pool set $poolname size 1 --yes-i-really-mean-it
wait_for_clean || return 1
run_osd $dir 5 || return 1
create_pool $poolname 1 1
- ceph osd pool set $poolname size 1
+ ceph osd pool set $poolname size 1 --yes-i-really-mean-it
wait_for_clean || return 1
fi
done
- ceph osd pool set $pool2 size 1
- ceph osd pool set $pool3 size 1
+ ceph osd pool set $pool2 size 1 --yes-i-really-mean-it
+ ceph osd pool set $pool3 size 1 --yes-i-really-mean-it
wait_for_clean || return 1
dd if=/dev/urandom of=$dir/data bs=1M count=10
pool1_prio=$(expr $NORMAL_PRIO + $pool1_extra_prio)
pool2_prio=$(expr $NORMAL_PRIO + $pool2_extra_prio)
- ceph osd pool set $pool1 size 1
+ ceph osd pool set $pool1 size 1 --yes-i-really-mean-it
ceph osd pool set $pool1 recovery_priority $pool1_extra_prio
- ceph osd pool set $pool2 size 1
+ ceph osd pool set $pool2 size 1 --yes-i-really-mean-it
ceph osd pool set $pool2 recovery_priority $pool2_extra_prio
wait_for_clean || return 1
for p in $(seq 1 $pools)
do
create_pool "${poolprefix}$p" 1 1
- ceph osd pool set "${poolprefix}$p" size 1
+ ceph osd pool set "${poolprefix}$p" size 1 --yes-i-really-mean-it
done
wait_for_clean || return 1
done
create_pool $poolname 1 1
- ceph osd pool set $poolname size 1
+ ceph osd pool set $poolname size 1 --yes-i-really-mean-it
wait_for_clean || return 1
mon warn on too few osds = false
mon_warn_on_pool_pg_num_not_power_of_two = false
mon_warn_on_pool_no_redundancy = false
+ mon_allow_pool_size_one = true
osd pool default erasure code profile = "plugin=jerasure technique=reed_sol_van k=2 m=1 ruleset-failure-domain=osd crush-failure-domain=osd"
ceph osd pool delete $test_pool $test_pool --yes-i-really-really-mean-it || return 1
ceph osd pool create $test_pool 4 || return 1
- ceph osd pool set $test_pool size $size || return 1
+ ceph osd pool set $test_pool size $size --yes-i-really-mean-it || return 1
ceph osd pool set $test_pool min_size $size || return 1
ceph osd pool application enable $test_pool rados
old_size=$(ceph osd pool get $TEST_POOL_GETSET size | sed -e 's/size: //')
(( new_size = old_size + 1 ))
- ceph osd pool set $TEST_POOL_GETSET size $new_size
+ ceph osd pool set $TEST_POOL_GETSET size $new_size --yes-i-really-mean-it
ceph osd pool get $TEST_POOL_GETSET size | grep "size: $new_size"
- ceph osd pool set $TEST_POOL_GETSET size $old_size
+ ceph osd pool set $TEST_POOL_GETSET size $old_size --yes-i-really-mean-it
ceph osd pool create pool_erasure 1 1 erasure
ceph osd pool application enable pool_erasure rados
ceph osd pool create foo 123 # idempotent
-ceph osd pool set foo size 1
+ceph osd pool set foo size 1 --yes-i-really-mean-it
ceph osd pool set foo size 4
ceph osd pool set foo size 10
expect_false ceph osd pool set foo size 0
POOL="alloc_hint-rep"
ceph osd pool create "${POOL}" "${NUM_PG}"
-ceph osd pool set "${POOL}" size "${NUM_OSDS}"
+ceph osd pool set "${POOL}" size "${NUM_OSDS}" --yes-i-really-mean-it
ceph osd pool application enable "${POOL}" rados
OBJ="foo"
.add_see_also("osd_pool_default_size")
.add_see_also("osd_pool_default_min_size"),
+ Option("mon_allow_pool_size_one", Option::TYPE_BOOL, Option::LEVEL_ADVANCED)
+ .set_default(false)
+ .add_service("mon")
+ .set_description("allow configuring pool with no replicas"),
+
Option("mon_warn_on_misplaced", Option::TYPE_BOOL, Option::LEVEL_ADVANCED)
.set_default(false)
.add_service("mgr")
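
The option can also be inspected and changed at runtime through the usual
monitor configuration interface; a hedged sketch (exact output formatting may
differ)::

    ceph config get mon mon_allow_pool_size_one     # expected: false (the default)
    ceph config set mon mon_allow_pool_size_one true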
ss << "pool size must be between 1 and 10";
return -EINVAL;
}
+ if (n == 1) {
+ if (!g_conf().get_val<bool>("mon_allow_pool_size_one")) {
+ ss << "configuring pool size as 1 is disabled by default.";
+ return -EPERM;
+ }
+ bool sure = false;
+ cmd_getval(cmdmap, "yes_i_really_mean_it", sure);
+ if (!sure) {
+   ss << "WARNING: setting pool size 1 could lead to data loss "
+     "without recovery. If you are *ABSOLUTELY CERTAIN* that is what you want, "
+     "pass the flag --yes-i-really-mean-it.";
+   return -EPERM;
+ }
+ }
if (!osdmap.crush->check_crush_rule(p.get_crush_rule(), p.type, n, ss)) {
return -EINVAL;
}
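
The check above produces two distinct refusals before a size-one pool is
accepted; a sketch of the expected CLI behaviour using the test pool ``foo``
(the error strings are taken from the messages in the code above; exact
formatting may differ)::

    # option disabled (the default): the request is rejected with EPERM
    ceph osd pool set foo size 1
    #   -> "configuring pool size as 1 is disabled by default."

    # option enabled but flag omitted: rejected with the data-loss WARNING
    ceph osd pool set foo size 1
    #   -> "WARNING: setting pool size 1 could lead to data loss ..."

    # option enabled and flag supplied: the size change is accepted
    ceph osd pool set foo size 1 --yes-i-really-mean-it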
for pool in `./ceph osd pool ls`; do
local size=`./ceph osd pool get ${pool} size | awk '{print $2}'`
if [ "${size}" -gt "${CEPH_NUM_OSD}" ]; then
- ./ceph osd pool set ${pool} size ${CEPH_NUM_OSD}
+ ./ceph osd pool set ${pool} size ${CEPH_NUM_OSD} --yes-i-really-mean-it
changed=1
fi
done
mon_data_avail_warn = 2
mon_data_avail_crit = 1
mon_allow_pool_delete = true
+mon_allow_pool_size_one = true
[osd]
osd_scrub_load_threshold = 2000