wait_for_clean() can miss the new pool if it races with pool create.
If the new pool's PGs have not yet appeared in the PG map when
wait_for_clean() starts polling, the pre-existing (already clean) PGs
satisfy the check and the wait returns before the new PGs are clean.
Wrap pool creation in a create_pool() helper that pauses briefly after
the create so the new PGs are visible before any wait begins.

Fixes: http://tracker.ceph.com/issues/20465
Signed-off-by: David Zafman <dzafman@redhat.com>
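
For reference, a minimal sketch of the racy pattern and the fix (helper
names as used by the standalone tests; wait_for_clean() internals are
simplified here):

    # Racy: if the new pool's PGs are not yet in the PG map,
    # wait_for_clean() sees only the old, already-clean PGs and
    # returns immediately.
    ceph osd pool create $poolname 12 12 || return 1
    wait_for_clean || return 1   # may pass before the new PGs exist

    # Fixed: create_pool() sleeps after the create so the new PGs
    # are visible before wait_for_clean() starts polling.
    create_pool $poolname 12 12 || return 1
    wait_for_clean || return 1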
function create_rbd_pool() {
ceph osd pool delete rbd rbd --yes-i-really-really-mean-it || return 1
- ceph osd pool create rbd $PG_NUM || return 1
+ create_pool rbd $PG_NUM || return 1
rbd pool init rbd
}
+function create_pool() {
+ ceph osd pool create "$@"
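+ # Give the new pool's PGs a chance to appear in the PG map before
+ # callers run wait_for_clean(); without this the wait can return early.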
+ sleep 1
+}
+
#######################################################################
function run_mgr() {
ceph osd erasure-code-profile set myprofile \
crush-failure-domain=osd || return 1
- ceph osd pool create $poolname 12 12 erasure myprofile \
+ create_pool $poolname 12 12 erasure myprofile \
|| return 1
wait_for_clean || return 1
}
mapping=DD_ \
crush-steps='[ [ "chooseleaf", "osd", 0 ] ]' \
layers='[ [ "DDc", "" ] ]' || return 1
- ceph osd pool create $poolname 12 12 erasure $profile \
+ create_pool $poolname 12 12 erasure $profile \
|| return 1
rados_put_get $dir $poolname || return 1
plugin=lrc \
k=4 m=2 l=3 \
crush-failure-domain=osd || return 1
- ceph osd pool create $poolname 12 12 erasure $profile \
+ create_pool $poolname 12 12 erasure $profile \
|| return 1
rados_put_get $dir $poolname || return 1
ceph osd erasure-code-profile set profile-isa \
plugin=isa \
crush-failure-domain=osd || return 1
- ceph osd pool create $poolname 1 1 erasure profile-isa \
+ create_pool $poolname 1 1 erasure profile-isa \
|| return 1
rados_put_get $dir $poolname || return 1
plugin=jerasure \
k=4 m=2 \
crush-failure-domain=osd || return 1
- ceph osd pool create $poolname 12 12 erasure $profile \
+ create_pool $poolname 12 12 erasure $profile \
|| return 1
rados_put_get $dir $poolname || return 1
plugin=shec \
k=2 m=1 c=1 \
crush-failure-domain=osd || return 1
- ceph osd pool create $poolname 12 12 erasure $profile \
+ create_pool $poolname 12 12 erasure $profile \
|| return 1
rados_put_get $dir $poolname || return 1
mapping='_DD' \
crush-steps='[ [ "choose", "osd", 0 ] ]' || return 1
ceph osd erasure-code-profile get remap-profile
- ceph osd pool create remap-pool 12 12 erasure remap-profile \
+ create_pool remap-pool 12 12 erasure remap-profile \
|| return 1
#
plugin=jerasure \
k=2 m=1 \
crush-failure-domain=osd || return 1
- ceph osd pool create $poolname 1 1 erasure myprofile \
+ create_pool $poolname 1 1 erasure myprofile \
|| return 1
wait_for_clean || return 1
}
setup $dir || return 1
run_mon $dir a || return 1
create_rbd_pool || return 1
- ceph osd pool create $TEST_POOL 8
+ create_pool $TEST_POOL 8
local flag
for flag in nodelete nopgchange nosizechange write_fadvise_dontneed noscrub nodeep-scrub; do
! ceph osd pool set $TEST_POOL min_size 0 || return 1
local ecpool=erasepool
- ceph osd pool create $ecpool 12 12 erasure default || return 1
+ create_pool $ecpool 12 12 erasure default || return 1
# erasure pool: size=k+m, min_size=k
local size=$(ceph osd pool get $ecpool size|awk '{print $2}')
local min_size=$(ceph osd pool get $ecpool min_size|awk '{print $2}')
run_osd $dir 1 || return 1
run_osd $dir 2 || return 1
- ceph osd pool create hello 8 || return 1
+ create_pool hello 8 || return 1
echo "hello world" > $dir/hello
rados --pool hello put foo $dir/hello || return 1
grep "WRONG does not exist" || return 1
ceph osd erasure-code-profile set $profile || return 1
- ceph osd pool create poolname 12 12 erasure $profile || return 1
+ create_pool poolname 12 12 erasure $profile || return 1
! ceph osd erasure-code-profile rm $profile > $dir/out 2>&1 || return 1
grep "poolname.*using.*$profile" $dir/out || return 1
ceph osd pool delete poolname poolname --yes-i-really-really-mean-it || return 1
run_osd $dir 2 || return 1
local poolname=testquota
- ceph osd pool create $poolname 20
+ create_pool $poolname 20
local objects=`ceph df detail | grep -w $poolname|awk '{print $3}'`
local bytes=`ceph df detail | grep -w $poolname|awk '{print $4}'`
sleep 5
- ceph osd pool create foo 16
+ create_pool foo 16
# write some objects
rados bench -p foo 10 write -b 4096 --no-cleanup || return 1
ceph osd erasure-code-profile set myprofile crush-failure-domain=osd $3 $4 $5 $6 $7 || return 1
- ceph osd pool create "$poolname" 1 1 erasure myprofile || return 1
+ create_pool "$poolname" 1 1 erasure myprofile || return 1
if [ "$allow_overwrites" = "true" ]; then
ceph osd pool set "$poolname" allow_ec_overwrites true || return 1
create_rbd_pool || return 1
wait_for_clean || return 1
- ceph osd pool create foo 1 || return 1
- ceph osd pool create $poolname 1 1 || return 1
+ create_pool foo 1 || return 1
+ create_pool $poolname 1 1 || return 1
wait_for_clean || return 1
for i in $(seq 1 $total_objs) ; do
fi
done
create_rbd_pool || return 1
- ceph osd pool create foo 1
+ create_pool foo 1
create_ec_pool $poolname $allow_overwrites k=2 m=1 stripe_unit=2K --force || return 1
wait_for_clean || return 1
create_rbd_pool || return 1
wait_for_clean || return 1
- ceph osd pool create $poolname 1 1 || return 1
+ create_pool $poolname 1 1 || return 1
wait_for_clean || return 1
local osd=0
wait_for_clean || return 1
# Create a pool with a single pg
- ceph osd pool create $poolname 1 1
+ create_pool $poolname 1 1
+ wait_for_clean || return 1
poolid=$(ceph osd dump | grep "^pool.*[']test[']" | awk '{ print $2 }')
dd if=/dev/urandom of=$TESTDATA bs=1032 count=1