export -n CEPH_CLI_TEST_DUP_COMMAND
local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
for func in $funcs ; do
+ setup $dir || return 1
$func $dir || return 1
+ teardown $dir || return 1
done
}
# Tail of a TEST_* case — the "function NAME() {" header line is missing from
# this copy; the contents match TEST_corrupt_and_repair_replicated from
# ceph's osd-scrub-repair.sh — TODO confirm against upstream.
# NOTE(review): stray unified-diff "- " removed-line markers (the
# setup/teardown hoisting into the runner) stripped below; such lines are
# not valid shell. All remaining code left byte-identical.
local dir=$1
local poolname=rbd
run_mon $dir a --osd_pool_default_size=2 || return 1
run_mgr $dir x || return 1
run_osd $dir 0 || return 1
# NOTE(review): intermediate lines (second OSD, add_something, ...) appear
# truncated from this copy — TODO restore from upstream.
corrupt_and_repair_one $dir $poolname $(get_not_primary $poolname SOMETHING) || return 1
# Reproduces http://tracker.ceph.com/issues/8914
corrupt_and_repair_one $dir $poolname $(get_primary $poolname SOMETHING) || return 1
}
#
# NOTE(review): truncated interior of a TEST_* function — its header line is
# missing, and the run_osd line's trailing backslash makes the next command
# part of the same (broken) command line. Stray unified-diff "- " marker
# lines (setup/teardown removal) stripped; all other code byte-identical.
local dir=$1
local poolname=rbd
run_mon $dir a --osd_pool_default_size=2 || return 1
run_mgr $dir x || return 1
run_osd $dir 0 --osd_scrub_during_recovery=false \
add_something $dir $poolname || return 1
corrupt_and_repair_one $dir $poolname $(get_not_primary $poolname SOMETHING) || return 1
}
#
# NOTE(review): truncated interior of a TEST_* function — header missing, and
# the run_osd backslash continuation runs straight into the next command.
# Stray unified-diff "- " marker lines (setup/teardown removal) stripped;
# all other code byte-identical.
local dir=$1
local poolname=rbd
run_mon $dir a --osd_pool_default_size=2 || return 1
run_mgr $dir x || return 1
run_osd $dir 0 --osd_scrub_during_recovery=false \
add_something $dir $poolname || return 1
scrub_and_not_schedule $dir $poolname $(get_not_primary $poolname SOMETHING) || return 1
}
function scrub_and_not_schedule() {
# Presumably: run a scrub and verify no further scrub gets scheduled (from
# the name and its caller; the body is too truncated to confirm).
# NOTE(review): $dir is referenced but never assigned here (a "local dir=$1"
# was presumably lost); the ceph_osd_args string below is never closed; the
# trailing lines look spliced in from a different test. Stray unified-diff
# "- " marker lines removed; all other code byte-identical.
local poolname=ecpool
# Launch a cluster with 5 seconds scrub interval
run_mon $dir a || return 1
run_mgr $dir x || return 1
local ceph_osd_args="--osd-scrub-auto-repair=true \
objectstore_tool $dir $(get_not_primary $poolname SOMETHING) SOMETHING list-attrs || return 1
rados --pool $poolname get SOMETHING $dir/COPY || return 1
diff $dir/ORIGINAL $dir/COPY || return 1
}
function TEST_auto_repair_erasure_coded_appends() {
# Presumably exercises scrub auto-repair on an append-only EC pool (from the
# name; the body is too truncated to confirm).
# NOTE(review): the ceph_osd_args string is never closed; $dir and $primary
# are never assigned in the visible body. Stray unified-diff "- " marker
# lines removed; all other code byte-identical.
local poolname=testpool
# Launch a cluster with 5 seconds scrub interval
run_mon $dir a || return 1
run_mgr $dir x || return 1
local ceph_osd_args="--osd-scrub-auto-repair=true \
objectstore_tool $dir $(get_not_primary $poolname SOMETHING) SOMETHING get-bytes $dir/COPY || return 1
diff $dir/ORIGINAL $dir/COPY || return 1
grep scrub_finish $dir/osd.${primary}.log
}
function TEST_auto_repair_bluestore_scrub() {
# Presumably checks that a scrub with auto-repair enabled repairs exactly
# one object (final jq check on num_objects_repaired; body truncated).
# NOTE(review): the ceph_osd_args string is never closed and its trailing
# backslash swallows the following comment line; $dir and $pgid are unbound
# in the visible body. Stray unified-diff "- " marker lines removed; all
# other code byte-identical.
local poolname=testpool
# Launch a cluster with 5 seconds scrub interval
run_mon $dir a || return 1
run_mgr $dir x || return 1
local ceph_osd_args="--osd-scrub-auto-repair=true \
# This should have caused 1 object to be repaired
COUNT=$(ceph pg $pgid query | jq '.info.stats.stat_sum.num_objects_repaired')
test "$COUNT" = "1" || return 1
}
function TEST_auto_repair_bluestore_failed() {
# Presumably drives a repair that initially cannot fully succeed and then
# verifies the PG returns to active+clean (from the final greps; the body is
# too truncated to confirm).
# NOTE(review): the ceph_osd_args string is never closed; $dir, $pgid and
# $primary are unbound in the visible body. Stray unified-diff "- " marker
# lines removed; all other code byte-identical.
local poolname=testpool
# Launch a cluster with 5 seconds scrub interval
run_mon $dir a || return 1
run_mgr $dir x || return 1
local ceph_osd_args="--osd-scrub-auto-repair=true \
ceph pg dump pgs
ceph pg dump pgs | grep -q -e "^${pgid}.* active+clean " -e "^${pgid}.* active+clean+wait " || return 1
grep scrub_finish $dir/osd.${primary}.log
}
function TEST_auto_repair_bluestore_failed_norecov() {
# Presumably: with recovery unavailable, auto-repair cannot fix the object
# and the PG ends in failed_repair (from the final greps; body truncated).
# NOTE(review): the ceph_osd_args string is never closed; $dir, $pgid and
# $primary are unbound in the visible body. Stray unified-diff "- " marker
# lines removed; all other code byte-identical.
local poolname=testpool
# Launch a cluster with 5 seconds scrub interval
run_mon $dir a || return 1
run_mgr $dir x || return 1
local ceph_osd_args="--osd-scrub-auto-repair=true \
grep -q "scrub_finish.*present with no repair possible" $dir/osd.${primary}.log || return 1
ceph pg dump pgs
ceph pg dump pgs | grep -q "^${pgid}.*+failed_repair" || return 1
}
function TEST_repair_stats() {
# Presumably injects $REPAIRS shard corruptions, repairs them, and checks
# num_shards_repaired in osd_stats_sum (final jq/test; body truncated).
# NOTE(review): the ceph_osd_args string is never closed; $dir is unbound in
# the visible body. Stray unified-diff "- " marker lines removed; all other
# code byte-identical.
local REPAIRS=20
# Launch a cluster with 5 seconds scrub interval
run_mon $dir a || return 1
run_mgr $dir x || return 1
local ceph_osd_args="--osd_deep_scrub_randomize_ratio=0 \
ceph pg dump --format=json-pretty | jq ".pg_map.osd_stats_sum"
COUNT=$(ceph pg dump --format=json-pretty | jq ".pg_map.osd_stats_sum.num_shards_repaired")
test "$COUNT" = "$REPAIRS" || return 1
}
function TEST_repair_stats_ec() {
# EC variant of TEST_repair_stats — checks num_shards_repaired after
# repairing injected corruptions (final jq/test; body truncated).
# NOTE(review): the ceph_osd_args string is never closed; $dir and $REPAIRS
# are unbound in the visible body (an assignment of REPAIRS was presumably
# lost). Stray unified-diff "- " marker lines removed; all other code
# byte-identical.
local allow_overwrites=false
# Launch a cluster with 5 seconds scrub interval
run_mon $dir a || return 1
run_mgr $dir x || return 1
local ceph_osd_args="--osd_deep_scrub_randomize_ratio=0 \
ceph pg dump --format=json-pretty | jq ".pg_map.osd_stats_sum"
COUNT=$(ceph pg dump --format=json-pretty | jq ".pg_map.osd_stats_sum.num_shards_repaired")
test "$COUNT" = "$REPAIRS" || return 1
}
function corrupt_and_repair_jerasure() {
# Helper: presumably creates a k=2 m=2 jerasure EC pool on 4 OSDs and runs
# the generic EC corrupt-and-repair check (from the visible calls).
# NOTE(review): truncated — $dir is used but only $2 is bound (a
# "local dir=$1" was presumably lost) and the for loop has no body/done.
# Stray unified-diff "- " marker lines removed; all other code
# byte-identical.
local allow_overwrites=$2
local poolname=ecpool
run_mon $dir a || return 1
run_mgr $dir x || return 1
for id in $(seq 0 3) ; do
create_ec_pool $poolname $allow_overwrites k=2 m=2 || return 1
corrupt_and_repair_erasure_coded $dir $poolname || return 1
}
function TEST_corrupt_and_repair_jerasure_appends() {
# NOTE(review): name says jerasure, but the visible body creates an LRC pool
# (k=4 m=2 l=3 plugin=lrc) on 10 OSDs — fragments from different tests
# appear spliced together; body truncated ($dir unbound, for loop has no
# done). Stray unified-diff "- " marker lines removed; all other code
# byte-identical. TODO reconcile against upstream.
local allow_overwrites=$2
local poolname=ecpool
run_mon $dir a || return 1
run_mgr $dir x || return 1
for id in $(seq 0 9) ; do
create_ec_pool $poolname $allow_overwrites k=4 m=2 l=3 plugin=lrc || return 1
corrupt_and_repair_erasure_coded $dir $poolname || return 1
}
function TEST_corrupt_and_repair_lrc_appends() {
# NOTE(review): name says LRC corrupt-and-repair, but the visible tail
# checks cluster/unfound-object status — fragments appear spliced together;
# body truncated ($dir unbound, for loop has no done). Stray unified-diff
# "- " marker lines removed; all other code byte-identical.
local poolname=ecpool
local payload=ABCDEF
run_mon $dir a || return 1
run_mgr $dir x || return 1
for id in $(seq 0 3) ; do
ceph -s|grep "4 up" || return 1
ceph -s|grep "4 in" || return 1
ceph -s|grep "1/1 objects unfound" || return 1
}
function TEST_unfound_erasure_coded_appends() {
# Presumably verifies unfound-object reporting on an append-only EC pool
# (from the list_unfound polling loop; body truncated).
# NOTE(review): $dir and $pg are unbound, only $2 is bound, and the "done"
# closes a loop whose start is not visible — the inner for at "seq 0 2" has
# no done of its own. Stray unified-diff "- " marker lines removed; all
# other code byte-identical.
local allow_overwrites=$2
local poolname=ecpool
run_mon $dir a || return 1
run_mgr $dir x || return 1
for id in $(seq 0 2) ; do
matches=$(ceph pg $pg list_unfound | egrep "MOBJ0|MOBJ1" | wc -l)
[ $matches -eq 2 ] && break
done
}
function TEST_list_missing_erasure_coded_appends() {
# NOTE(review): name says list-missing on an append-only EC pool, but the
# visible body sets up a replicated csr_pool — fragments appear spliced;
# body truncated ($dir unbound, the "fi" has no matching "if"). Stray
# unified-diff "- " marker lines removed; all other code byte-identical.
local poolname=csr_pool
local total_objs=19
run_mon $dir a --osd_pool_default_size=2 || return 1
run_mgr $dir x || return 1
run_osd $dir 0 || return 1
fi
ceph osd pool rm $poolname $poolname --yes-i-really-really-mean-it
}
# NOTE(review): truncated interior of a TEST_* function — header missing,
# the for loop has no body/done, and the "fi" has no matching "if". Stray
# unified-diff "- " marker lines (setup/teardown removal) stripped; all
# other code byte-identical.
local poolname=ecpool
local total_objs=7
run_mon $dir a || return 1
run_mgr $dir x || return 1
for id in $(seq 0 2) ; do
fi
ceph osd pool rm $poolname $poolname --yes-i-really-really-mean-it
}
function TEST_corrupt_scrub_erasure_appends() {
# NOTE(review): two unrelated fragments appear fused here — a psr_pool
# corruption setup and a "not (deep-)scrubbed since" health-warning test
# (note the second run_mon/run_mgr pair and the dangling "fi"). $dir,
# $overdue, $i7_days, $i14_days, $i1_day are unbound in the visible body.
# Stray unified-diff "- " marker lines removed (including the removed
# trailing "return 0"); all other code byte-identical. TODO reconstruct
# from upstream.
local poolname=psr_pool
local objname=POBJ
run_mon $dir a --osd_pool_default_size=2 || return 1
run_mgr $dir x || return 1
local ceph_osd_args="--osd-scrub-interval-randomize-ratio=0 --osd-deep-scrub-randomize-ratio=0 "
local conf_overdue_seconds=$(calc $i7_days + $i1_day + \( $i7_days \* $overdue \) )
local pool_overdue_seconds=$(calc $i14_days + $i1_day + \( $i14_days \* $overdue \) )
run_mon $dir a --osd_pool_default_size=1 --mon_allow_pool_size_one=true || return 1
run_mgr $dir x --mon_warn_pg_not_scrubbed_ratio=${overdue} --mon_warn_pg_not_deep_scrubbed_ratio=${overdue} || return 1
run_osd $dir 0 $ceph_osd_args --osd_scrub_backoff_ratio=0 || return 1
ceph health detail | grep "not deep-scrubbed since"
return 1
fi
}
#
# NOTE(review): truncated interior of a TEST_* function — header missing and
# the "fi" has no matching "if". Stray unified-diff "- " marker lines
# (setup/teardown removal) stripped; all other code byte-identical.
local poolname=csr_pool
local total_objs=2
run_mon $dir a --osd_pool_default_size=2 || return 1
run_mgr $dir x || return 1
run_osd $dir 0 || return 1
fi
ceph osd pool rm $poolname $poolname --yes-i-really-really-mean-it
}
function TEST_request_scrub_priority() {
# Presumably verifies that an operator-requested scrub runs before queued
# periodic scrubs (from the name and the final log grep; body truncated —
# $dir, $primary and $pg are unbound in the visible body).
# NOTE(review): stray unified-diff "- " marker lines removed (including the
# removed trailing "return 0"); all other code byte-identical.
local OBJECTS=64
local PGS=8
run_mon $dir a --osd_pool_default_size=1 --mon_allow_pool_size_one=true || return 1
run_mgr $dir x || return 1
local ceph_osd_args="--osd-scrub-interval-randomize-ratio=0 --osd-deep-scrub-randomize-ratio=0 "
# Verify that the requested scrub ran first
grep "log_channel.*scrub ok" $dir/osd.${primary}.log | grep -v purged_snaps | head -1 | sed 's/.*[[]DBG[]]//' | grep -q $pg || return 1
}