# @param delays sequence of sleep times before failure
#
function kill_daemon() {
+ set -x
local pid=$(cat $1)
local send_signal=$2
local delays=${3:-0.1 0.2 1 1 1 2 3 5 5 5 10 10 20 60 60 60 120}
local dir=$1
setup $dir || return 1
run_mon $dir a --osd_pool_default_size=1 || return 1
+ run_mgr $dir x || return 1
run_osd $dir 0 || return 1
name_prefix=osd
ceph osd dump | grep "osd.0 down" || return 1
+ name_prefix=mgr
+ for pidfile in $(find $dir 2>/dev/null | grep $name_prefix'[^/]*\.pid') ; do
+ #
+ # kill the mgr
+ #
+ kill_daemon $pidfile TERM || return 1
+ done
+
name_prefix=mon
for pidfile in $(find $dir 2>/dev/null | grep $name_prefix'[^/]*\.pid') ; do
#
local dir=$1
setup $dir || return 1
run_mon $dir a --osd_pool_default_size=1 || return 1
+ run_mgr $dir x || return 1
run_osd $dir 0 || return 1
#
# sending signal 0 won't kill the daemon
kill_daemons $dir TERM osd || return 1
ceph osd dump | grep "osd.0 down" || return 1
#
+ # kill the mgr
+ #
+ kill_daemons $dir TERM mgr || return 1
+ #
# kill the mon and verify it cannot be reached
#
kill_daemons $dir TERM || return 1
#######################################################################
+##
+# Run a ceph-mgr daemon by the name mgr.**id** with data in
+# **dir**/**id**.  The remaining arguments are passed verbatim to
+# ceph-mgr.  Log and asok paths use literal \$name / \$cluster so
+# that ceph-mgr itself expands them at runtime.
+#
+# @param dir path name of the environment
+# @param id mgr identifier (i.e. after the mgr. prefix)
+# @param ... arguments forwarded to ceph-mgr
+# @return 0 on success, 1 on error
+##
+function run_mgr() {
+    local dir=$1
+    shift
+    local id=$1
+    shift
+    local data=$dir/$id
+
+    # Quote all expansions so a test dir containing whitespace does not
+    # word-split the argv (SC2086); \$name and \$cluster stay literal.
+    ceph-mgr \
+        --id "$id" \
+        --erasure-code-dir="$CEPH_LIB" \
+        --plugin-dir="$CEPH_LIB" \
+        --debug-mgr 20 \
+        --debug-objecter 20 \
+        --debug-ms 20 \
+        --debug-paxos 20 \
+        --chdir= \
+        --mgr-data="$data" \
+        --log-file="$dir/\$name.log" \
+        --admin-socket="$dir/\$cluster-\$name.asok" \
+        --run-dir="$dir" \
+        --pid-file="$dir/\$name.pid" \
+        "$@" || return 1
+}
+
+#######################################################################
+
##
# Create (prepare) and run (activate) an osd by the name osd.**id**
# with data in **dir**/**id**. The logs can be found in
setup $dir || return 1
run_mon $dir a || return 1
+ run_mgr $dir x || return 1
run_osd $dir 0 || return 1
local backfills=$(CEPH_ARGS='' ceph --format=json daemon $dir//ceph-osd.0.asok \
setup $dir || return 1
run_mon $dir a || return 1
+ run_mgr $dir x || return 1
run_osd $dir 0 || return 1
destroy_osd $dir 0 || return 1
! ceph osd dump | grep "osd.$id " || return 1
setup $dir || return 1
run_mon $dir a || return 1
+ run_mgr $dir x || return 1
run_osd $dir 0 || return 1
local backfills=$(CEPH_ARGS='' ceph --format=json daemon $dir//ceph-osd.0.asok \
local dir=$1
setup $dir || return 1
run_mon $dir a --osd_pool_default_size=1 || return 1
+ run_mgr $dir x || return 1
run_osd $dir 0 || return 1
wait_for_osd up 0 || return 1
kill_daemons $dir TERM osd || return 1
setup $dir || return 1
run_mon $dir a --osd_pool_default_size=2 || return 1
+ run_mgr $dir x || return 1
run_osd $dir 0 || return 1
run_osd $dir 1 || return 1
wait_for_clean || return 1
setup $dir || return 1
run_mon $dir a --osd_pool_default_size=1 || return 1
+ run_mgr $dir x || return 1
run_osd $dir 0 || return 1
wait_for_clean || return 1
get_pg rbd GROUP | grep --quiet '^[0-9]\.[0-9a-f][0-9a-f]*$' || return 1
setup $dir || return 1
run_mon $dir a --osd_pool_default_size=1 || return 1
test $(get_config mon a osd_pool_default_size) = 1 || return 1
+ run_mgr $dir x || return 1
run_osd $dir 0 --osd_max_scrubs=3 || return 1
test $(get_config osd 0 osd_max_scrubs) = 3 || return 1
teardown $dir || return 1
setup $dir || return 1
run_mon $dir a --osd_pool_default_size=1 || return 1
local osd=0
+ run_mgr $dir x || return 1
run_osd $dir $osd || return 1
wait_for_clean || return 1
test $(get_primary rbd GROUP) = $osd || return 1
setup $dir || return 1
run_mon $dir a --osd_pool_default_size=2 || return 1
+ run_mgr $dir x || return 1
run_osd $dir 0 || return 1
run_osd $dir 1 || return 1
wait_for_clean || return 1
setup $dir || return 1
run_mon $dir a --osd_pool_default_size=1 || return 1
local osd=0
+ run_mgr $dir x || return 1
run_osd $dir $osd || return 1
wait_for_clean || return 1
rados --pool rbd put GROUP /etc/group || return 1
setup $dir || return 1
run_mon $dir a || return 1
+ run_mgr $dir x || return 1
! get_is_making_recovery_progress || return 1
teardown $dir || return 1
}
setup $dir || return 1
run_mon $dir a --osd_pool_default_size=1 || return 1
+ run_mgr $dir x || return 1
run_osd $dir 0 || return 1
wait_for_clean || return 1
local num_active_clean=$(get_num_active_clean)
setup $dir || return 1
run_mon $dir a --osd_pool_default_size=1 || return 1
+ run_mgr $dir x || return 1
run_osd $dir 0 || return 1
wait_for_clean || return 1
local num_pgs=$(get_num_pgs)
setup $dir || return 1
run_mon $dir a --osd_pool_default_size=1 || return 1
+ run_mgr $dir x || return 1
run_osd $dir 0 || return 1
wait_for_clean || return 1
stamp=$(get_last_scrub_stamp 1.0)
setup $dir || return 1
run_mon $dir a --osd_pool_default_size=1 || return 1
+ run_mgr $dir x || return 1
run_osd $dir 0 || return 1
! is_clean || return 1
wait_for_clean || return 1
setup $dir || return 1
run_mon $dir a --osd_pool_default_size=1 || return 1
+ run_mgr $dir x || return 1
! TIMEOUT=1 wait_for_clean || return 1
run_osd $dir 0 || return 1
wait_for_clean || return 1
setup $dir || return 1
run_mon $dir a --osd_pool_default_size=1 || return 1
+ run_mgr $dir x || return 1
run_osd $dir 0 || return 1
wait_for_clean || return 1
repair 1.0 || return 1
setup $dir || return 1
run_mon $dir a --osd_pool_default_size=1 || return 1
+ run_mgr $dir x || return 1
run_osd $dir 0 || return 1
wait_for_clean || return 1
pg_scrub 1.0 || return 1
setup $dir || return 1
run_mon $dir a --osd_pool_default_size=1 || return 1
+ run_mgr $dir x || return 1
run_osd $dir 0 || return 1
wait_for_clean || return 1
local pgid=1.0
setup $dir || return 1
run_mon $dir a || return 1
+ run_mgr $dir x || return 1
erasure_code_plugin_exists jerasure || return 1
! erasure_code_plugin_exists FAKE || return 1
teardown $dir || return 1