From: Kefu Chai Date: Thu, 30 May 2019 15:44:37 +0000 (+0800) Subject: qa/standalone/ceph-helpers: resurrect all OSD before waiting for health X-Git-Tag: v15.1.0~2598^2 X-Git-Url: http://git.apps.os.sepia.ceph.com/?a=commitdiff_plain;h=cdba0f14201a327e824b6ec48e014233281dee2d;p=ceph-ci.git qa/standalone/ceph-helpers: resurrect all OSD before waiting for health address the regression introduced by e62cfceb. in e62cfceb, we wanted to test the newly introduced TOO_FEW_OSDS warning, so we increased the number of OSDs to the size of the pool, so that if the number of OSDs is less than the pool size, the monitor will send a warning message. but we need to bring all OSDs back if we are expecting a healthy cluster. in this change, all OSDs are resurrected before `wait_for_health_ok`. Signed-off-by: Kefu Chai --- diff --git a/qa/standalone/ceph-helpers.sh b/qa/standalone/ceph-helpers.sh index ba79a652f1a..52b0eee6917 100755 --- a/qa/standalone/ceph-helpers.sh +++ b/qa/standalone/ceph-helpers.sh @@ -1628,13 +1628,18 @@ function test_wait_for_health_ok() { setup $dir || return 1 run_mon $dir a --osd_failsafe_full_ratio=.99 --mon_pg_warn_min_per_osd=0 || return 1 run_mgr $dir x --mon_pg_warn_min_per_osd=0 || return 1 + # start osd_pool_default_size OSDs run_osd $dir 0 || return 1 run_osd $dir 1 || return 1 run_osd $dir 2 || return 1 kill_daemons $dir TERM osd || return 1 ceph osd down 0 || return 1 + # expect TOO_FEW_OSDS warning ! TIMEOUT=1 wait_for_health_ok || return 1 + # resurrect all OSDs activate_osd $dir 0 || return 1 + activate_osd $dir 1 || return 1 + activate_osd $dir 2 || return 1 wait_for_health_ok || return 1 teardown $dir || return 1 }