From: Kefu Chai
Date: Thu, 30 May 2019 15:44:37 +0000 (+0800)
Subject: qa/standalone/ceph-helpers: resurrect all OSD before waiting for health
X-Git-Tag: v14.2.5~183^2
X-Git-Url: http://git-server-git.apps.pok.os.sepia.ceph.com/?a=commitdiff_plain;h=716ed2dcc5aefdfa25ba8e0b7bf16f5db499743e;p=ceph.git

qa/standalone/ceph-helpers: resurrect all OSD before waiting for health

address the regression introduced by e62cfceb

in e62cfceb, we wanted to test the newly introduced TOO_FEW_OSDS warning,
so we increased the number of OSDs to the pool size; if the number of OSDs
is less than the pool size, the monitor will send a warning message. but we
need to bring all OSDs back if we are expecting a healthy cluster.

in this change, all OSDs are resurrected before `wait_for_health_ok`.

Signed-off-by: Kefu Chai

(cherry picked from commit cdba0f14201a327e824b6ec48e014233281dee2d)
---

diff --git a/qa/standalone/ceph-helpers.sh b/qa/standalone/ceph-helpers.sh
index 6ebcd14a2cc1..90c5c6bc09ba 100755
--- a/qa/standalone/ceph-helpers.sh
+++ b/qa/standalone/ceph-helpers.sh
@@ -1621,13 +1621,18 @@ function test_wait_for_health_ok() {
     setup $dir || return 1
     run_mon $dir a --osd_failsafe_full_ratio=.99 --mon_pg_warn_min_per_osd=0 || return 1
     run_mgr $dir x --mon_pg_warn_min_per_osd=0 || return 1
+    # start osd_pool_default_size OSDs
     run_osd $dir 0 || return 1
     run_osd $dir 1 || return 1
     run_osd $dir 2 || return 1
     kill_daemons $dir TERM osd || return 1
     ceph osd down 0 || return 1
+    # expect TOO_FEW_OSDS warning
     ! TIMEOUT=1 wait_for_health_ok || return 1
+    # resurrect all OSDs
     activate_osd $dir 0 || return 1
+    activate_osd $dir 1 || return 1
+    activate_osd $dir 2 || return 1
     wait_for_health_ok || return 1
     teardown $dir || return 1
 }
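
As a side note, a minimal sketch (not part of the commit) of how the behaviour
described above can be checked by hand against a test cluster; it assumes only
the standard `ceph` CLI and the rule stated in the message, i.e. fewer OSDs than
the pool size raises the TOO_FEW_OSDS health check:

    # assumed illustration: with osd_pool_default_size=3 and an OSD down,
    # the monitor should report the TOO_FEW_OSDS health check
    ceph health detail | grep TOO_FEW_OSDS || echo "no TOO_FEW_OSDS warning"

    # after every OSD is brought back (what activate_osd does in the test),
    # the cluster should be able to reach HEALTH_OK again
    ceph health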