From 6e1480058651a12da0649a5d82eb7865947a1d4d Mon Sep 17 00:00:00 2001
From: Kefu Chai
Date: Thu, 30 May 2019 23:44:37 +0800
Subject: [PATCH] qa/standalone/ceph-helpers: resurrect all OSDs before
 waiting for health

address the regression introduced by e62cfceb

in e62cfceb, we wanted to test the newly introduced TOO_FEW_OSDS warning,
so we increased the number of OSDs to match the pool size: if the number
of OSDs is less than the pool size, the monitor raises a warning.

but we need to bring all OSDs back if we are expecting a healthy cluster.

in this change, all OSDs are resurrected before `wait_for_health_ok`.

Signed-off-by: Kefu Chai
(cherry picked from commit cdba0f14201a327e824b6ec48e014233281dee2d)
---
 qa/standalone/ceph-helpers.sh | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/qa/standalone/ceph-helpers.sh b/qa/standalone/ceph-helpers.sh
index 5d4f205dfcf06..293ae280f611e 100755
--- a/qa/standalone/ceph-helpers.sh
+++ b/qa/standalone/ceph-helpers.sh
@@ -1509,13 +1509,18 @@ function test_wait_for_health_ok() {
     setup $dir || return 1
     run_mon $dir a --osd_failsafe_full_ratio=.99 --mon_pg_warn_min_per_osd=0 || return 1
     run_mgr $dir x --mon_pg_warn_min_per_osd=0 || return 1
+    # start osd_pool_default_size OSDs
     run_osd $dir 0 || return 1
     run_osd $dir 1 || return 1
     run_osd $dir 2 || return 1
     kill_daemons $dir TERM osd || return 1
     ceph osd down 0 || return 1
+    # expect TOO_FEW_OSDS warning
     ! TIMEOUT=1 wait_for_health_ok || return 1
+    # resurrect all OSDs
     activate_osd $dir 0 || return 1
+    activate_osd $dir 1 || return 1
+    activate_osd $dir 2 || return 1
     wait_for_health_ok || return 1
     teardown $dir || return 1
 }
-- 
2.39.5
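
Note for readers unfamiliar with the helper: below is a minimal sketch of a
health poller in the spirit of `wait_for_health_ok`, not the actual helper
(the real implementation in qa/standalone/ceph-helpers.sh differs in its
details); a running test cluster and the `ceph` CLI on PATH are assumed, and
the function name `wait_for_health_ok_sketch` is hypothetical.

    #!/usr/bin/env bash
    # sketch only: poll cluster health until HEALTH_OK or timeout.
    # TIMEOUT is in seconds; returns 1 if HEALTH_OK is not reached in time.
    function wait_for_health_ok_sketch() {
        local -i timeout=${TIMEOUT:-120}
        while ((timeout > 0)); do
            # "ceph health" prints HEALTH_OK once all health checks clear
            if ceph health | grep -q '^HEALTH_OK'; then
                return 0
            fi
            sleep 1
            ((timeout--))
        done
        return 1
    }

    # as in the patched test: with an OSD down, assert the cluster is NOT
    # healthy within a short window, then wait for HEALTH_OK only after
    # all OSDs have been brought back up
    ! TIMEOUT=1 wait_for_health_ok_sketch || exit 1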