From aecaebe087273cc0abac2bc737f912807cc0a8b3 Mon Sep 17 00:00:00 2001
From: Sage Weil
Date: Mon, 22 May 2017 10:09:19 -0400
Subject: [PATCH] qa/workunits/rados/test_health_warning: misc fixes

- use 'noup' flag
- wait for healthy between each test
- check counts for each type

Fixes: http://tracker.ceph.com/issues/19990
Signed-off-by: Sage Weil
---
 qa/workunits/rados/test_health_warnings.sh | 29 +++++++++++++++++++---
 1 file changed, 26 insertions(+), 3 deletions(-)

diff --git a/qa/workunits/rados/test_health_warnings.sh b/qa/workunits/rados/test_health_warnings.sh
index d9ddbbe7f8c..41598637675 100755
--- a/qa/workunits/rados/test_health_warnings.sh
+++ b/qa/workunits/rados/test_health_warnings.sh
@@ -7,26 +7,49 @@ crushtool -o crushmap --build --num_osds 10 host straw 2 rack straw 2 row straw
 ceph osd setcrushmap -i crushmap
 ceph osd tree
 
+wait_for_healthy() {
+  while ceph health | grep down
+  do
+    sleep 1
+  done
+}
+
 test_mark_two_osds_same_host_down() {
+  ceph osd set noup
   ceph osd down osd.0 osd.1
   ceph health detail
-  ceph health | grep "host"
+  ceph health | grep "1 host"
+  ceph health | grep "2 osds"
   ceph health detail | grep "osd.0"
   ceph health detail | grep "osd.1"
+  ceph osd unset noup
+  wait_for_healthy
 }
 
 test_mark_two_osds_same_rack_down() {
+  ceph osd set noup
   ceph osd down osd.8 osd.9
   ceph health detail
-  ceph health | grep "rack"
+  ceph health | grep "1 host"
+  ceph health | grep "1 rack"
+  ceph health | grep "1 row"
+  ceph health | grep "2 osds"
   ceph health detail | grep "osd.8"
   ceph health detail | grep "osd.9"
+  ceph osd unset noup
+  wait_for_healthy
 }
 
 test_mark_all_osds_down() {
+  ceph osd set noup
   ceph osd down `ceph osd ls`
   ceph health detail
-  ceph health | grep "row"
+  ceph health | grep "2 rows"
+  ceph health | grep "3 racks"
+  ceph health | grep "5 hosts"
+  ceph health | grep "10 osds"
+  ceph osd unset noup
+  wait_for_healthy
 }
 
 test_mark_two_osds_same_host_down
-- 
2.39.5