From 4332eeb6946cee7c8da17f0eb2a621bb48cfba19 Mon Sep 17 00:00:00 2001
From: Sage Weil
Date: Mon, 10 Feb 2020 11:20:07 -0600
Subject: [PATCH] qa/workunits/cephtool/test.sh: delete test_erasure pool

Other parts of this script leave OSDs reweighted, which can make this
test fail to go fully clean.

0 ssd 0.08789 osd.0 up 0.63213 1.00000
1 ssd 0.08789 osd.1 up 0.63213 1.00000
2 ssd 0.08789 osd.2 up 1.00000 1.00000

35.0 raw ([2,1,2147483647], p2) up ([2,1,2147483647], p2) acting ([2,1,2], p2)

Fix by just deleting this pool when we're done.

Fixes: https://tracker.ceph.com/issues/44067
Signed-off-by: Sage Weil
---
 qa/workunits/cephtool/test.sh | 1 +
 1 file changed, 1 insertion(+)

diff --git a/qa/workunits/cephtool/test.sh b/qa/workunits/cephtool/test.sh
index 9a3a98b8dca..c12591d8db6 100755
--- a/qa/workunits/cephtool/test.sh
+++ b/qa/workunits/cephtool/test.sh
@@ -2186,6 +2186,7 @@ function test_mon_osd_pool_set()
   check_response 'not change the size'
   set -e
   ceph osd pool get pool_erasure erasure_code_profile
+  ceph osd pool rm pool_erasure pool_erasure --yes-i-really-really-mean-it
   for flag in nodelete nopgchange nosizechange write_fadvise_dontneed noscrub nodeep-scrub; do
     ceph osd pool set $TEST_POOL_GETSET $flag false
-- 
2.39.5