From 620a722da3f17a092f55fe63fd51e1078d374ca4 Mon Sep 17 00:00:00 2001
From: John Spray
Date: Mon, 6 Oct 2014 19:06:20 +0100
Subject: [PATCH] qa: fixup cephtool test when MDS exists

We added MDS resetting code here a while back, but the order of
operations was such that a "cluster up" was being run between a
fail_all_mds and the point at which we needed the map not to be
interfered with (testing setmap).

Also the new fs create/destroy cycles for testing EC pool handling
were missing calls to stop the daemons before fs rm.

Signed-off-by: John Spray
---
 qa/workunits/cephtool/test.sh | 14 ++++++++------
 1 file changed, 8 insertions(+), 6 deletions(-)

diff --git a/qa/workunits/cephtool/test.sh b/qa/workunits/cephtool/test.sh
index 4831e310ba366..c50a313a2e10f 100755
--- a/qa/workunits/cephtool/test.sh
+++ b/qa/workunits/cephtool/test.sh
@@ -490,6 +490,12 @@ function test_mon_mds()
   ceph osd pool create fs_metadata 10
   ceph fs new cephfs fs_metadata fs_data
 
+  ceph mds cluster_down
+  ceph mds cluster_up
+
+  ceph mds compat rm_incompat 4
+  ceph mds compat rm_incompat 4
+
   # We don't want any MDSs to be up, their activity can interfere with
   # the "current_epoch + 1" checking below if they're generating updates
   fail_all_mds
@@ -498,12 +504,6 @@ function test_mon_mds()
   ceph osd dump | grep fs_data > $TMPFILE
   check_response "crash_replay_interval 45 "
 
-  ceph mds cluster_down
-  ceph mds cluster_up
-
-  ceph mds compat rm_incompat 4
-  ceph mds compat rm_incompat 4
-
   ceph mds compat show
   expect_false ceph mds deactivate 2
   ceph mds dump
@@ -598,6 +598,7 @@ function test_mon_mds()
   check_response 'in use by CephFS' $? 16
   set -e
 
+  fail_all_mds
   ceph fs rm cephfs --yes-i-really-mean-it
 
   # ... but we should be forbidden from using the cache pool in the FS directly.
@@ -630,6 +631,7 @@ function test_mon_mds()
   check_response 'in use by CephFS' $? 16
   set -e
 
+  fail_all_mds
   ceph fs rm cephfs --yes-i-really-mean-it
-- 
2.39.5
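
Note on the ordering this patch enforces: a minimal sketch, assuming
fail_all_mds is the test.sh helper (referenced in the diff but not
defined in it) that marks every MDS daemon failed. The setmap check
reads the current map epoch and expects to inject a map at exactly
current_epoch + 1, so any MDS activity between the probe and the
injection can bump the epoch and break the test. The $mapfile name and
the exact getmap/sed plumbing below are illustrative assumptions, not
copied from test.sh:

    # Churn that touches the MDS map must finish first...
    ceph mds cluster_down
    ceph mds cluster_up
    ceph mds compat rm_incompat 4

    # ...then stop all MDS daemons so nothing updates the map...
    fail_all_mds

    # ...and only now probe the epoch and test setmap against epoch + 1.
    # ($mapfile and the sed expression are illustrative, not from test.sh)
    mapfile=$(mktemp)
    epoch=$(ceph mds getmap -o "$mapfile" 2>&1 | sed -n 's/.*epoch \([0-9]*\).*/\1/p')
    ceph mds setmap -i "$mapfile" "$((epoch + 1))"

The same constraint explains the two added fail_all_mds calls before
"ceph fs rm": the mon refuses to remove a filesystem while MDS daemons
are still active, so each create/destroy cycle has to stop the daemons
first.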