From 0cd02684214bdc6f41e18297e91427e9b121c829 Mon Sep 17 00:00:00 2001
From: John Spray
Date: Wed, 9 Jul 2014 12:43:04 +0100
Subject: [PATCH] qa: generalise cephtool for vstart+MDS

Previously this test assumed no pre-existing filesystem
and no MDS running.  Generalize it to nuke any existing
filesystems found before running, so that you can use it
inside a vstart cluster that had MDS>0.

Signed-off-by: John Spray
---
 qa/workunits/cephtool/test.sh | 33 ++++++++++++++++++++++++++++-----
 1 file changed, 28 insertions(+), 5 deletions(-)

diff --git a/qa/workunits/cephtool/test.sh b/qa/workunits/cephtool/test.sh
index b7f1c209bd38d..f8d13947eeb23 100755
--- a/qa/workunits/cephtool/test.sh
+++ b/qa/workunits/cephtool/test.sh
@@ -3,7 +3,7 @@
 set -e
 set -o functrace
 PS4=' ${FUNCNAME[0]}: $LINENO: '
-SUDO=sudo
+SUDO=${SUDO:-sudo}
 
 function get_pg()
 {
@@ -330,12 +330,34 @@ function test_mon_misc()
 }
 
+function fail_all_mds()
+{
+  ceph mds cluster_down
+  mds_gids=`ceph mds dump | grep up: | while read line ; do echo $line | awk '{print substr($1, 0, length($1)-1);}' ; done`
+  for mds_gid in $mds_gids ; do
+      ceph mds fail $mds_gid
+  done
+}
+
 function test_mon_mds()
 {
+  existing_fs=$(ceph fs ls | grep "name:" | awk '{print substr($2,0,length($2)-1);}')
+  num_mds=$(ceph mds stat | awk '{print $2;}' | cut -f1 -d'/')
+  if [ -n "$existing_fs" ] ; then
+      fail_all_mds
+      echo "Removing existing filesystem '${existing_fs}'..."
+      ceph fs rm $existing_fs --yes-i-really-mean-it
+      echo "Removed '${existing_fs}'."
+  fi
+
   ceph osd pool create fs_data 10
   ceph osd pool create fs_metadata 10
   ceph fs new cephfs fs_metadata fs_data
 
+  # We don't want any MDSs to be up, their activity can interfere with
+  # the "current_epoch + 1" checking below if they're generating updates
+  fail_all_mds
+
   # Check for default crash_replay_interval set automatically in 'fs new'
   ceph osd dump | grep fs_data > $TMPFILE
   check_response "crash_replay_interval 45 "
 
@@ -398,13 +420,14 @@ function test_mon_mds()
   ceph mds add_data_pool mds-ec-pool 2>$TMPFILE
   check_response 'erasure-code' $? 22
   set -e
-  ec_poolnum=$(ceph osd dump | grep 'pool.*mds-ec-pool' | awk '{print $2;}')
-  data_poolnum=$(ceph osd dump | grep 'pool.*fs_data' | awk '{print $2;}')
-  metadata_poolnum=$(ceph osd dump | grep 'pool.*fs_metadata' | awk '{print $2;}')
-  set +e
+  ec_poolnum=$(ceph osd dump | grep "pool.* 'mds-ec-pool" | awk '{print $2;}')
+  data_poolnum=$(ceph osd dump | grep "pool.* 'fs_data" | awk '{print $2;}')
+  metadata_poolnum=$(ceph osd dump | grep "pool.* 'fs_metadata" | awk '{print $2;}')
 
+  fail_all_mds
   ceph fs rm cephfs --yes-i-really-mean-it
 
+  set +e
   ceph mds newfs $metadata_poolnum $ec_poolnum --yes-i-really-mean-it 2>$TMPFILE
   check_response 'erasure-code' $? 22
   ceph mds newfs $ec_poolnum $data_poolnum --yes-i-really-mean-it 2>$TMPFILE
-- 
2.39.5
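
A minimal sketch of how the generalised workunit might be exercised against a local vstart cluster. The paths, environment variables, and vstart invocation below are illustrative assumptions, not part of the patch; adjust them for your own build tree.

    # Assumed workflow, run from a cmake build directory (older autotools
    # trees ran vstart.sh from src/ instead).
    MON=1 OSD=3 MDS=1 ../src/vstart.sh -n      # fresh local cluster with one MDS up
    export CEPH_CONF=$PWD/ceph.conf            # point the ceph CLI at the vstart cluster
    SUDO=env ../qa/workunits/cephtool/test.sh  # swap sudo for a pass-through wrapper

Because the script now reads SUDO=${SUDO:-sudo}, the default behaviour for teuthology runs is unchanged, while a developer can point SUDO at a harmless pass-through such as env when root is not needed for a local dev cluster.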