From: Patrick Donnelly
Date: Tue, 7 Jan 2020 20:51:38 +0000 (-0800)
Subject: qa: only restart MDS between tests
X-Git-Tag: v14.2.8~57^2
X-Git-Url: http://git-server-git.apps.pok.os.sepia.ceph.com/?a=commitdiff_plain;h=refs%2Fpull%2F32912%2Fhead;p=ceph.git

qa: only restart MDS between tests

"fs fail" will only fail the MDS that are part of the file system,
which will generally allow us to avoid spurious
MDS_INSUFFICIENT_STANDBY warnings. Further, only restart the MDS;
there's no reason to leave them offline.

Fixes: https://tracker.ceph.com/issues/43514
Signed-off-by: Patrick Donnelly
(cherry picked from commit 423dcb4c5470b48cac096b814d270c5c2c918c43)
---

diff --git a/qa/tasks/cephfs/cephfs_test_case.py b/qa/tasks/cephfs/cephfs_test_case.py
index 53713a3896c7..0a6acf9dcfa3 100644
--- a/qa/tasks/cephfs/cephfs_test_case.py
+++ b/qa/tasks/cephfs/cephfs_test_case.py
@@ -98,9 +98,8 @@ class CephFSTestCase(CephTestCase):
 
         # To avoid any issues with e.g. unlink bugs, we destroy and recreate
         # the filesystem rather than just doing a rm -rf of files
-        self.mds_cluster.mds_stop()
-        self.mds_cluster.mds_fail()
         self.mds_cluster.delete_all_filesystems()
+        self.mds_cluster.mds_restart() # to reset any run-time configs, etc.
         self.fs = None # is now invalid!
         self.recovery_fs = None
 
@@ -131,7 +130,6 @@ class CephFSTestCase(CephTestCase):
 
         if self.REQUIRE_FILESYSTEM:
             self.fs = self.mds_cluster.newfs(create=True)
-            self.fs.mds_restart()
 
             # In case some test messed with auth caps, reset them
             for client_id in client_mount_ids:
@@ -141,7 +139,7 @@ class CephFSTestCase(CephTestCase):
                     'mon', 'allow r',
                     'osd', 'allow rw pool={0}'.format(self.fs.get_data_pool_name()))
 
-            # wait for mds restart to complete...
+            # wait for ranks to become active
             self.fs.wait_for_daemons()
 
         # Mount the requested number of clients
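
For context, a minimal sketch of the per-test reset flow this patch moves to, assuming simple wrappers around the `ceph` CLI and the harness's daemon control. `ceph fs fail` and `ceph fs rm` are the real commands involved; run_ceph() and restart_mds_daemon() are hypothetical stand-ins (the actual code goes through self.mds_cluster.delete_all_filesystems() and self.mds_cluster.mds_restart()):

    # Minimal sketch, not the actual qa helpers: run_ceph() and
    # restart_mds_daemon() are hypothetical stand-ins for the wrappers
    # used by self.mds_cluster.
    def reset_between_tests(run_ceph, fs_names, mds_daemons):
        for name in fs_names:
            # "fs fail" fails only the ranks belonging to this file
            # system; unrelated standbys stay up, so
            # MDS_INSUFFICIENT_STANDBY is not spuriously raised.
            run_ceph('fs', 'fail', name)
            run_ceph('fs', 'rm', name, '--yes-i-really-mean-it')
        for daemon in mds_daemons:
            # Restart rather than stop: run-time configs are reset and
            # the daemons come back as standbys, ready for the next
            # test's file system.
            restart_mds_daemon(daemon)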