From 4dd5aee430e1e40ac13794ceb4f4344453cbfb13 Mon Sep 17 00:00:00 2001
From: Patrick Donnelly
Date: Tue, 7 Jan 2020 12:51:38 -0800
Subject: [PATCH] qa: only restart MDS between tests

"fs fail" will only fail the MDS that are part of the file system which
will generally allow us to avoid spurious MDS_INSUFFICIENT_STANDBY
warnings. Further, only restart the MDS, there's no reason to leave them
offline.

Fixes: https://tracker.ceph.com/issues/43514
Signed-off-by: Patrick Donnelly
(cherry picked from commit 423dcb4c5470b48cac096b814d270c5c2c918c43)
---
 qa/tasks/cephfs/cephfs_test_case.py | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/qa/tasks/cephfs/cephfs_test_case.py b/qa/tasks/cephfs/cephfs_test_case.py
index 53713a3896c7b..0a6acf9dcfa30 100644
--- a/qa/tasks/cephfs/cephfs_test_case.py
+++ b/qa/tasks/cephfs/cephfs_test_case.py
@@ -98,9 +98,8 @@ class CephFSTestCase(CephTestCase):
 
         # To avoid any issues with e.g. unlink bugs, we destroy and recreate
         # the filesystem rather than just doing a rm -rf of files
-        self.mds_cluster.mds_stop()
-        self.mds_cluster.mds_fail()
         self.mds_cluster.delete_all_filesystems()
+        self.mds_cluster.mds_restart() # to reset any run-time configs, etc.
         self.fs = None # is now invalid!
         self.recovery_fs = None
 
@@ -131,7 +130,6 @@ class CephFSTestCase(CephTestCase):
 
         if self.REQUIRE_FILESYSTEM:
             self.fs = self.mds_cluster.newfs(create=True)
-            self.fs.mds_restart()
 
             # In case some test messed with auth caps, reset them
             for client_id in client_mount_ids:
@@ -141,7 +139,7 @@ class CephFSTestCase(CephTestCase):
                     'mon', 'allow r',
                     'osd', 'allow rw pool={0}'.format(self.fs.get_data_pool_name()))
 
-            # wait for mds restart to complete...
+            # wait for ranks to become active
             self.fs.wait_for_daemons()
 
         # Mount the requested number of clients
-- 
2.39.5
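
Note (reviewer sketch, not part of the patch): the commit message relies on the fact that
"ceph fs fail <name>" only fails the MDS ranks belonging to that file system, so standby
daemons stay up and MDS_INSUFFICIENT_STANDBY is not raised. The sketch below illustrates
that per-filesystem teardown under stated assumptions; run_ceph_cmd and fake_ceph are
hypothetical stand-ins for the QA suite's mon command runner, and this is not the actual
MDSCluster.delete_all_filesystems() implementation.

    import json
    from typing import Callable


    def delete_all_filesystems_sketch(run_ceph_cmd: Callable[..., str]) -> None:
        """Fail and remove every CephFS file system, leaving MDS daemons running."""
        for fs in json.loads(run_ceph_cmd("fs", "ls", "--format=json")):
            name = fs["name"]
            # "fs fail" marks the file system down and fails only its own ranks,
            # so standby MDS remain available to other file systems.
            run_ceph_cmd("fs", "fail", name)
            # With the file system failed, it can be removed.
            run_ceph_cmd("fs", "rm", name, "--yes-i-really-mean-it")


    if __name__ == "__main__":
        # Hypothetical stand-in for the real mon command runner, only so the
        # sketch runs standalone; it prints the command and fakes "fs ls" output.
        def fake_ceph(*args: str) -> str:
            print("ceph " + " ".join(args))
            return '[{"name": "cephfs"}]' if args[:2] == ("fs", "ls") else "[]"

        delete_all_filesystems_sketch(fake_ceph)

Because teardown no longer stops and fails every MDS in the cluster, a plain mds_restart()
afterwards is enough to reset run-time configuration for the next test.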