qa: only restart MDS between tests 32912/head
author Patrick Donnelly <pdonnell@redhat.com>
Tue, 7 Jan 2020 20:51:38 +0000 (12:51 -0800)
committer Nathan Cutler <ncutler@suse.com>
Mon, 3 Feb 2020 15:29:53 +0000 (16:29 +0100)
"fs fail" will only fail the MDS that are part of the file system which
will generally allow us to avoid spurious MDS_INSUFFICIENT_STANDBY
warnings. Further, only restart the MDS, there's no reason to leave them
offline.

Fixes: https://tracker.ceph.com/issues/43514
Signed-off-by: Patrick Donnelly <pdonnell@redhat.com>
(cherry picked from commit 423dcb4c5470b48cac096b814d270c5c2c918c43)
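For context, here is a minimal sketch of the reset sequence the patch
moves to, expressed with the plain `ceph` CLI instead of the qa-suite
helpers (delete_all_filesystems, mds_restart, newfs). The mon commands
(fs ls, fs fail, fs rm, fs new) are real; the pool names and the use of
systemd to restart the MDS daemons are assumptions for illustration,
since the qa framework drives its daemons through its own machinery.

#!/usr/bin/env python3
# Sketch only: reset all CephFS file systems between tests the way the
# patched setUp does it -- fail/remove each fs, restart (not stop) the
# MDS daemons, then recreate a fresh fs for the next test.
import json
import subprocess

def ceph(*args):
    """Run a ceph CLI command and return its stdout as text."""
    return subprocess.check_output(("ceph",) + args, text=True)

def reset_filesystems(new_name="cephfs"):
    # "fs fail" only fails the MDS holding ranks in that file system, so
    # standby daemons stay registered and MDS_INSUFFICIENT_STANDBY is not
    # raised while the fs is being torn down.
    for fs in json.loads(ceph("fs", "ls", "--format=json")):
        ceph("fs", "fail", fs["name"])
        ceph("fs", "rm", fs["name"], "--yes-i-really-mean-it")

    # Restart the MDS daemons to reset any run-time configs; they come
    # straight back as standbys.  Systemd is an assumption here -- the qa
    # suite restarts its daemons itself (mds_cluster.mds_restart()).
    subprocess.check_call(["systemctl", "restart", "ceph-mds.target"])

    # Recreate a file system (pools assumed to exist already) so the
    # restarted daemons can take up its ranks.
    ceph("fs", "new", new_name, "cephfs_metadata", "cephfs_data")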

qa/tasks/cephfs/cephfs_test_case.py

index 53713a3896c7ba1de653cd58b34b3d348f3f8115..0a6acf9dcfa30b5e9f15b965f27b12e44cdac019 100644
@@ -98,9 +98,8 @@ class CephFSTestCase(CephTestCase):
 
         # To avoid any issues with e.g. unlink bugs, we destroy and recreate
         # the filesystem rather than just doing a rm -rf of files
-        self.mds_cluster.mds_stop()
-        self.mds_cluster.mds_fail()
         self.mds_cluster.delete_all_filesystems()
+        self.mds_cluster.mds_restart() # to reset any run-time configs, etc.
         self.fs = None # is now invalid!
         self.recovery_fs = None
 
@@ -131,7 +130,6 @@ class CephFSTestCase(CephTestCase):
 
         if self.REQUIRE_FILESYSTEM:
             self.fs = self.mds_cluster.newfs(create=True)
-            self.fs.mds_restart()
 
             # In case some test messed with auth caps, reset them
             for client_id in client_mount_ids:
@@ -141,7 +139,7 @@ class CephFSTestCase(CephTestCase):
                     'mon', 'allow r',
                     'osd', 'allow rw pool={0}'.format(self.fs.get_data_pool_name()))
 
-            # wait for mds restart to complete...
+            # wait for ranks to become active
             self.fs.wait_for_daemons()
 
             # Mount the requested number of clients
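After recreating the file system, setUp now waits for its ranks to
become active rather than for an MDS restart to finish. A rough sketch
of what that wait amounts to, polling the MDSMap through the CLI; the
JSON field names (mdsmap, info, state, max_mds) and the "up:active"
state string are assumptions about the `ceph fs get` dump, not the
helper the suite actually calls (self.fs.wait_for_daemons()).

#!/usr/bin/env python3
# Sketch only: block until every rank of the named file system reports
# an active MDS, or give up after a timeout.
import json
import subprocess
import time

def wait_for_active(fs_name="cephfs", timeout=120, interval=5):
    deadline = time.time() + timeout
    while time.time() < deadline:
        out = subprocess.check_output(
            ["ceph", "fs", "get", fs_name, "--format=json"], text=True)
        mdsmap = json.loads(out)["mdsmap"]
        # Count daemons that currently hold a rank in the active state.
        active = [i for i in mdsmap["info"].values()
                  if i["state"] == "up:active"]
        if len(active) >= mdsmap["max_mds"]:
            return
        time.sleep(interval)
    raise RuntimeError("ranks of {} did not become active".format(fs_name))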