qa: only restart MDS between tests (32532/head)
author    Patrick Donnelly <pdonnell@redhat.com>  Tue, 7 Jan 2020 20:51:38 +0000 (12:51 -0800)
committer Patrick Donnelly <pdonnell@redhat.com>  Wed, 8 Jan 2020 02:47:23 +0000 (18:47 -0800)
"fs fail" will only fail the MDS that are part of the file system which
will generally allow us to avoid spurious MDS_INSUFFICIENT_STANDBY
warnings. Further, only restart the MDS, there's no reason to leave them
offline.

Fixes: https://tracker.ceph.com/issues/43514
Signed-off-by: Patrick Donnelly <pdonnell@redhat.com>
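
For context, here is a minimal sketch of the two teardown strategies the message
contrasts. It uses the qa helpers visible in the diff below plus a
CephManager-style raw_cluster_cmd() helper, which is an assumption for
illustration and not part of this commit:

    def fail_filesystem(mon, fs_name):
        # "fs fail" marks down only the ranks of the named file system;
        # standby daemons serving other file systems are untouched, so the
        # MDS_INSUFFICIENT_STANDBY warning is not triggered.
        mon.raw_cluster_cmd("fs", "fail", fs_name)

    def fail_all_mds(mds_cluster):
        # The previous teardown stopped and failed every MDS daemon in the
        # cluster, which could leave no standbys at all and raise the
        # spurious health warning between tests.
        mds_cluster.mds_stop()
        mds_cluster.mds_fail()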
qa/tasks/cephfs/cephfs_test_case.py

index 6419d9044da2cf32ad8b2b99ce50e1201b253c1e..421284a9e5bd27240b5939842bb842bddeb075dd 100644 (file)
@@ -97,9 +97,8 @@ class CephFSTestCase(CephTestCase):
 
         # To avoid any issues with e.g. unlink bugs, we destroy and recreate
         # the filesystem rather than just doing a rm -rf of files
-        self.mds_cluster.mds_stop()
-        self.mds_cluster.mds_fail()
         self.mds_cluster.delete_all_filesystems()
+        self.mds_cluster.mds_restart() # to reset any run-time configs, etc.
         self.fs = None # is now invalid!
         self.recovery_fs = None
 
@@ -130,7 +129,6 @@ class CephFSTestCase(CephTestCase):
 
         if self.REQUIRE_FILESYSTEM:
             self.fs = self.mds_cluster.newfs(create=True)
-            self.fs.mds_restart()
 
             # In case some test messed with auth caps, reset them
             for client_id in client_mount_ids:
@@ -140,7 +138,7 @@ class CephFSTestCase(CephTestCase):
                     'mon', 'allow r',
                     'osd', 'allow rw pool={0}'.format(self.fs.get_data_pool_name()))
 
-            # wait for mds restart to complete...
+            # wait for ranks to become active
             self.fs.wait_for_daemons()
 
             # Mount the requested number of clients
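
Taken together, the three hunks leave the per-test setup reading roughly as
follows. This is a simplified sketch using only the helpers shown above, not
the literal file contents:

    def setUp_flow(self):
        # Tear down whatever the previous test left behind: removing the file
        # systems fails only their MDS, and a plain restart (rather than
        # stop + fail) resets run-time configs without leaving daemons offline.
        self.mds_cluster.delete_all_filesystems()
        self.mds_cluster.mds_restart()
        self.fs = None

        if self.REQUIRE_FILESYSTEM:
            # A fresh file system is created and the restarted MDS take up its
            # ranks, so the old per-fs mds_restart() call is no longer needed.
            self.fs = self.mds_cluster.newfs(create=True)

            # Wait for ranks to become active before mounting clients.
            self.fs.wait_for_daemons()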