git.apps.os.sepia.ceph.com Git - ceph-ci.git/commitdiff
qa/cephfs: test that an MDS can be failed when another FS/MDS has a health warning
author      Rishabh Dave <ridave@redhat.com>
            Tue, 28 Jan 2025 17:45:28 +0000 (23:15 +0530)
committer   Rishabh Dave <ridave@redhat.com>
            Thu, 6 Feb 2025 16:33:19 +0000 (22:03 +0530)

Signed-off-by: Rishabh Dave <ridave@redhat.com>
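For context, the behaviour these tests lock down: when the MDS of one file system carries a health warning, "mds fail" against an MDS serving a different file system should go through without the --yes-i-really-mean-it confirmation, while failing the warned MDS itself still requires that flag (as the existing tests in this class exercise). Below is a minimal sketch of that distinction against a live cluster with two file systems named cephfs and cephfs2; active_mds_of() is a hypothetical helper and the JSON layout of "ceph fs status" is an assumption, not something taken from this diff.

import json
import subprocess

def ceph(*args):
    # Run a ceph CLI command and return its stdout; raises on non-zero exit.
    return subprocess.run(("ceph",) + args, check=True,
                          capture_output=True, text=True).stdout

def active_mds_of(fs_name):
    # Hypothetical helper: pick the active MDS name for one FS out of
    # "ceph fs status" JSON (the "mdsmap"/"state"/"name" keys are assumed).
    status = json.loads(ceph("fs", "status", fs_name, "--format=json"))
    return next(m["name"] for m in status["mdsmap"] if m["state"] == "active")

# With a health warning (e.g. MDS_TRIM) on cephfs's MDS, failing cephfs2's
# MDS needs no confirmation flag ...
ceph("mds", "fail", active_mds_of("cephfs2"))
# ... while failing the warned MDS itself needs explicit confirmation.
ceph("mds", "fail", active_mds_of("cephfs"), "--yes-i-really-mean-it")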
qa/tasks/cephfs/test_admin.py

index 7f5a0dbec2d1ba43a321103f877a24c4dd246cfe..d35c3ffde4251a7d5a5a8386cd3f029b1dae9dd2 100644 (file)
@@ -2613,7 +2613,7 @@ class TestFSFail(TestAdminCommands):
 class TestMDSFail(TestAdminCommands):
 
     MDSS_REQUIRED = 2
-    CLIENTS_REQUIRED = 1
+    CLIENTS_REQUIRED = 2
 
     def test_with_health_warn_cache_oversized(self):
         '''
@@ -2691,6 +2691,51 @@ class TestMDSFail(TestAdminCommands):
         self.run_ceph_cmd(f'mds fail {mds1_id} --yes-i-really-mean-it')
         self.run_ceph_cmd(f'mds fail {mds2_id} --yes-i-really-mean-it')
 
+    def test_when_other_MDS_has_warn_TRIM(self):
+        '''
+        Test that "mds fail" runs successfully for a MDS when a MDS which is
+        active for a different FS has health warning MDS_TRIM.
+        '''
+        self.fs1 = self.fs
+
+        self.fs2 = self.fs.newfs(name='cephfs2', create=True)
+        self.mount_b.remount(fsname=self.fs2.name)
+        self.mount_b.wait_until_mounted()
+
+        # generates a health warning (MDS_TRIM) for self.fs1
+        self.gen_health_warn_mds_trim()
+
+        active_mds_id = self.fs2.get_active_names()[0]
+        # actual testing begins now.
+        self.run_ceph_cmd(f'mds fail {active_mds_id}')
+
+        # Bring the MDS back up and wait for it, since an active MDS is needed
+        # for unmounting of CephFS in CephFSTestCase.tearDown() to succeed.
+        self.fs.set_joinable()
+        self.fs.wait_for_daemons()
+
+    def test_when_other_MDS_has_warn_CACHE_OVERSIZED(self):
+        '''
+        Test that "mds fail" runs successfully for a MDS when a MDS which is
+        active for a different FS has health warning MDS_CACHE_OVERSIZED.
+        '''
+        self.fs1 = self.fs
+
+        self.fs2 = self.fs.newfs(name='cephfs2', create=True)
+        self.mount_b.remount(fsname=self.fs2.name)
+        self.mount_b.wait_until_mounted()
+
+        # generate the health warning on fs1's MDS; the actual test is "mds fail" below.
+        mds_id_for_fs1 = self.fs1.get_active_names()[0]
+        self.gen_health_warn_mds_cache_oversized(mds_id=mds_id_for_fs1)
+        mds_id_for_fs2 = self.fs2.get_active_names()[0]
+        self.run_ceph_cmd(f'mds fail {mds_id_for_fs2}')
+
+        # Bring the MDS back up and wait for it, since an active MDS is needed
+        # for unmounting of CephFS in CephFSTestCase.tearDown() to succeed.
+        self.fs.set_joinable()
+        self.fs.wait_for_daemons()
+
 
 class TestFSSetMaxMDS(TestAdminCommands):
 
@@ -2928,3 +2973,4 @@ class TestToggleVolumes(CephFSTestCase):
         # plugin is reported properly by "ceph mgr module ls" command, check if
         # it is also working fine.
         self.run_ceph_cmd('fs volume ls')
+
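The gen_health_warn_mds_trim() and gen_health_warn_mds_cache_oversized() helpers called in the new tests are defined elsewhere in test_admin.py and are not part of this diff. As a rough sketch only, not the actual helpers: MDS_TRIM is typically induced in QA by shrinking mds_log_max_segments and then generating metadata load, and MDS_CACHE_OVERSIZED by shrinking mds_cache_memory_limit; the function names, file counts and mount path below are illustrative assumptions.

import os
import subprocess

def set_mds_config(option, value):
    # Apply a config option to all MDS daemons through the ceph CLI.
    subprocess.run(["ceph", "config", "set", "mds", option, str(value)], check=True)

def induce_mds_trim_warning(mount_path="/mnt/cephfs", nfiles=500):
    # MDS_TRIM fires when the MDS journal accumulates far more segments than
    # mds_log_max_segments allows: shrink the limit, then create lots of files.
    set_mds_config("mds_log_max_segments", 1)
    for i in range(nfiles):
        with open(os.path.join(mount_path, f"trim_{i}"), "w") as f:
            f.write("x")

def induce_mds_cache_oversized_warning(mount_path="/mnt/cephfs", nfiles=500):
    # MDS_CACHE_OVERSIZED fires when cache usage exceeds mds_cache_memory_limit:
    # set a deliberately tiny limit, then keep many inodes pinned by touching files.
    set_mds_config("mds_cache_memory_limit", 1024)  # bytes; illustrative value
    for i in range(nfiles):
        with open(os.path.join(mount_path, f"cache_{i}"), "w") as f:
            f.write("x")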