]> git.apps.os.sepia.ceph.com Git - ceph-ci.git/commitdiff
qa/cephfs: test that a MDS can be failed when other FS/MDS has a...
authorRishabh Dave <ridave@redhat.com>
Tue, 28 Jan 2025 17:45:28 +0000 (23:15 +0530)
committerRishabh Dave <ridave@redhat.com>
Mon, 15 Sep 2025 06:24:36 +0000 (11:54 +0530)
health warning.

Signed-off-by: Rishabh Dave <ridave@redhat.com>
(cherry picked from commit 95a812a5f5c058bcab077790b5edfc8985d0c485)

Conflicts:
qa/tasks/cephfs/test_admin.py
- The region where the patch has to be applied is different in the reef
  branch compared to the main branch.

Conflicts:
qa/tasks/cephfs/test_admin.py
- This file is slightly different from main, which prevented a seamless
  rebase.

qa/tasks/cephfs/test_admin.py

index 2c6ea664d7419a365d2ae669dab12dcb85336566..88d96688ed9cc1bf5242aed4c4ad46107b5024fd 100644 (file)
@@ -2095,7 +2095,7 @@ class TestFSFail(TestAdminCommands):
 class TestMDSFail(TestAdminCommands):
 
     MDSS_REQUIRED = 2
-    CLIENTS_REQUIRED = 1
+    CLIENTS_REQUIRED = 2
 
     def test_with_health_warn_cache_oversized(self):
         '''
@@ -2173,6 +2173,51 @@ class TestMDSFail(TestAdminCommands):
         self.run_ceph_cmd(f'mds fail {mds1_id} --yes-i-really-mean-it')
         self.run_ceph_cmd(f'mds fail {mds2_id} --yes-i-really-mean-it')
 
+    def test_when_other_MDS_has_warn_TRIM(self):
+        '''
+        Test that "mds fail" runs successfully for a MDS when a MDS which is
+        active for a different FS has health warning MDS_TRIM.
+        '''
+        self.fs1 = self.fs
+
+        self.fs2 = self.fs.newfs(name='cephfs2', create=True)
+        self.mount_b.remount(fsname=self.fs2.name)
+        self.mount_b.wait_until_mounted()
+
+        # generates health warning for self.fs1
+        self.gen_health_warn_mds_trim()
+
+        active_mds_id = self.fs2.get_active_names()[0]
+        # actual testing begins now.
+        self.run_ceph_cmd(f'mds fail {active_mds_id}')
+
+        # Bring and wait for MDS to be up since it is needed for unmounting
+        # of CephFS in CephFSTestCase.tearDown() to be successful.
+        self.fs.set_joinable()
+        self.fs.wait_for_daemons()
+
+    def test_when_other_MDS_has_warn_CACHE_OVERSIZED(self):
+        '''
+        Test that "mds fail" runs successfully for a MDS when a MDS which is
+        active for a different FS has health warning MDS_CACHE_OVERSIZED.
+        '''
+        self.fs1 = self.fs
+
+        self.fs2 = self.fs.newfs(name='cephfs2', create=True)
+        self.mount_b.remount(fsname=self.fs2.name)
+        self.mount_b.wait_until_mounted()
+
+        # actual testing begins now.
+        mds_id_for_fs1 = self.fs1.get_active_names()[0]
+        self.gen_health_warn_mds_cache_oversized(mds_id=mds_id_for_fs1)
+        mds_id_for_fs2 = self.fs2.get_active_names()[0]
+        self.run_ceph_cmd(f'mds fail {mds_id_for_fs2}')
+
+        # Bring and wait for MDS to be up since it is needed for unmounting
+        # of CephFS in CephFSTestCase.tearDown() to be successful.
+        self.fs.set_joinable()
+        self.fs.wait_for_daemons()
+
 
 class TestToggleVolumes(CephFSTestCase):
     '''