git.apps.os.sepia.ceph.com Git - ceph-ci.git/commitdiff
qa/cephfs: test that failing a FS is possible when other MDS/FS has...
author Rishabh Dave <ridave@redhat.com>
Wed, 29 Jan 2025 13:07:44 +0000 (18:37 +0530)
committer Rishabh Dave <ridave@redhat.com>
Thu, 6 Feb 2025 16:35:28 +0000 (22:05 +0530)
qa/cephfs: test that failing a FS is possible when other MDS/FS has health warnings.

Signed-off-by: Rishabh Dave <ridave@redhat.com>
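
Context for the new tests: "fs fail" is refused for a file system whose active MDS
carries the health warning MDS_TRIM or MDS_CACHE_OVERSIZED unless
--yes-i-really-mean-it is passed (the existing test_with_health_warn_* tests in
TestFSFail cover that gate). The new tests check that a warning on a *different*
FS does not trigger this refusal. A minimal sketch of that distinction, written in
the style of the tests below; the method name is illustrative and not part of this
patch:

    def test_sketch_fail_other_fs(self):
        # fs1 is the default FS and will carry the health warning; fs2 is a
        # second FS that is the one actually failed.
        self.fs1 = self.fs
        self.fs2 = self.fs.newfs(name='cephfs2', create=True)

        # put MDS_TRIM on fs1's MDS
        self.gen_health_warn_mds_trim()

        # failing fs1 itself would need the confirmation flag:
        #     ceph fs fail cephfs --yes-i-really-mean-it
        # failing fs2 must succeed without it:
        self.run_ceph_cmd(f'fs fail {self.fs2.name}')
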
qa/tasks/cephfs/test_admin.py

index bf9acc407deab8f61fefc7a1fb284b0e637d0c3d..b7133f3263a22d4dc9c33b5fa51c3c0fc7cf990e 100644 (file)
@@ -2554,7 +2554,7 @@ class TestPermErrMsg(CephFSTestCase):
 class TestFSFail(TestAdminCommands):
 
     MDSS_REQUIRED = 2
-    CLIENTS_REQUIRED = 1
+    CLIENTS_REQUIRED = 2
 
     def test_with_health_warn_cache_oversized(self):
         '''
@@ -2615,6 +2615,48 @@ class TestFSFail(TestAdminCommands):
         self.fs.set_joinable()
         self.fs.wait_for_daemons()
 
+    def test_when_other_FS_has_warn_TRIM(self):
+        '''
+        Test that "fs fail" runs successfully for an FS when an MDS that is
+        active for a different FS has the health warning MDS_TRIM.
+        '''
+        self.fs1 = self.fs
+
+        self.fs2 = self.fs.newfs(name='cephfs2', create=True)
+        self.mount_b.remount(fsname=self.fs2.name)
+        self.mount_b.wait_until_mounted()
+
+        # generates health warning for self.fs1
+        self.gen_health_warn_mds_trim()
+
+        # actual testing begins now.
+        self.run_ceph_cmd(f'fs fail {self.fs2.name}')
+
+        # Bring the MDS back up and wait for it, since it is needed for the
+        # unmounting of CephFS in CephFSTestCase.tearDown() to succeed.
+        self.fs2.set_joinable()
+        self.fs2.wait_for_daemons()
+
+    def test_when_other_FS_has_warn_CACHE_OVERSIZED(self):
+        '''
+        Test that "fs fail" runs successfully for an FS when an MDS that is
+        active for a different FS has the health warning MDS_CACHE_OVERSIZED.
+        '''
+        self.fs1 = self.fs
+
+        self.fs2 = self.fs.newfs(name='cephfs2', create=True)
+        self.mount_b.remount(fsname=self.fs2.name)
+        self.mount_b.wait_until_mounted()
+
+        # actual testing begins now.
+        self.gen_health_warn_mds_cache_oversized(fs=self.fs1)
+        self.run_ceph_cmd(f'fs fail {self.fs2.name}')
+
+        # Bring the MDS back up and wait for it, since it is needed for the
+        # unmounting of CephFS in CephFSTestCase.tearDown() to succeed.
+        self.fs2.set_joinable()
+        self.fs2.wait_for_daemons()
+
 
 class TestMDSFail(TestAdminCommands):
 
@@ -2979,4 +3021,3 @@ class TestToggleVolumes(CephFSTestCase):
         # plugin is reported properly by "ceph mgr module ls" command, check if
         # it is also working fine.
         self.run_ceph_cmd('fs volume ls')
-