From: Rishabh Dave
Date: Tue, 28 Jan 2025 17:45:28 +0000 (+0530)
Subject: qa/cephfs: test that a MDS can be failed when other FS/MDS has a...
X-Git-Tag: v20.0.0~19^2~6
X-Git-Url: http://git-server-git.apps.pok.os.sepia.ceph.com/?a=commitdiff_plain;h=95a812a5f5c058bcab077790b5edfc8985d0c485;p=ceph.git

qa/cephfs: test that a MDS can be failed when other FS/MDS has a
health warning.

Signed-off-by: Rishabh Dave
---

diff --git a/qa/tasks/cephfs/test_admin.py b/qa/tasks/cephfs/test_admin.py
index 7f5a0dbec2d..d35c3ffde42 100644
--- a/qa/tasks/cephfs/test_admin.py
+++ b/qa/tasks/cephfs/test_admin.py
@@ -2613,7 +2613,7 @@ class TestFSFail(TestAdminCommands):
 
 class TestMDSFail(TestAdminCommands):
     MDSS_REQUIRED = 2
-    CLIENTS_REQUIRED = 1
+    CLIENTS_REQUIRED = 2
 
     def test_with_health_warn_cache_oversized(self):
         '''
@@ -2691,6 +2691,51 @@ class TestMDSFail(TestAdminCommands):
         self.run_ceph_cmd(f'mds fail {mds1_id} --yes-i-really-mean-it')
         self.run_ceph_cmd(f'mds fail {mds2_id} --yes-i-really-mean-it')
 
+    def test_when_other_MDS_has_warn_TRIM(self):
+        '''
+        Test that "mds fail" runs successfully for an MDS when an MDS that is
+        active for a different FS has the health warning MDS_TRIM.
+        '''
+        self.fs1 = self.fs
+
+        self.fs2 = self.fs.newfs(name='cephfs2', create=True)
+        self.mount_b.remount(fsname=self.fs2.name)
+        self.mount_b.wait_until_mounted()
+
+        # generate a health warning for self.fs1
+        self.gen_health_warn_mds_trim()
+
+        active_mds_id = self.fs2.get_active_names()[0]
+        # actual testing begins now.
+        self.run_ceph_cmd(f'mds fail {active_mds_id}')
+
+        # Bring the MDS back up and wait for it, since it is needed for the
+        # unmount of CephFS in CephFSTestCase.tearDown() to succeed.
+        self.fs.set_joinable()
+        self.fs.wait_for_daemons()
+
+    def test_when_other_MDS_has_warn_CACHE_OVERSIZED(self):
+        '''
+        Test that "mds fail" runs successfully for an MDS when an MDS that is
+        active for a different FS has the health warning MDS_CACHE_OVERSIZED.
+        '''
+        self.fs1 = self.fs
+
+        self.fs2 = self.fs.newfs(name='cephfs2', create=True)
+        self.mount_b.remount(fsname=self.fs2.name)
+        self.mount_b.wait_until_mounted()
+
+        # actual testing begins now.
+        mds_id_for_fs1 = self.fs1.get_active_names()[0]
+        self.gen_health_warn_mds_cache_oversized(mds_id=mds_id_for_fs1)
+        mds_id_for_fs2 = self.fs2.get_active_names()[0]
+        self.run_ceph_cmd(f'mds fail {mds_id_for_fs2}')
+
+        # Bring the MDS back up and wait for it, since it is needed for the
+        # unmount of CephFS in CephFSTestCase.tearDown() to succeed.
+        self.fs.set_joinable()
+        self.fs.wait_for_daemons()
+
 
 class TestFSSetMaxMDS(TestAdminCommands):
 
@@ -2928,3 +2973,4 @@ class TestToggleVolumes(CephFSTestCase):
         # plugin is reported properly by "ceph mgr module ls" command, check if
         # it is also working fine.
         self.run_ceph_cmd('fs volume ls')
+
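
Note for reviewers: both new tests share one shape. Create a second filesystem, raise a health warning on the first filesystem's MDS, then verify that "ceph mds fail" still succeeds against the active MDS of the other filesystem. The condensed sketch below is illustrative only, not part of the patch; the method name is hypothetical, and it assumes the CephFS QA helpers behave as the patch uses them (newfs, remount, get_active_names, gen_health_warn_mds_trim, run_ceph_cmd, set_joinable, wait_for_daemons).

    # Hypothetical sketch of the flow common to test_when_other_MDS_has_warn_TRIM
    # and test_when_other_MDS_has_warn_CACHE_OVERSIZED.
    def _sketch_mds_fail_on_other_fs(self):
        fs1 = self.fs                                     # FS that will carry the health warning
        fs2 = self.fs.newfs(name='cephfs2', create=True)  # FS whose MDS gets failed
        self.mount_b.remount(fsname=fs2.name)
        self.mount_b.wait_until_mounted()

        # Put fs1's MDS into a warned state; the CACHE_OVERSIZED test calls
        # gen_health_warn_mds_cache_oversized() here instead.
        self.gen_health_warn_mds_trim()

        # The command under test: failing fs2's MDS is expected to succeed
        # even though fs1's MDS currently has a health warning.
        other_mds_id = fs2.get_active_names()[0]
        self.run_ceph_cmd(f'mds fail {other_mds_id}')

        # Restore the MDS daemons so CephFSTestCase.tearDown() can unmount.
        self.fs.set_joinable()
        self.fs.wait_for_daemons()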