class TestMDSFail(TestAdminCommands):
MDSS_REQUIRED = 2
- CLIENTS_REQUIRED = 1
+ CLIENTS_REQUIRED = 2
def test_with_health_warn_cache_oversized(self):
'''
self.run_ceph_cmd(f'mds fail {mds1_id} --yes-i-really-mean-it')
self.run_ceph_cmd(f'mds fail {mds2_id} --yes-i-really-mean-it')
+ def test_when_other_MDS_has_warn_TRIM(self):
+ '''
+ Test that "mds fail" runs successfully for a MDS when a MDS which is
+ active for a different FS has health warning MDS_TRIM.
+ '''
+ self.fs1 = self.fs
+
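+ # create a second FS and remount the second client onto it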
+ self.fs2 = self.fs.newfs(name='cephfs2', create=True)
+ self.mount_b.remount(cephfs_name=self.fs2.name)
+ self.mount_b.wait_until_mounted()
+
+ # generate the health warning MDS_TRIM on the MDS of self.fs1
+ self.gen_health_warn_mds_trim()
+
+ active_mds_id = self.fs2.get_active_names()[0]
+ # actual testing begins now.
+ self.run_ceph_cmd(f'mds fail {active_mds_id}')
+
+ # Bring the failed MDS back up and wait for it, since an active MDS is
+ # needed for the unmounting of CephFS in CephFSTestCase.tearDown() to
+ # succeed.
+ self.fs2.set_joinable()
+ self.fs2.wait_for_daemons()
+
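+ # NOTE: gen_health_warn_mds_trim() is a helper defined elsewhere in this
+ # class; a minimal sketch of the idea (the knobs and values below are
+ # assumptions, not the upstream implementation) would be:
+ #
+ #     def gen_health_warn_mds_trim(self):
+ #         # keep the journal segment limit tiny so that untrimmed
+ #         # segments pile up quickly and the MDS_TRIM warning is raised
+ #         self.config_set('mds', 'mds_log_max_segments', '1')
+ #         self.mount_a.create_n_files('subdir/file', 100)
+ #         self.wait_for_health('MDS_TRIM', 60)
+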
+ def test_when_other_MDS_has_warn_CACHE_OVERSIZED(self):
+ '''
+ Test that "mds fail" runs successfully for a MDS when a MDS which is
+ active for a different FS has health warning MDS_CACHE_OVERSIZED.
+ '''
+ self.fs1 = self.fs
+
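+ # as above, create a second FS and remount the second client onto it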
+ self.fs2 = self.fs.newfs(name='cephfs2', create=True)
+ self.mount_b.remount(cephfs_name=self.fs2.name)
+ self.mount_b.wait_until_mounted()
+
+ # generate the health warning MDS_CACHE_OVERSIZED on the MDS of self.fs1
+ mds_id_for_fs1 = self.fs1.get_active_names()[0]
+ self.gen_health_warn_mds_cache_oversized(mds_id=mds_id_for_fs1)
+
+ # actual testing begins now.
+ mds_id_for_fs2 = self.fs2.get_active_names()[0]
+ self.run_ceph_cmd(f'mds fail {mds_id_for_fs2}')
+
+ # Bring the failed MDS back up and wait for it, since an active MDS is
+ # needed for the unmounting of CephFS in CephFSTestCase.tearDown() to
+ # succeed.
+ self.fs2.set_joinable()
+ self.fs2.wait_for_daemons()
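+
+ # NOTE: likewise, a minimal sketch of gen_health_warn_mds_cache_oversized()
+ # (the config value and workload size are assumptions, not upstream code):
+ #
+ #     def gen_health_warn_mds_cache_oversized(self, mds_id):
+ #         # shrink the cache memory limit so that a modest client
+ #         # workload pushes the MDS cache past it and raises
+ #         # MDS_CACHE_OVERSIZED
+ #         self.config_set(f'mds.{mds_id}', 'mds_cache_memory_limit', '1K')
+ #         self.mount_a.open_n_background('.', 400)
+ #         self.wait_for_health('MDS_CACHE_OVERSIZED', 60)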
+
class TestToggleVolumes(CephFSTestCase):
'''