From: Mykola Golub
Date: Wed, 28 Jun 2023 07:00:23 +0000 (+0100)
Subject: qa: add cephfs test for refuse_standby_for_another_fs flag
X-Git-Tag: v18.2.4~396^2~2
X-Git-Url: http://git.apps.os.sepia.ceph.com/?a=commitdiff_plain;h=505418a351573859990eaca672b2449a92ac7007;p=ceph.git

qa: add cephfs test for refuse_standby_for_another_fs flag

Signed-off-by: Mykola Golub
(cherry picked from commit c4da777c6ffc3dc7186097df9a09ac902adac5ec)
---

diff --git a/qa/tasks/cephfs/filesystem.py b/qa/tasks/cephfs/filesystem.py
index 777ba8249ec89..6541b70b76ac2 100644
--- a/qa/tasks/cephfs/filesystem.py
+++ b/qa/tasks/cephfs/filesystem.py
@@ -615,6 +615,9 @@ class Filesystem(MDSCluster):
     def set_refuse_client_session(self, yes):
         self.set_var("refuse_client_session", yes)
 
+    def set_refuse_standby_for_another_fs(self, yes):
+        self.set_var("refuse_standby_for_another_fs", yes)
+
     def compat(self, *args):
         a = map(lambda x: str(x).lower(), args)
         self.mon_manager.raw_cluster_cmd("fs", "compat", self.name, *a)
diff --git a/qa/tasks/cephfs/test_failover.py b/qa/tasks/cephfs/test_failover.py
index 68483d3308276..9461c558cb987 100644
--- a/qa/tasks/cephfs/test_failover.py
+++ b/qa/tasks/cephfs/test_failover.py
@@ -160,6 +160,31 @@ class TestClusterAffinity(CephFSTestCase):
         log.info("Waiting for former active to reclaim its spot")
         self.wait_until_true(reclaimed, timeout=self.fs.beacon_timeout)
 
+    def test_join_fs_last_resort_refused(self):
+        """
+        That a standby with mds_join_fs set to another fs is not used if refuse_standby_for_another_fs is set.
+        """
+        status, target = self._verify_init()
+        standbys = [info['name'] for info in status.get_standbys()]
+        for mds in standbys:
+            self.config_set('mds.'+mds, 'mds_join_fs', 'cephfs2')
+        fs2 = self.mds_cluster.newfs(name="cephfs2")
+        for mds in standbys:
+            self._change_target_state(target, mds, {'join_fscid': fs2.id})
+        self.fs.set_refuse_standby_for_another_fs(True)
+        self.fs.rank_fail()
+        status = self.fs.status()
+        ranks = list(self.fs.get_ranks(status=status))
+        self.assertTrue(len(ranks) == 0 or ranks[0]['name'] not in standbys)
+
+        # Wait for the former active to reclaim its spot
+        def reclaimed():
+            ranks = list(self.fs.get_ranks())
+            return len(ranks) > 0 and ranks[0]['name'] not in standbys
+
+        log.info("Waiting for former active to reclaim its spot")
+        self.wait_until_true(reclaimed, timeout=self.fs.beacon_timeout)
+
     def test_join_fs_steady(self):
         """
         That a sole MDS with mds_join_fs set will come back as active eventually even after failover.
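
For context, the new set_refuse_standby_for_another_fs helper delegates to
Filesystem.set_var, which in this QA harness issues the matching "ceph fs set"
monitor command. Below is a minimal standalone sketch, not part of the patch
itself, of the operator-facing commands the test flow corresponds to; it
assumes a reachable cluster and the ceph CLI on PATH, and the fs name "cephfs"
is purely illustrative.

    # Sketch under the assumption that Filesystem.set_var maps to
    # "ceph fs set <fs> <var> <value>"; the fs name "cephfs" is illustrative.
    import subprocess

    def ceph(*args: str) -> str:
        """Run a ceph CLI command and return its stdout."""
        return subprocess.run(
            ["ceph", *args], check=True, capture_output=True, text=True
        ).stdout

    # Counterpart of fs.set_refuse_standby_for_another_fs(True): standbys
    # whose mds_join_fs points at a different filesystem are refused.
    ceph("fs", "set", "cephfs", "refuse_standby_for_another_fs", "true")

    # Counterpart of fs.rank_fail(): fail rank 0 so the monitors must pick
    # (or, with the flag set, refuse) a replacement from the standby pool.
    ceph("mds", "fail", "cephfs:0")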