From d86b67ef9d3b9369c394dbe23c295c4eee7dce19 Mon Sep 17 00:00:00 2001
From: John Spray
Date: Mon, 14 Nov 2016 16:57:08 +0000
Subject: [PATCH] tasks/cephfs: make test_journal_repair work with more daemons

Previously this assumed it was running with exactly two MDS daemons.
When there were more, it would fail to execute "fs reset" because the
extra daemons were active in the map.

Signed-off-by: John Spray
---
 tasks/cephfs/test_journal_repair.py | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/tasks/cephfs/test_journal_repair.py b/tasks/cephfs/test_journal_repair.py
index 68cac7de48103..4b108061d41f8 100644
--- a/tasks/cephfs/test_journal_repair.py
+++ b/tasks/cephfs/test_journal_repair.py
@@ -169,6 +169,11 @@ class TestJournalRepair(CephFSTestCase):
                               reject_fn=lambda v: v > 2 or v < 1)
 
         active_mds_names = self.fs.get_active_names()
+        # Switch off any unneeded MDS daemons
+        for unneeded_mds in set(self.mds_cluster.mds_ids) - set(active_mds_names):
+            self.mds_cluster.mds_stop(unneeded_mds)
+            self.mds_cluster.mds_fail(unneeded_mds)
+
         # Do a bunch of I/O such that at least some will hit the second MDS: create
         # lots of directories so that the balancer should find it easy to make a decision
         # to allocate some of them to the second mds.
-- 
2.39.5