From cf9e0da0789488928bca1f2fa9b9a62d2b1bd762 Mon Sep 17 00:00:00 2001
From: Patrick Donnelly
Date: Wed, 1 Feb 2017 16:42:16 -0500
Subject: [PATCH] qa: use fs methods for setting configs

Signed-off-by: Patrick Donnelly
---
 qa/tasks/ceph.py                       | 34 +++-----------------
 qa/tasks/cephfs/filesystem.py          |  6 ++++
 qa/tasks/cephfs/test_data_scan.py      |  3 +-
 qa/tasks/cephfs/test_failover.py       | 43 +++++++++-----------------
 qa/tasks/cephfs/test_fragment.py       |  4 +--
 qa/tasks/cephfs/test_journal_repair.py |  5 ++-
 qa/tasks/cephfs/test_mantle.py         |  5 ++-
 qa/tasks/cephfs/test_sessionmap.py     |  5 ++-
 qa/tasks/cephfs/test_strays.py         | 11 +++----
 9 files changed, 39 insertions(+), 77 deletions(-)

diff --git a/qa/tasks/ceph.py b/qa/tasks/ceph.py
index bcbacdc61d596..d4c9f80d47969 100644
--- a/qa/tasks/ceph.py
+++ b/qa/tasks/ceph.py
@@ -334,39 +334,15 @@ def cephfs_setup(ctx, config):
     if mdss.remotes:
         log.info('Setting up CephFS filesystem...')
 
-        Filesystem(ctx, create='cephfs')
+        fs = Filesystem(ctx, create='cephfs')
 
         is_active_mds = lambda role: 'mds.' in role and not role.endswith('-s') and '-s-' not in role
         all_roles = [item for remote_roles in mdss.remotes.values() for item in remote_roles]
         num_active = len([r for r in all_roles if is_active_mds(r)])
-        mon_remote.run(
-            args=[
-                'sudo',
-                'adjust-ulimits',
-                'ceph-coverage',
-                coverage_dir,
-                'ceph', 'mds', 'set', 'allow_multimds', 'true',
-                '--yes-i-really-mean-it'],
-            check_status=False, # probably old version, upgrade test
-        )
-        mon_remote.run(args=[
-            'sudo',
-            'adjust-ulimits',
-            'ceph-coverage',
-            coverage_dir,
-            'ceph',
-            '--cluster', cluster_name,
-            'mds', 'set_max_mds', str(num_active)])
-        mon_remote.run(
-            args=[
-                'sudo',
-                'adjust-ulimits',
-                'ceph-coverage',
-                coverage_dir,
-                'ceph', 'mds', 'set', 'allow_dirfrags', 'true',
-                '--yes-i-really-mean-it'],
-            check_status=False, # probably old version, upgrade test
-        )
+
+        fs.set_allow_multimds(True)
+        fs.set_max_mds(num_active)
+        fs.set_allow_dirfrags(True)
 
     yield
 
diff --git a/qa/tasks/cephfs/filesystem.py b/qa/tasks/cephfs/filesystem.py
index 554377e5a9dc0..0e62761274245 100644
--- a/qa/tasks/cephfs/filesystem.py
+++ b/qa/tasks/cephfs/filesystem.py
@@ -416,6 +416,12 @@ class Filesystem(MDSCluster):
     def set_max_mds(self, max_mds):
         self.mon_manager.raw_cluster_cmd("fs", "set", self.name, "max_mds", "%d" % max_mds)
 
+    def set_allow_dirfrags(self, yes):
+        self.mon_manager.raw_cluster_cmd("fs", "set", self.name, "allow_dirfrags", str(yes).lower(), '--yes-i-really-mean-it')
+
+    def set_allow_multimds(self, yes):
+        self.mon_manager.raw_cluster_cmd("fs", "set", self.name, "allow_multimds", str(yes).lower(), '--yes-i-really-mean-it')
+
     def get_pgs_per_fs_pool(self):
         """
         Calculate how many PGs to use when creating a pool, in order to avoid raising any
diff --git a/qa/tasks/cephfs/test_data_scan.py b/qa/tasks/cephfs/test_data_scan.py
index 44280398821ff..e4608714fe93a 100644
--- a/qa/tasks/cephfs/test_data_scan.py
+++ b/qa/tasks/cephfs/test_data_scan.py
@@ -426,8 +426,7 @@ class TestDataScan(CephFSTestCase):
         That when injecting a dentry into a fragmented directory, we put it in the
         right fragment.
""" - self.fs.mon_manager.raw_cluster_cmd("mds", "set", "allow_dirfrags", "true", - "--yes-i-really-mean-it") + self.fs.set_allow_dirfrags(True) file_count = 100 file_names = ["%s" % n for n in range(0, file_count)] diff --git a/qa/tasks/cephfs/test_failover.py b/qa/tasks/cephfs/test_failover.py index 77d2fbdf3a778..18a35bbba04d4 100644 --- a/qa/tasks/cephfs/test_failover.py +++ b/qa/tasks/cephfs/test_failover.py @@ -217,10 +217,8 @@ class TestStandbyReplay(CephFSTestCase): # Create FS alpha and get mds_a to come up as active fs_a = self.mds_cluster.newfs("alpha") - fs_a.mon_manager.raw_cluster_cmd('fs', 'set', fs_a.name, - 'allow_multimds', "true", - "--yes-i-really-mean-it") - fs_a.mon_manager.raw_cluster_cmd('fs', 'set', fs_a.name, 'max_mds', "2") + fs_a.set_allow_multimds(True) + fs_a.set_max_mds(2) self.mds_cluster.mds_restart(mds_a) self.wait_until_equal(lambda: fs_a.get_active_names(), [mds_a], 30) @@ -239,7 +237,7 @@ class TestStandbyReplay(CephFSTestCase): self.assertEqual(info_a_s['state'], "up:standby-replay") # Shrink the cluster - fs_a.mon_manager.raw_cluster_cmd('fs', 'set', fs_a.name, 'max_mds', "1") + fs_a.set_max_mds(1) fs_a.mon_manager.raw_cluster_cmd("mds", "stop", "{0}:1".format(fs_a.name)) self.wait_until_equal( lambda: fs_a.get_active_names(), [mds_a], @@ -374,32 +372,27 @@ class TestMultiFilesystems(CephFSTestCase): def test_grow_shrink(self): # Usual setup... fs_a, fs_b = self._setup_two() - fs_a.mon_manager.raw_cluster_cmd("fs", "set", fs_a.name, - "allow_multimds", "true", - "--yes-i-really-mean-it") - - fs_b.mon_manager.raw_cluster_cmd("fs", "set", fs_b.name, - "allow_multimds", "true", - "--yes-i-really-mean-it") + fs_a.set_allow_multimds(True) + fs_b.set_allow_multimds(True) # Increase max_mds on fs_b, see a standby take up the role - fs_b.mon_manager.raw_cluster_cmd('fs', 'set', fs_b.name, 'max_mds', "2") + fs_b.set_max_mds(2) self.wait_until_equal(lambda: len(fs_b.get_active_names()), 2, 30, reject_fn=lambda v: v > 2 or v < 1) # Increase max_mds on fs_a, see a standby take up the role - fs_a.mon_manager.raw_cluster_cmd('fs', 'set', fs_a.name, 'max_mds', "2") + fs_a.set_max_mds(2) self.wait_until_equal(lambda: len(fs_a.get_active_names()), 2, 30, reject_fn=lambda v: v > 2 or v < 1) # Shrink fs_b back to 1, see a daemon go back to standby - fs_b.mon_manager.raw_cluster_cmd('fs', 'set', fs_b.name, 'max_mds', "1") - fs_b.mon_manager.raw_cluster_cmd('mds', 'deactivate', "{0}:1".format(fs_b.name)) + fs_b.set_max_mds(1) + fs_b.deactivate(1) self.wait_until_equal(lambda: len(fs_b.get_active_names()), 1, 30, reject_fn=lambda v: v > 2 or v < 1) # Grow fs_a up to 3, see the former fs_b daemon join it. 
-        fs_a.mon_manager.raw_cluster_cmd('fs', 'set', fs_a.name, 'max_mds', "3")
+        fs_a.set_max_mds(3)
         self.wait_until_equal(lambda: len(fs_a.get_active_names()), 3, 60,
                               reject_fn=lambda v: v > 3 or v < 2)
 
@@ -537,19 +530,13 @@ class TestMultiFilesystems(CephFSTestCase):
 
         # Create two filesystems which should have two ranks each
         fs_a = self.mds_cluster.newfs("alpha")
-        fs_a.mon_manager.raw_cluster_cmd("fs", "set", fs_a.name,
-                                         "allow_multimds", "true",
-                                         "--yes-i-really-mean-it")
+        fs_a.set_allow_multimds(True)
 
         fs_b = self.mds_cluster.newfs("bravo")
-        fs_b.mon_manager.raw_cluster_cmd("fs", "set", fs_b.name,
-                                         "allow_multimds", "true",
-                                         "--yes-i-really-mean-it")
-
-        fs_a.mon_manager.raw_cluster_cmd('fs', 'set', fs_a.name,
-                                         'max_mds', "2")
-        fs_b.mon_manager.raw_cluster_cmd('fs', 'set', fs_b.name,
-                                         'max_mds', "2")
+        fs_b.set_allow_multimds(True)
+
+        fs_a.set_max_mds(2)
+        fs_b.set_max_mds(2)
 
         # Set all the daemons to have a FSCID assignment but no other
         # standby preferences.
diff --git a/qa/tasks/cephfs/test_fragment.py b/qa/tasks/cephfs/test_fragment.py
index 62412470f12ba..81b7ec630e6e8 100644
--- a/qa/tasks/cephfs/test_fragment.py
+++ b/qa/tasks/cephfs/test_fragment.py
@@ -38,9 +38,7 @@ class TestFragmentation(CephFSTestCase):
         for k, v in kwargs.items():
             self.ceph_cluster.set_ceph_conf("mds", k, v.__str__())
 
-        self.fs.mon_manager.raw_cluster_cmd("fs", "set", self.fs.name,
-                                            "allow_dirfrags", "true",
-                                            "--yes-i-really-mean-it")
+        self.fs.set_allow_dirfrags(True)
 
         self.mds_cluster.mds_fail_restart()
         self.fs.wait_for_daemons()
diff --git a/qa/tasks/cephfs/test_journal_repair.py b/qa/tasks/cephfs/test_journal_repair.py
index 4b108061d41f8..8496b144e1e46 100644
--- a/qa/tasks/cephfs/test_journal_repair.py
+++ b/qa/tasks/cephfs/test_journal_repair.py
@@ -160,9 +160,8 @@ class TestJournalRepair(CephFSTestCase):
         """
 
         # Set max_mds to 2
-        self.fs.mon_manager.raw_cluster_cmd_result('mds', 'set', "allow_multimds",
-                                                   "true", "--yes-i-really-mean-it")
-        self.fs.mon_manager.raw_cluster_cmd_result('mds', 'set', "max_mds", "2")
+        self.fs.set_allow_multimds(True)
+        self.fs.set_max_mds(2)
 
         # See that we have two active MDSs
         self.wait_until_equal(lambda: len(self.fs.get_active_names()), 2, 30,
diff --git a/qa/tasks/cephfs/test_mantle.py b/qa/tasks/cephfs/test_mantle.py
index 723af47b118df..8e0526332e65e 100644
--- a/qa/tasks/cephfs/test_mantle.py
+++ b/qa/tasks/cephfs/test_mantle.py
@@ -9,9 +9,8 @@ success = "mantle balancer version changed: "
 class TestMantle(CephFSTestCase):
     def start_mantle(self):
         self.wait_for_health_clear(timeout=30)
-        self.fs.mon_manager.raw_cluster_cmd_result('mds', 'set', "allow_multimds",
-                                                   "true", "--yes-i-really-mean-it")
-        self.fs.mon_manager.raw_cluster_cmd_result('mds', 'set', "max_mds", "2")
+        self.fs.set_allow_multimds(True)
+        self.fs.set_max_mds(2)
 
         self.wait_until_equal(lambda: len(self.fs.get_active_names()), 2, 30,
                               reject_fn=lambda v: v > 2 or v < 1)
diff --git a/qa/tasks/cephfs/test_sessionmap.py b/qa/tasks/cephfs/test_sessionmap.py
index 84abbaaf5663e..e9b4b646d8f18 100644
--- a/qa/tasks/cephfs/test_sessionmap.py
+++ b/qa/tasks/cephfs/test_sessionmap.py
@@ -99,9 +99,8 @@ class TestSessionMap(CephFSTestCase):
         self.fs.wait_for_daemons()
 
         # I would like two MDSs, so that I can do an export dir later
-        self.fs.mon_manager.raw_cluster_cmd_result('mds', 'set', "allow_multimds",
-                                                   "true", "--yes-i-really-mean-it")
-        self.fs.mon_manager.raw_cluster_cmd_result('mds', 'set', "max_mds", "2")
+        self.fs.set_allow_multimds(True)
+        self.fs.set_max_mds(2)
         self.fs.wait_for_daemons()
 
         active_mds_names = self.fs.get_active_names()
diff --git a/qa/tasks/cephfs/test_strays.py b/qa/tasks/cephfs/test_strays.py
index 467932b6e4fdc..7166725d39d7d 100644
--- a/qa/tasks/cephfs/test_strays.py
+++ b/qa/tasks/cephfs/test_strays.py
@@ -415,9 +415,8 @@ class TestStrays(CephFSTestCase):
         """
 
         # Set up two MDSs
-        self.fs.mon_manager.raw_cluster_cmd_result('mds', 'set', "allow_multimds",
-                                                   "true", "--yes-i-really-mean-it")
-        self.fs.mon_manager.raw_cluster_cmd_result('mds', 'set', "max_mds", "2")
+        self.fs.set_allow_multimds(True)
+        self.fs.set_max_mds(2)
 
         # See that we have two active MDSs
         self.wait_until_equal(lambda: len(self.fs.get_active_names()), 2, 30,
@@ -486,8 +485,8 @@ class TestStrays(CephFSTestCase):
         self.assertTrue(self.fs.data_objects_present(ino, size_mb * 1024 * 1024))
 
         # Shut down rank 1
-        self.fs.mon_manager.raw_cluster_cmd_result('mds', 'set', "max_mds", "1")
-        self.fs.mon_manager.raw_cluster_cmd_result('mds', 'deactivate', "1")
+        self.fs.set_max_mds(1)
+        self.fs.deactivate(1)
 
         # Wait til we get to a single active MDS mdsmap state
         def is_stopped():
@@ -693,7 +692,7 @@ class TestStrays(CephFSTestCase):
         That unlinking fails when the stray directory fragment becomes too large
         and that unlinking may continue once those strays are purged.
         """
-        self.fs.mon_manager.raw_cluster_cmd("mds", "set", "allow_dirfrags", "true", "--yes-i-really-mean-it")
+        self.fs.set_allow_dirfrags(True)
 
         LOW_LIMIT = 50
         for mds in self.fs.get_daemon_names():
-- 
2.39.5
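
For readers outside the teuthology tree, the sketch below illustrates the pattern this patch settles on: the Filesystem object exposes typed setters (set_allow_multimds, set_max_mds, set_allow_dirfrags) that expand to the underlying "ceph fs set <name> ..." invocation, so individual tests no longer hand-roll raw mon commands or repeat the "--yes-i-really-mean-it" flag. This is a minimal, self-contained approximation only: FakeMonManager and MiniFilesystem are hypothetical stand-ins invented for illustration (the real helpers live in qa/tasks/cephfs/filesystem.py and go through teuthology's mon manager), and the stub merely records the arguments instead of running the ceph CLI.

    class FakeMonManager(object):
        """Hypothetical stub standing in for teuthology's mon manager."""

        def __init__(self):
            self.calls = []

        def raw_cluster_cmd(self, *args):
            # The real manager runs the ceph CLI; this stub only records the args.
            self.calls.append(list(args))


    class MiniFilesystem(object):
        """Minimal analogue of the Filesystem config setters (illustration only)."""

        def __init__(self, name, mon_manager):
            self.name = name
            self.mon_manager = mon_manager

        def set_max_mds(self, max_mds):
            self.mon_manager.raw_cluster_cmd("fs", "set", self.name, "max_mds", "%d" % max_mds)

        def set_allow_multimds(self, yes):
            self.mon_manager.raw_cluster_cmd("fs", "set", self.name, "allow_multimds",
                                             str(yes).lower(), "--yes-i-really-mean-it")

        def set_allow_dirfrags(self, yes):
            self.mon_manager.raw_cluster_cmd("fs", "set", self.name, "allow_dirfrags",
                                             str(yes).lower(), "--yes-i-really-mean-it")


    if __name__ == "__main__":
        fs = MiniFilesystem("cephfs", FakeMonManager())
        # Same sequence the patched cephfs_setup() issues, here for two active MDSs.
        fs.set_allow_multimds(True)
        fs.set_max_mds(2)
        fs.set_allow_dirfrags(True)
        for call in fs.mon_manager.calls:
            print("ceph " + " ".join(call))

One practical effect visible in the diff: the per-test "mds set" / "fs set" invocations collapse into per-filesystem setters, and the confirmation flag is spelled out in exactly one place (filesystem.py) rather than in every test.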