git.apps.os.sepia.ceph.com Git - ceph.git/commitdiff
qa: use fs methods for setting configs
author    Patrick Donnelly <pdonnell@redhat.com>
          Wed, 1 Feb 2017 21:42:16 +0000 (16:42 -0500)
committer Patrick Donnelly <pdonnell@redhat.com>
          Mon, 6 Feb 2017 19:07:13 +0000 (14:07 -0500)
Signed-off-by: Patrick Donnelly <pdonnell@redhat.com>
qa/tasks/ceph.py
qa/tasks/cephfs/filesystem.py
qa/tasks/cephfs/test_data_scan.py
qa/tasks/cephfs/test_failover.py
qa/tasks/cephfs/test_fragment.py
qa/tasks/cephfs/test_journal_repair.py
qa/tasks/cephfs/test_mantle.py
qa/tasks/cephfs/test_sessionmap.py
qa/tasks/cephfs/test_strays.py

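In outline, the patch replaces hand-rolled `ceph mds set ...` invocations scattered through the qa tasks with helper methods on `Filesystem`. A minimal sketch of the resulting calling pattern follows; it is illustrative only (the helpers set_allow_multimds/set_max_mds are the ones this patch introduces, but TestMultiMdsExample and the MDSS_REQUIRED value are assumptions, not part of the patch):

    # Illustrative sketch only -- not part of this commit.
    from tasks.cephfs.cephfs_test_case import CephFSTestCase

    class TestMultiMdsExample(CephFSTestCase):
        # Assumed CephFSTestCase knob requesting two MDS daemons.
        MDSS_REQUIRED = 2

        def test_two_actives(self):
            # The helpers wrap "ceph fs set <name> ..." and append
            # --yes-i-really-mean-it themselves, so callers pass plain values.
            self.fs.set_allow_multimds(True)
            self.fs.set_max_mds(2)
            # Same wait pattern the converted tests below use.
            self.wait_until_equal(lambda: len(self.fs.get_active_names()),
                                  2, 30, reject_fn=lambda v: v > 2 or v < 1)
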
diff --git a/qa/tasks/ceph.py b/qa/tasks/ceph.py
index bcbacdc61d5966cdc61e68c435273f8f19028959..d4c9f80d47969130d6343bee80a36e31d4f0f2b5 100644
@@ -334,39 +334,15 @@ def cephfs_setup(ctx, config):
     if mdss.remotes:
         log.info('Setting up CephFS filesystem...')
 
-        Filesystem(ctx, create='cephfs')
+        fs = Filesystem(ctx, create='cephfs')
 
         is_active_mds = lambda role: 'mds.' in role and not role.endswith('-s') and '-s-' not in role
         all_roles = [item for remote_roles in mdss.remotes.values() for item in remote_roles]
         num_active = len([r for r in all_roles if is_active_mds(r)])
-        mon_remote.run(
-            args=[
-                'sudo',
-                'adjust-ulimits',
-                'ceph-coverage',
-                coverage_dir,
-                'ceph', 'mds', 'set', 'allow_multimds', 'true',
-                '--yes-i-really-mean-it'],
-           check_status=False,  # probably old version, upgrade test
-        )
-        mon_remote.run(args=[
-            'sudo',
-            'adjust-ulimits',
-            'ceph-coverage',
-            coverage_dir,
-            'ceph',
-            '--cluster', cluster_name,
-            'mds', 'set_max_mds', str(num_active)])
-        mon_remote.run(
-            args=[
-                'sudo',
-                'adjust-ulimits',
-                'ceph-coverage',
-                coverage_dir,
-                'ceph', 'mds', 'set', 'allow_dirfrags', 'true',
-                '--yes-i-really-mean-it'],
-           check_status=False,  # probably old version, upgrade test
-        )
+
+        fs.set_allow_multimds(True)
+        fs.set_max_mds(num_active)
+        fs.set_allow_dirfrags(True)
 
     yield
 
diff --git a/qa/tasks/cephfs/filesystem.py b/qa/tasks/cephfs/filesystem.py
index 554377e5a9dc0222b0bdd157c14f65f5dcd46405..0e627612742456f8d6fd6650e1621db58fe48942 100644
@@ -416,6 +416,12 @@ class Filesystem(MDSCluster):
     def set_max_mds(self, max_mds):
         self.mon_manager.raw_cluster_cmd("fs", "set", self.name, "max_mds", "%d" % max_mds)
 
+    def set_allow_dirfrags(self, yes):
+        self.mon_manager.raw_cluster_cmd("fs", "set", self.name, "allow_dirfrags", str(yes).lower(), '--yes-i-really-mean-it')
+
+    def set_allow_multimds(self, yes):
+        self.mon_manager.raw_cluster_cmd("fs", "set", self.name, "allow_multimds", str(yes).lower(), '--yes-i-really-mean-it')
+
     def get_pgs_per_fs_pool(self):
         """
         Calculate how many PGs to use when creating a pool, in order to avoid raising any
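One detail worth noting in the new setters: the `fs set` command wants the lowercase tokens `true`/`false`, while Python's `str()` of a bool capitalizes, hence `str(yes).lower()`. A two-line check of the idiom:

    # str(True) == "True"; the setters lowercase it to produce the token
    # the "ceph fs set" CLI expects.
    assert str(True).lower() == "true"
    assert str(False).lower() == "false"
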
diff --git a/qa/tasks/cephfs/test_data_scan.py b/qa/tasks/cephfs/test_data_scan.py
index 44280398821ffc11df842f311f072c0b43523c6f..e4608714fe93af8d518baed9e85766400b607db1 100644
@@ -426,8 +426,7 @@ class TestDataScan(CephFSTestCase):
         That when injecting a dentry into a fragmented directory, we put it in the right fragment.
         """
 
-        self.fs.mon_manager.raw_cluster_cmd("mds", "set", "allow_dirfrags", "true",
-                                            "--yes-i-really-mean-it")
+        self.fs.set_allow_dirfrags(True)
 
         file_count = 100
         file_names = ["%s" % n for n in range(0, file_count)]
diff --git a/qa/tasks/cephfs/test_failover.py b/qa/tasks/cephfs/test_failover.py
index 77d2fbdf3a778228bc9e9a9357d20165e5d4e3fa..18a35bbba04d4e7ea1cb0ab70c93e514e67348f7 100644
@@ -217,10 +217,8 @@ class TestStandbyReplay(CephFSTestCase):
 
         # Create FS alpha and get mds_a to come up as active
         fs_a = self.mds_cluster.newfs("alpha")
-        fs_a.mon_manager.raw_cluster_cmd('fs', 'set', fs_a.name,
-                                         'allow_multimds', "true",
-                                         "--yes-i-really-mean-it")
-        fs_a.mon_manager.raw_cluster_cmd('fs', 'set', fs_a.name, 'max_mds', "2")
+        fs_a.set_allow_multimds(True)
+        fs_a.set_max_mds(2)
 
         self.mds_cluster.mds_restart(mds_a)
         self.wait_until_equal(lambda: fs_a.get_active_names(), [mds_a], 30)
@@ -239,7 +237,7 @@ class TestStandbyReplay(CephFSTestCase):
         self.assertEqual(info_a_s['state'], "up:standby-replay")
 
         # Shrink the cluster
-        fs_a.mon_manager.raw_cluster_cmd('fs', 'set', fs_a.name, 'max_mds', "1")
+        fs_a.set_max_mds(1)
         fs_a.mon_manager.raw_cluster_cmd("mds", "stop", "{0}:1".format(fs_a.name))
         self.wait_until_equal(
             lambda: fs_a.get_active_names(), [mds_a],
@@ -374,32 +372,27 @@ class TestMultiFilesystems(CephFSTestCase):
     def test_grow_shrink(self):
         # Usual setup...
         fs_a, fs_b = self._setup_two()
-        fs_a.mon_manager.raw_cluster_cmd("fs", "set", fs_a.name,
-                                         "allow_multimds", "true",
-                                         "--yes-i-really-mean-it")
-
-        fs_b.mon_manager.raw_cluster_cmd("fs", "set", fs_b.name,
-                                         "allow_multimds", "true",
-                                         "--yes-i-really-mean-it")
+        fs_a.set_allow_multimds(True)
+        fs_b.set_allow_multimds(True)
 
         # Increase max_mds on fs_b, see a standby take up the role
-        fs_b.mon_manager.raw_cluster_cmd('fs', 'set', fs_b.name, 'max_mds', "2")
+        fs_b.set_max_mds(2)
         self.wait_until_equal(lambda: len(fs_b.get_active_names()), 2, 30,
                               reject_fn=lambda v: v > 2 or v < 1)
 
         # Increase max_mds on fs_a, see a standby take up the role
-        fs_a.mon_manager.raw_cluster_cmd('fs', 'set', fs_a.name, 'max_mds', "2")
+        fs_a.set_max_mds(2)
         self.wait_until_equal(lambda: len(fs_a.get_active_names()), 2, 30,
                               reject_fn=lambda v: v > 2 or v < 1)
 
         # Shrink fs_b back to 1, see a daemon go back to standby
-        fs_b.mon_manager.raw_cluster_cmd('fs', 'set', fs_b.name, 'max_mds', "1")
-        fs_b.mon_manager.raw_cluster_cmd('mds', 'deactivate', "{0}:1".format(fs_b.name))
+        fs_b.set_max_mds(1)
+        fs_b.deactivate(1)
         self.wait_until_equal(lambda: len(fs_b.get_active_names()), 1, 30,
                               reject_fn=lambda v: v > 2 or v < 1)
 
         # Grow fs_a up to 3, see the former fs_b daemon join it.
-        fs_a.mon_manager.raw_cluster_cmd('fs', 'set', fs_a.name, 'max_mds', "3")
+        fs_a.set_max_mds(3)
         self.wait_until_equal(lambda: len(fs_a.get_active_names()), 3, 60,
                               reject_fn=lambda v: v > 3 or v < 2)
 
@@ -537,19 +530,13 @@ class TestMultiFilesystems(CephFSTestCase):
 
         # Create two filesystems which should have two ranks each
         fs_a = self.mds_cluster.newfs("alpha")
-        fs_a.mon_manager.raw_cluster_cmd("fs", "set", fs_a.name,
-                                         "allow_multimds", "true",
-                                         "--yes-i-really-mean-it")
+        fs_a.set_allow_multimds(True)
 
         fs_b = self.mds_cluster.newfs("bravo")
-        fs_b.mon_manager.raw_cluster_cmd("fs", "set", fs_b.name,
-                                         "allow_multimds", "true",
-                                         "--yes-i-really-mean-it")
-
-        fs_a.mon_manager.raw_cluster_cmd('fs', 'set', fs_a.name,
-                                         'max_mds', "2")
-        fs_b.mon_manager.raw_cluster_cmd('fs', 'set', fs_b.name,
-                                         'max_mds', "2")
+        fs_b.set_allow_multimds(True)
+
+        fs_a.set_max_mds(2)
+        fs_b.set_max_mds(2)
 
         # Set all the daemons to have a FSCID assignment but no other
         # standby preferences.
diff --git a/qa/tasks/cephfs/test_fragment.py b/qa/tasks/cephfs/test_fragment.py
index 62412470f12bac206e35dc45176c5c899f7528d6..81b7ec630e6e8f98b193b1b2ff7abe80df8fcb48 100644
@@ -38,9 +38,7 @@ class TestFragmentation(CephFSTestCase):
         for k, v in kwargs.items():
             self.ceph_cluster.set_ceph_conf("mds", k, v.__str__())
 
-        self.fs.mon_manager.raw_cluster_cmd("fs", "set", self.fs.name,
-                                            "allow_dirfrags", "true",
-                                            "--yes-i-really-mean-it")
+        self.fs.set_allow_dirfrags(True)
 
         self.mds_cluster.mds_fail_restart()
         self.fs.wait_for_daemons()
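The kwargs loop at the top of this hunk turns keyword arguments into `[mds]` config entries before dirfrags are enabled and the daemons restarted. A standalone rendering of that pattern, under the assumption that it mirrors the enclosing helper (the option names below are real mds settings; the values are arbitrary examples):

    # Assumed standalone equivalent of the kwargs -> mds conf pattern above.
    def apply_mds_settings(ceph_cluster, **kwargs):
        for k, v in kwargs.items():
            # every keyword becomes an entry in the [mds] conf section
            ceph_cluster.set_ceph_conf("mds", k, str(v))

    # e.g.: apply_mds_settings(self.ceph_cluster,
    #                          mds_bal_split_size=100, mds_bal_merge_size=5)
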
diff --git a/qa/tasks/cephfs/test_journal_repair.py b/qa/tasks/cephfs/test_journal_repair.py
index 4b108061d41f890c35dfca3e1c4f4e7e86ddbeea..8496b144e1e46a3bee3124160439be0689f10ea7 100644
@@ -160,9 +160,8 @@ class TestJournalRepair(CephFSTestCase):
         """
 
         # Set max_mds to 2
-        self.fs.mon_manager.raw_cluster_cmd_result('mds', 'set', "allow_multimds",
-                                                   "true", "--yes-i-really-mean-it")
-        self.fs.mon_manager.raw_cluster_cmd_result('mds', 'set', "max_mds", "2")
+        self.fs.set_allow_multimds(True)
+        self.fs.set_max_mds(2)
 
         # See that we have two active MDSs
         self.wait_until_equal(lambda: len(self.fs.get_active_names()), 2, 30,
diff --git a/qa/tasks/cephfs/test_mantle.py b/qa/tasks/cephfs/test_mantle.py
index 723af47b118df9c5fb3f6c4bb98edb72856d7e63..8e0526332e65ef5592e0287cb56c62891c2ce213 100644
@@ -9,9 +9,8 @@ success = "mantle balancer version changed: "
 class TestMantle(CephFSTestCase):
     def start_mantle(self):
         self.wait_for_health_clear(timeout=30)
-        self.fs.mon_manager.raw_cluster_cmd_result('mds', 'set', "allow_multimds",
-                                                   "true", "--yes-i-really-mean-it")
-        self.fs.mon_manager.raw_cluster_cmd_result('mds', 'set', "max_mds", "2")
+        self.fs.set_allow_multimds(True)
+        self.fs.set_max_mds(2)
         self.wait_until_equal(lambda: len(self.fs.get_active_names()), 2, 30,
                               reject_fn=lambda v: v > 2 or v < 1)
 
diff --git a/qa/tasks/cephfs/test_sessionmap.py b/qa/tasks/cephfs/test_sessionmap.py
index 84abbaaf5663eed37e0f249c0f83cc5a0105a6b8..e9b4b646d8f184ac3638c15d64044303c6e39af2 100644
@@ -99,9 +99,8 @@ class TestSessionMap(CephFSTestCase):
         self.fs.wait_for_daemons()
 
         # I would like two MDSs, so that I can do an export dir later
-        self.fs.mon_manager.raw_cluster_cmd_result('mds', 'set', "allow_multimds",
-                                                   "true", "--yes-i-really-mean-it")
-        self.fs.mon_manager.raw_cluster_cmd_result('mds', 'set', "max_mds", "2")
+        self.fs.set_allow_multimds(True)
+        self.fs.set_max_mds(2)
         self.fs.wait_for_daemons()
 
         active_mds_names = self.fs.get_active_names()
diff --git a/qa/tasks/cephfs/test_strays.py b/qa/tasks/cephfs/test_strays.py
index 467932b6e4fdc82929d357352b3dd839c07ce307..7166725d39d7d773616243383ad5df7c5843a8d9 100644
@@ -415,9 +415,8 @@ class TestStrays(CephFSTestCase):
         """
 
         # Set up two MDSs
-        self.fs.mon_manager.raw_cluster_cmd_result('mds', 'set', "allow_multimds",
-                                                   "true", "--yes-i-really-mean-it")
-        self.fs.mon_manager.raw_cluster_cmd_result('mds', 'set', "max_mds", "2")
+        self.fs.set_allow_multimds(True)
+        self.fs.set_max_mds(2)
 
         # See that we have two active MDSs
         self.wait_until_equal(lambda: len(self.fs.get_active_names()), 2, 30,
@@ -486,8 +485,8 @@ class TestStrays(CephFSTestCase):
         self.assertTrue(self.fs.data_objects_present(ino, size_mb * 1024 * 1024))
 
         # Shut down rank 1
-        self.fs.mon_manager.raw_cluster_cmd_result('mds', 'set', "max_mds", "1")
-        self.fs.mon_manager.raw_cluster_cmd_result('mds', 'deactivate', "1")
+        self.fs.set_max_mds(1)
+        self.fs.deactivate(1)
 
         # Wait til we get to a single active MDS mdsmap state
         def is_stopped():
@@ -693,7 +692,7 @@ class TestStrays(CephFSTestCase):
         That unlinking fails when the stray directory fragment becomes too large and that unlinking may continue once those strays are purged.
         """
 
-        self.fs.mon_manager.raw_cluster_cmd("mds", "set", "allow_dirfrags", "true", "--yes-i-really-mean-it")
+        self.fs.set_allow_dirfrags(True)
 
         LOW_LIMIT = 50
         for mds in self.fs.get_daemon_names():