qa/cephfs: add and use get_ceph_cmd_result()
author     Rishabh Dave <ridave@redhat.com>
           Tue, 14 Mar 2023 19:43:56 +0000 (01:13 +0530)
committer  Rishabh Dave <ridave@redhat.com>
           Wed, 13 Mar 2024 13:42:30 +0000 (19:12 +0530)
To run a command and get its return value, instead of typing something
as long as "self.mds_cluster.mon_manager.raw_cluster_cmd_result", add a
helper method to CephFSTestCase and use it. This reduces the call to a
simple "self.get_ceph_cmd_result()".

Also, remove CephFSTestCase.run_cluster_cmd_result() in favour of this
new method.

Signed-off-by: Rishabh Dave <ridave@redhat.com>
(cherry picked from commit 82814ac49d31b0fc48ac4f3ec659a9f0a822acd2)
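
For illustration only (this is not part of the commit), here is a minimal,
self-contained sketch of what the new helper does: it normalizes its
arguments and delegates to mon_manager.run_cluster_cmd(). FakeMonManager,
FakeResult and Demo are hypothetical stand-ins for teuthology's real
CephManager and test classes, used only so the sketch runs on its own:

    class FakeResult:
        def __init__(self, exitstatus):
            self.exitstatus = exitstatus

    class FakeMonManager:
        def run_cluster_cmd(self, **kwargs):
            # The real manager runs "ceph <args>" on a cluster node;
            # here we only echo the command and report success.
            print('would run: ceph', kwargs['args'])
            return FakeResult(0)

    class RunCephCmd:
        # Mirrors the mixin added by this commit.
        def get_ceph_cmd_result(self, *args, **kwargs):
            # Accept positional args ('osd', 'dump') or args=<str or list>.
            if kwargs.get('args') is None and args:
                if len(args) == 1:
                    args = args[0]
                kwargs['args'] = args
            return self.mon_manager.run_cluster_cmd(**kwargs).exitstatus

    class Demo(RunCephCmd):
        def __init__(self):
            self.mon_manager = FakeMonManager()

    demo = Demo()
    assert demo.get_ceph_cmd_result('osd', 'dump') == 0       # positional form
    assert demo.get_ceph_cmd_result(args='osd pool ls') == 0  # keyword form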

qa/tasks/cephfs/cephfs_test_case.py
qa/tasks/cephfs/test_failover.py
qa/tasks/cephfs/test_mantle.py
qa/tasks/cephfs/test_mds_metrics.py
qa/tasks/cephfs/test_mirroring.py
qa/tasks/cephfs/test_nfs.py
qa/tasks/cephfs/test_pool_perm.py
qa/tasks/cephfs/test_snapshots.py

diff --git a/qa/tasks/cephfs/cephfs_test_case.py b/qa/tasks/cephfs/cephfs_test_case.py
index d6563a14e5429ec8eeee481f8a9307bdfbdf47c3..240c9e224f4a1638bc1b277ddd83da41057e0ee2 100644
@@ -55,6 +55,16 @@ class MountDetails():
         mntobj.hostfs_mntpt = self.hostfs_mntpt
 
 
+class RunCephCmd:
+
+    def get_ceph_cmd_result(self, *args, **kwargs):
+        if kwargs.get('args') is None and args:
+            if len(args) == 1:
+                args = args[0]
+            kwargs['args'] = args
+        return self.mon_manager.run_cluster_cmd(**kwargs).exitstatus
+
+
 class CephFSTestCase(CephTestCase, RunCephCmd):
     """
     Test case for Ceph FS, requires caller to populate Filesystem and Mounts,
@@ -184,11 +194,11 @@ class CephFSTestCase(CephTestCase, RunCephCmd):
                        'osd', f'allow rw tag cephfs data={self.fs.name}',
                        'mds', 'allow']
 
-                if self.run_cluster_cmd_result(cmd) == 0:
+                if self.get_ceph_cmd_result(*cmd) == 0:
                     break
 
                 cmd[1] = 'add'
-                if self.run_cluster_cmd_result(cmd) != 0:
+                if self.get_ceph_cmd_result(*cmd) != 0:
                     raise RuntimeError(f'Failed to create new client {cmd[2]}')
 
             # wait for ranks to become active
@@ -429,11 +439,6 @@ class CephFSTestCase(CephTestCase, RunCephCmd):
             cmd = shlex_split(cmd)
         return self.fs.mon_manager.raw_cluster_cmd(*cmd)
 
-    def run_cluster_cmd_result(self, cmd):
-        if isinstance(cmd, str):
-            cmd = shlex_split(cmd)
-        return self.fs.mon_manager.raw_cluster_cmd_result(*cmd)
-
     def create_client(self, client_id, moncap=None, osdcap=None, mdscap=None):
         if not (moncap or osdcap or mdscap):
             if self.fs:
diff --git a/qa/tasks/cephfs/test_failover.py b/qa/tasks/cephfs/test_failover.py
index ddcc58cccc5076565084fd670a4c5cf8ff24cb7f..0e4dd428fd6eb64535e07903ac0fb3d2a427e512 100644
@@ -701,7 +701,7 @@ class TestMultiFilesystems(CephFSTestCase):
 
         # Reconfigure client auth caps
         for mount in self.mounts:
-            self.mds_cluster.mon_manager.raw_cluster_cmd_result(
+            self.get_ceph_cmd_result(
                 'auth', 'caps', "client.{0}".format(mount.client_id),
                 'mds', 'allow',
                 'mon', 'allow r',
diff --git a/qa/tasks/cephfs/test_mantle.py b/qa/tasks/cephfs/test_mantle.py
index 746c2ffe371c6ae17fb0ccecea6f98b358d7de68..6a3c17d4360f694f1d95f4bf40c5f3c59384ed7a 100644
@@ -22,7 +22,7 @@ class TestMantle(CephFSTestCase):
             self.fs.mds_asok(['config', 'set', 'debug_mds_balancer', '5'], mds_id=m)
 
     def push_balancer(self, obj, lua_code, expect):
-        self.fs.mon_manager.raw_cluster_cmd_result('fs', 'set', self.fs.name, 'balancer', obj)
+        self.get_ceph_cmd_result('fs', 'set', self.fs.name, 'balancer', obj)
         self.fs.radosm(["put", obj, "-"], stdin=StringIO(lua_code))
         with self.assert_cluster_log(failure + obj + " " + expect):
             log.info("run a " + obj + " balancer that expects=" + expect)
@@ -31,16 +31,16 @@ class TestMantle(CephFSTestCase):
         self.start_mantle()
         expect = " : (2) No such file or directory"
 
-        ret = self.fs.mon_manager.raw_cluster_cmd_result('fs', 'set', self.fs.name, 'balancer')
+        ret = self.get_ceph_cmd_result('fs', 'set', self.fs.name, 'balancer')
         assert(ret == 22) # EINVAL
 
-        self.fs.mon_manager.raw_cluster_cmd_result('fs', 'set', self.fs.name, 'balancer', " ")
+        self.get_ceph_cmd_result('fs', 'set', self.fs.name, 'balancer', " ")
         with self.assert_cluster_log(failure + " " + expect): pass
 
     def test_version_not_in_rados(self):
         self.start_mantle()
         expect = failure + "ghost.lua : (2) No such file or directory"
-        self.fs.mon_manager.raw_cluster_cmd_result('fs', 'set', self.fs.name, 'balancer', "ghost.lua")
+        self.get_ceph_cmd_result('fs', 'set', self.fs.name, 'balancer', "ghost.lua")
         with self.assert_cluster_log(expect): pass
 
     def test_balancer_invalid(self):
@@ -59,7 +59,7 @@ class TestMantle(CephFSTestCase):
     def test_balancer_valid(self):
         self.start_mantle()
         lua_code = "BAL_LOG(0, \"test\")\nreturn {3, 4}"
-        self.fs.mon_manager.raw_cluster_cmd_result('fs', 'set', self.fs.name, 'balancer', "valid.lua")
+        self.get_ceph_cmd_result('fs', 'set', self.fs.name, 'balancer', "valid.lua")
         self.fs.radosm(["put", "valid.lua", "-"], stdin=StringIO(lua_code))
         with self.assert_cluster_log(success + "valid.lua"):
             log.info("run a valid.lua balancer")
@@ -96,11 +96,11 @@ class TestMantle(CephFSTestCase):
         # kill the OSDs so that the balancer pull from RADOS times out
         osd_map = json.loads(self.fs.mon_manager.raw_cluster_cmd('osd', 'dump', '--format=json-pretty'))
         for i in range(0, len(osd_map['osds'])):
-          self.fs.mon_manager.raw_cluster_cmd_result('osd', 'down', str(i))
-          self.fs.mon_manager.raw_cluster_cmd_result('osd', 'out', str(i))
+          self.get_ceph_cmd_result('osd', 'down', str(i))
+          self.get_ceph_cmd_result('osd', 'out', str(i))
 
         # trigger a pull from RADOS
-        self.fs.mon_manager.raw_cluster_cmd_result('fs', 'set', self.fs.name, 'balancer', "valid.lua")
+        self.get_ceph_cmd_result('fs', 'set', self.fs.name, 'balancer', "valid.lua")
 
         # make the timeout a little longer since dead OSDs spam ceph -w
         with self.assert_cluster_log(failure + "valid.lua" + expect, timeout=30):
@@ -108,4 +108,4 @@ class TestMantle(CephFSTestCase):
 
         # cleanup
         for i in range(0, len(osd_map['osds'])):
-          self.fs.mon_manager.raw_cluster_cmd_result('osd', 'in', str(i))
+          self.get_ceph_cmd_result('osd', 'in', str(i))
diff --git a/qa/tasks/cephfs/test_mds_metrics.py b/qa/tasks/cephfs/test_mds_metrics.py
index ad877f62280a7e3781fa91d366b5a31e9748789b..4fb2f969b382579d8ea993fc33bd072c0ea1b491 100644
@@ -115,7 +115,7 @@ class TestMDSMetrics(CephFSTestCase):
 
         # Reconfigure client auth caps
         for mount in self.mounts:
-            self.mds_cluster.mon_manager.raw_cluster_cmd_result(
+            self.get_ceph_cmd_result(
                 'auth', 'caps', f"client.{mount.client_id}",
                 'mds', 'allow',
                 'mon', 'allow r',
diff --git a/qa/tasks/cephfs/test_mirroring.py b/qa/tasks/cephfs/test_mirroring.py
index be8817d749be5d135d35eaee13740ca73ac0db53..66db771a6b79a4df6a7766d1fa1c258ddb1a3eaf 100644
@@ -466,12 +466,13 @@ class TestMirroring(CephFSTestCase):
 
     def test_cephfs_mirror_stats(self):
         log.debug('reconfigure client auth caps')
-        self.mds_cluster.mon_manager.raw_cluster_cmd_result(
+        self.get_ceph_cmd_result(
             'auth', 'caps', "client.{0}".format(self.mount_b.client_id),
                 'mds', 'allow rw',
                 'mon', 'allow r',
                 'osd', 'allow rw pool={0}, allow rw pool={1}'.format(
-                    self.backup_fs.get_data_pool_name(), self.backup_fs.get_data_pool_name()))
+                    self.backup_fs.get_data_pool_name(),
+                    self.backup_fs.get_data_pool_name()))
 
         log.debug(f'mounting filesystem {self.secondary_fs_name}')
         self.mount_b.umount_wait()
@@ -532,12 +533,13 @@ class TestMirroring(CephFSTestCase):
 
     def test_cephfs_mirror_cancel_sync(self):
         log.debug('reconfigure client auth caps')
-        self.mds_cluster.mon_manager.raw_cluster_cmd_result(
+        self.get_ceph_cmd_result(
             'auth', 'caps', "client.{0}".format(self.mount_b.client_id),
                 'mds', 'allow rw',
                 'mon', 'allow r',
                 'osd', 'allow rw pool={0}, allow rw pool={1}'.format(
-                    self.backup_fs.get_data_pool_name(), self.backup_fs.get_data_pool_name()))
+                    self.backup_fs.get_data_pool_name(),
+                    self.backup_fs.get_data_pool_name()))
 
         log.debug(f'mounting filesystem {self.secondary_fs_name}')
         self.mount_b.umount_wait()
@@ -568,12 +570,13 @@ class TestMirroring(CephFSTestCase):
 
     def test_cephfs_mirror_restart_sync_on_blocklist(self):
         log.debug('reconfigure client auth caps')
-        self.mds_cluster.mon_manager.raw_cluster_cmd_result(
+        self.get_ceph_cmd_result(
             'auth', 'caps', "client.{0}".format(self.mount_b.client_id),
                 'mds', 'allow rw',
                 'mon', 'allow r',
                 'osd', 'allow rw pool={0}, allow rw pool={1}'.format(
-                    self.backup_fs.get_data_pool_name(), self.backup_fs.get_data_pool_name()))
+                    self.backup_fs.get_data_pool_name(),
+                    self.backup_fs.get_data_pool_name()))
 
         log.debug(f'mounting filesystem {self.secondary_fs_name}')
         self.mount_b.umount_wait()
@@ -803,12 +806,13 @@ class TestMirroring(CephFSTestCase):
 
     def test_cephfs_mirror_symlink_sync(self):
         log.debug('reconfigure client auth caps')
-        self.mds_cluster.mon_manager.raw_cluster_cmd_result(
+        self.get_ceph_cmd_result(
             'auth', 'caps', "client.{0}".format(self.mount_b.client_id),
                 'mds', 'allow rw',
                 'mon', 'allow r',
                 'osd', 'allow rw pool={0}, allow rw pool={1}'.format(
-                    self.backup_fs.get_data_pool_name(), self.backup_fs.get_data_pool_name()))
+                    self.backup_fs.get_data_pool_name(),
+                    self.backup_fs.get_data_pool_name()))
 
         log.debug(f'mounting filesystem {self.secondary_fs_name}')
         self.mount_b.umount_wait()
@@ -940,12 +944,13 @@ class TestMirroring(CephFSTestCase):
     def test_cephfs_mirror_incremental_sync(self):
         """ Test incremental snapshot synchronization (based on mtime differences)."""
         log.debug('reconfigure client auth caps')
-        self.mds_cluster.mon_manager.raw_cluster_cmd_result(
+        self.get_ceph_cmd_result(
             'auth', 'caps', "client.{0}".format(self.mount_b.client_id),
             'mds', 'allow rw',
             'mon', 'allow r',
             'osd', 'allow rw pool={0}, allow rw pool={1}'.format(
-                self.backup_fs.get_data_pool_name(), self.backup_fs.get_data_pool_name()))
+                self.backup_fs.get_data_pool_name(),
+                self.backup_fs.get_data_pool_name()))
         log.debug(f'mounting filesystem {self.secondary_fs_name}')
         self.mount_b.umount_wait()
         self.mount_b.mount_wait(cephfs_name=self.secondary_fs_name)
@@ -1018,12 +1023,13 @@ class TestMirroring(CephFSTestCase):
         file_z |   sym          dir         reg         sym
         """
         log.debug('reconfigure client auth caps')
-        self.mds_cluster.mon_manager.raw_cluster_cmd_result(
+        self.get_ceph_cmd_result(
             'auth', 'caps', "client.{0}".format(self.mount_b.client_id),
                 'mds', 'allow rw',
                 'mon', 'allow r',
                 'osd', 'allow rw pool={0}, allow rw pool={1}'.format(
-                    self.backup_fs.get_data_pool_name(), self.backup_fs.get_data_pool_name()))
+                    self.backup_fs.get_data_pool_name(),
+                    self.backup_fs.get_data_pool_name()))
         log.debug(f'mounting filesystem {self.secondary_fs_name}')
         self.mount_b.umount_wait()
         self.mount_b.mount_wait(cephfs_name=self.secondary_fs_name)
@@ -1089,12 +1095,13 @@ class TestMirroring(CephFSTestCase):
         """
 
         log.debug('reconfigure client auth caps')
-        self.mds_cluster.mon_manager.raw_cluster_cmd_result(
+        self.get_ceph_cmd_result(
             'auth', 'caps', "client.{0}".format(self.mount_b.client_id),
             'mds', 'allow rw',
             'mon', 'allow r',
             'osd', 'allow rw pool={0}, allow rw pool={1}'.format(
-                self.backup_fs.get_data_pool_name(), self.backup_fs.get_data_pool_name()))
+                self.backup_fs.get_data_pool_name(),
+                self.backup_fs.get_data_pool_name()))
         log.debug(f'mounting filesystem {self.secondary_fs_name}')
         self.mount_b.umount_wait()
         self.mount_b.mount_wait(cephfs_name=self.secondary_fs_name)
@@ -1169,12 +1176,13 @@ class TestMirroring(CephFSTestCase):
         that all replayer threads (3 by default) in the mirror daemon are busy.
         """
         log.debug('reconfigure client auth caps')
-        self.mds_cluster.mon_manager.raw_cluster_cmd_result(
+        self.get_ceph_cmd_result(
             'auth', 'caps', "client.{0}".format(self.mount_b.client_id),
                 'mds', 'allow rw',
                 'mon', 'allow r',
                 'osd', 'allow rw pool={0}, allow rw pool={1}'.format(
-                    self.backup_fs.get_data_pool_name(), self.backup_fs.get_data_pool_name()))
+                    self.backup_fs.get_data_pool_name(),
+                    self.backup_fs.get_data_pool_name()))
 
         log.debug(f'mounting filesystem {self.secondary_fs_name}')
         self.mount_b.umount_wait()
@@ -1266,7 +1274,7 @@ class TestMirroring(CephFSTestCase):
         log.debug('reconfigure client auth caps')
         cid = self.mount_b.client_id
         data_pool = self.backup_fs.get_data_pool_name()
-        self.mds_cluster.mon_manager.raw_cluster_cmd_result(
+        self.get_ceph_cmd_result(
             'auth', 'caps', f"client.{cid}",
             'mds', 'allow rw',
             'mon', 'allow r',
diff --git a/qa/tasks/cephfs/test_nfs.py b/qa/tasks/cephfs/test_nfs.py
index cebe2872de0b29cd43601b97717d5343d0c0e132..c1b4cf5db985531235ca92c35d5d6c428554815b 100644
@@ -142,7 +142,7 @@ class TestNFS(MgrTestCase):
         :param cmd_args: nfs command arguments to be run
         '''
         cmd_func()
-        ret = self.mgr_cluster.mon_manager.raw_cluster_cmd_result(*cmd_args)
+        ret = self.get_ceph_cmd_result(*cmd_args)
         if ret != 0:
             self.fail("Idempotency test failed")
 
diff --git a/qa/tasks/cephfs/test_pool_perm.py b/qa/tasks/cephfs/test_pool_perm.py
index 9912debedee3a1cb87daa823fcfa7480758e76d0..b55052b826e8ce66a4cfd7f84481df95bd268ca2 100644
@@ -30,9 +30,9 @@ class TestPoolPerm(CephFSTestCase):
         client_name = "client.{0}".format(self.mount_a.client_id)
 
         # set data pool read only
-        self.fs.mon_manager.raw_cluster_cmd_result(
-            'auth', 'caps', client_name, 'mds', 'allow', 'mon', 'allow r', 'osd',
-            'allow r pool={0}'.format(self.fs.get_data_pool_name()))
+        self.get_ceph_cmd_result(
+            'auth', 'caps', client_name, 'mds', 'allow', 'mon', 'allow r',
+            'osd', 'allow r pool={0}'.format(self.fs.get_data_pool_name()))
 
         self.mount_a.umount_wait()
         self.mount_a.mount_wait()
@@ -41,9 +41,9 @@ class TestPoolPerm(CephFSTestCase):
         self.mount_a.run_python(remote_script.format(path=file_path, check_read=str(False)))
 
         # set data pool write only
-        self.fs.mon_manager.raw_cluster_cmd_result(
-            'auth', 'caps', client_name, 'mds', 'allow', 'mon', 'allow r', 'osd',
-            'allow w pool={0}'.format(self.fs.get_data_pool_name()))
+        self.get_ceph_cmd_result(
+            'auth', 'caps', client_name, 'mds', 'allow', 'mon', 'allow r',
+            'osd', 'allow w pool={0}'.format(self.fs.get_data_pool_name()))
 
         self.mount_a.umount_wait()
         self.mount_a.mount_wait()
@@ -66,7 +66,7 @@ class TestPoolPerm(CephFSTestCase):
         self.mount_a.run_shell(["mkdir", "layoutdir"])
 
         # Set MDS 'rw' perms: missing 'p' means no setting pool layouts
-        self.fs.mon_manager.raw_cluster_cmd_result(
+        self.get_ceph_cmd_result(
             'auth', 'caps', client_name, 'mds', 'allow rw', 'mon', 'allow r',
             'osd',
             'allow rw pool={0},allow rw pool={1}'.format(
@@ -86,7 +86,7 @@ class TestPoolPerm(CephFSTestCase):
         self.mount_a.umount_wait()
 
         # Set MDS 'rwp' perms: should now be able to set layouts
-        self.fs.mon_manager.raw_cluster_cmd_result(
+        self.get_ceph_cmd_result(
             'auth', 'caps', client_name, 'mds', 'allow rwp', 'mon', 'allow r',
             'osd',
             'allow rw pool={0},allow rw pool={1}'.format(
@@ -101,7 +101,7 @@ class TestPoolPerm(CephFSTestCase):
         self.mount_a.umount_wait()
 
     def tearDown(self):
-        self.fs.mon_manager.raw_cluster_cmd_result(
+        self.get_ceph_cmd_result(
             'auth', 'caps', "client.{0}".format(self.mount_a.client_id),
             'mds', 'allow', 'mon', 'allow r', 'osd',
             'allow rw pool={0}'.format(self.fs.get_data_pool_names()[0]))
diff --git a/qa/tasks/cephfs/test_snapshots.py b/qa/tasks/cephfs/test_snapshots.py
index 608dcc81f1505927b1697a843ca5700dd55b6a8b..29dc06066ad4dbc219ea5ff866ac79da325f2253 100644
@@ -566,40 +566,40 @@ class TestMonSnapsAndFsPools(CephFSTestCase):
         """
         test_pool_name = 'snap-test-pool'
         base_cmd = f'osd pool create {test_pool_name}'
-        ret = self.run_cluster_cmd_result(base_cmd)
+        ret = self.get_ceph_cmd_result(args=base_cmd, check_status=False)
         self.assertEqual(ret, 0)
 
         self.fs.rados(["mksnap", "snap3"], pool=test_pool_name)
 
         base_cmd = f'fs add_data_pool {self.fs.name} {test_pool_name}'
-        ret = self.run_cluster_cmd_result(base_cmd)
+        ret = self.get_ceph_cmd_result(args=base_cmd, check_status=False)
         self.assertEqual(ret, errno.EOPNOTSUPP)
 
         # cleanup
         self.fs.rados(["rmsnap", "snap3"], pool=test_pool_name)
         base_cmd = f'osd pool delete {test_pool_name}'
-        ret = self.run_cluster_cmd_result(base_cmd)
+        ret = self.get_ceph_cmd_result(args=base_cmd, check_status=False)
 
     def test_using_pool_with_snap_fails_fs_creation(self):
         """
         Test that using a pool with snaps for fs creation fails
         """
         base_cmd = 'osd pool create test_data_pool'
-        ret = self.run_cluster_cmd_result(base_cmd)
+        ret = self.get_ceph_cmd_result(args=base_cmd, check_status=False)
         self.assertEqual(ret, 0)
         base_cmd = 'osd pool create test_metadata_pool'
-        ret = self.run_cluster_cmd_result(base_cmd)
+        ret = self.get_ceph_cmd_result(args=base_cmd, check_status=False)
         self.assertEqual(ret, 0)
 
         self.fs.rados(["mksnap", "snap4"], pool='test_data_pool')
 
         base_cmd = 'fs new testfs test_metadata_pool test_data_pool'
-        ret = self.run_cluster_cmd_result(base_cmd)
+        ret = self.get_ceph_cmd_result(args=base_cmd, check_status=False)
         self.assertEqual(ret, errno.EOPNOTSUPP)
 
         # cleanup
         self.fs.rados(["rmsnap", "snap4"], pool='test_data_pool')
         base_cmd = 'osd pool delete test_data_pool'
-        ret = self.run_cluster_cmd_result(base_cmd)
+        ret = self.get_ceph_cmd_result(args=base_cmd, check_status=False)
         base_cmd = 'osd pool delete test_metadata_pool'
-        ret = self.run_cluster_cmd_result(base_cmd)
+        ret = self.get_ceph_cmd_result(args=base_cmd, check_status=False)