From: Rishabh Dave
Date: Tue, 14 Mar 2023 19:43:56 +0000 (+0530)
Subject: qa/cephfs: add and use get_ceph_cmd_result()
X-Git-Tag: testing/wip-pdonnell-testing-20240430.123648-reef-debug~45^2~10
X-Git-Url: http://git.apps.os.sepia.ceph.com/?a=commitdiff_plain;h=6efd266e469e159e06884a46823055e5cba2b266;p=ceph-ci.git

qa/cephfs: add and use get_ceph_cmd_result()

To run a command and get its return value, instead of typing something
as long as "self.mds_cluster.mon_manager.raw_cluster_cmd_result", add a
helper method in CephFSTestCase and use it. This makes the task very
simple: "self.get_ceph_cmd_result()".

Also, remove method CephFSTestCase.run_cluster_cmd_result() in favour
of this new method.

Signed-off-by: Rishabh Dave
(cherry picked from commit 82814ac49d31b0fc48ac4f3ec659a9f0a822acd2)
---

diff --git a/qa/tasks/cephfs/cephfs_test_case.py b/qa/tasks/cephfs/cephfs_test_case.py
index d6563a14e54..240c9e224f4 100644
--- a/qa/tasks/cephfs/cephfs_test_case.py
+++ b/qa/tasks/cephfs/cephfs_test_case.py
@@ -55,6 +55,16 @@ class MountDetails():
         mntobj.hostfs_mntpt = self.hostfs_mntpt


+class RunCephCmd:
+
+    def get_ceph_cmd_result(self, *args, **kwargs):
+        if kwargs.get('args') is None and args:
+            if len(args) == 1:
+                args = args[0]
+            kwargs['args'] = args
+        return self.mon_manager.run_cluster_cmd(**kwargs).exitstatus
+
+
 class CephFSTestCase(CephTestCase, RunCephCmd):
     """
     Test case for Ceph FS, requires caller to populate Filesystem and Mounts,
@@ -184,11 +194,11 @@ class CephFSTestCase(CephTestCase, RunCephCmd):
                    'osd', f'allow rw tag cephfs data={self.fs.name}',
                    'mds', 'allow']
-            if self.run_cluster_cmd_result(cmd) == 0:
+            if self.get_ceph_cmd_result(*cmd) == 0:
                 break

             cmd[1] = 'add'
-            if self.run_cluster_cmd_result(cmd) != 0:
+            if self.get_ceph_cmd_result(*cmd) != 0:
                 raise RuntimeError(f'Failed to create new client {cmd[2]}')

         # wait for ranks to become active
@@ -429,11 +439,6 @@ class CephFSTestCase(CephTestCase, RunCephCmd):
             cmd = shlex_split(cmd)
         return self.fs.mon_manager.raw_cluster_cmd(*cmd)

-    def run_cluster_cmd_result(self, cmd):
-        if isinstance(cmd, str):
-            cmd = shlex_split(cmd)
-        return self.fs.mon_manager.raw_cluster_cmd_result(*cmd)
-
     def create_client(self, client_id, moncap=None, osdcap=None, mdscap=None):
         if not (moncap or osdcap or mdscap):
             if self.fs:
diff --git a/qa/tasks/cephfs/test_failover.py b/qa/tasks/cephfs/test_failover.py
index 9461c558cb9..c9ac08e9b29 100644
--- a/qa/tasks/cephfs/test_failover.py
+++ b/qa/tasks/cephfs/test_failover.py
@@ -732,7 +732,7 @@ class TestMultiFilesystems(CephFSTestCase):

         # Reconfigure client auth caps
         for mount in self.mounts:
-            self.mds_cluster.mon_manager.raw_cluster_cmd_result(
+            self.get_ceph_cmd_result(
                 'auth', 'caps', "client.{0}".format(mount.client_id),
                 'mds', 'allow',
                 'mon', 'allow r',
diff --git a/qa/tasks/cephfs/test_mantle.py b/qa/tasks/cephfs/test_mantle.py
index 746c2ffe371..6a3c17d4360 100644
--- a/qa/tasks/cephfs/test_mantle.py
+++ b/qa/tasks/cephfs/test_mantle.py
@@ -22,7 +22,7 @@ class TestMantle(CephFSTestCase):
             self.fs.mds_asok(['config', 'set', 'debug_mds_balancer', '5'], mds_id=m)

     def push_balancer(self, obj, lua_code, expect):
-        self.fs.mon_manager.raw_cluster_cmd_result('fs', 'set', self.fs.name, 'balancer', obj)
+        self.get_ceph_cmd_result('fs', 'set', self.fs.name, 'balancer', obj)
         self.fs.radosm(["put", obj, "-"], stdin=StringIO(lua_code))
         with self.assert_cluster_log(failure + obj + " " + expect):
             log.info("run a " + obj + " balancer that expects=" + expect)
@@ -31,16 +31,16 @@ class TestMantle(CephFSTestCase):
         self.start_mantle()
         expect = " : (2) No such file or directory"

-        ret = self.fs.mon_manager.raw_cluster_cmd_result('fs', 'set', self.fs.name, 'balancer')
+        ret = self.get_ceph_cmd_result('fs', 'set', self.fs.name, 'balancer')
         assert(ret == 22) # EINVAL

-        self.fs.mon_manager.raw_cluster_cmd_result('fs', 'set', self.fs.name, 'balancer', " ")
+        self.get_ceph_cmd_result('fs', 'set', self.fs.name, 'balancer', " ")
         with self.assert_cluster_log(failure + " " + expect): pass

     def test_version_not_in_rados(self):
         self.start_mantle()
         expect = failure + "ghost.lua : (2) No such file or directory"
-        self.fs.mon_manager.raw_cluster_cmd_result('fs', 'set', self.fs.name, 'balancer', "ghost.lua")
+        self.get_ceph_cmd_result('fs', 'set', self.fs.name, 'balancer', "ghost.lua")
         with self.assert_cluster_log(expect): pass

     def test_balancer_invalid(self):
@@ -59,7 +59,7 @@ class TestMantle(CephFSTestCase):
     def test_balancer_valid(self):
         self.start_mantle()
         lua_code = "BAL_LOG(0, \"test\")\nreturn {3, 4}"
-        self.fs.mon_manager.raw_cluster_cmd_result('fs', 'set', self.fs.name, 'balancer', "valid.lua")
+        self.get_ceph_cmd_result('fs', 'set', self.fs.name, 'balancer', "valid.lua")
         self.fs.radosm(["put", "valid.lua", "-"], stdin=StringIO(lua_code))
         with self.assert_cluster_log(success + "valid.lua"):
             log.info("run a valid.lua balancer")
@@ -96,11 +96,11 @@ class TestMantle(CephFSTestCase):
         # kill the OSDs so that the balancer pull from RADOS times out
         osd_map = json.loads(self.fs.mon_manager.raw_cluster_cmd('osd', 'dump', '--format=json-pretty'))
         for i in range(0, len(osd_map['osds'])):
-          self.fs.mon_manager.raw_cluster_cmd_result('osd', 'down', str(i))
-          self.fs.mon_manager.raw_cluster_cmd_result('osd', 'out', str(i))
+          self.get_ceph_cmd_result('osd', 'down', str(i))
+          self.get_ceph_cmd_result('osd', 'out', str(i))

         # trigger a pull from RADOS
-        self.fs.mon_manager.raw_cluster_cmd_result('fs', 'set', self.fs.name, 'balancer', "valid.lua")
+        self.get_ceph_cmd_result('fs', 'set', self.fs.name, 'balancer', "valid.lua")

         # make the timeout a little longer since dead OSDs spam ceph -w
         with self.assert_cluster_log(failure + "valid.lua" + expect, timeout=30):
@@ -108,4 +108,4 @@ class TestMantle(CephFSTestCase):

         # cleanup
         for i in range(0, len(osd_map['osds'])):
-          self.fs.mon_manager.raw_cluster_cmd_result('osd', 'in', str(i))
+          self.get_ceph_cmd_result('osd', 'in', str(i))
diff --git a/qa/tasks/cephfs/test_mds_metrics.py b/qa/tasks/cephfs/test_mds_metrics.py
index ad877f62280..4fb2f969b38 100644
--- a/qa/tasks/cephfs/test_mds_metrics.py
+++ b/qa/tasks/cephfs/test_mds_metrics.py
@@ -115,7 +115,7 @@ class TestMDSMetrics(CephFSTestCase):

         # Reconfigure client auth caps
         for mount in self.mounts:
-            self.mds_cluster.mon_manager.raw_cluster_cmd_result(
+            self.get_ceph_cmd_result(
                 'auth', 'caps', f"client.{mount.client_id}",
                 'mds', 'allow',
                 'mon', 'allow r',
diff --git a/qa/tasks/cephfs/test_mirroring.py b/qa/tasks/cephfs/test_mirroring.py
index f4a9766fefc..45479cbc9df 100644
--- a/qa/tasks/cephfs/test_mirroring.py
+++ b/qa/tasks/cephfs/test_mirroring.py
@@ -525,12 +525,13 @@ class TestMirroring(CephFSTestCase):

     def test_cephfs_mirror_stats(self):
         log.debug('reconfigure client auth caps')
-        self.mds_cluster.mon_manager.raw_cluster_cmd_result(
+        self.get_ceph_cmd_result(
             'auth', 'caps', "client.{0}".format(self.mount_b.client_id),
                 'mds', 'allow rw',
                 'mon', 'allow r',
                 'osd', 'allow rw pool={0}, allow rw pool={1}'.format(
-                    self.backup_fs.get_data_pool_name(), self.backup_fs.get_data_pool_name()))
+                    self.backup_fs.get_data_pool_name(),
+                    self.backup_fs.get_data_pool_name()))

         log.debug(f'mounting filesystem {self.secondary_fs_name}')
         self.mount_b.umount_wait()
@@ -613,12 +614,13 @@ class TestMirroring(CephFSTestCase):

     def test_cephfs_mirror_cancel_sync(self):
         log.debug('reconfigure client auth caps')
-        self.mds_cluster.mon_manager.raw_cluster_cmd_result(
+        self.get_ceph_cmd_result(
             'auth', 'caps', "client.{0}".format(self.mount_b.client_id),
                 'mds', 'allow rw',
                 'mon', 'allow r',
                 'osd', 'allow rw pool={0}, allow rw pool={1}'.format(
-                    self.backup_fs.get_data_pool_name(), self.backup_fs.get_data_pool_name()))
+                    self.backup_fs.get_data_pool_name(),
+                    self.backup_fs.get_data_pool_name()))

         log.debug(f'mounting filesystem {self.secondary_fs_name}')
         self.mount_b.umount_wait()
@@ -655,12 +657,13 @@ class TestMirroring(CephFSTestCase):

     def test_cephfs_mirror_restart_sync_on_blocklist(self):
         log.debug('reconfigure client auth caps')
-        self.mds_cluster.mon_manager.raw_cluster_cmd_result(
+        self.get_ceph_cmd_result(
             'auth', 'caps', "client.{0}".format(self.mount_b.client_id),
                 'mds', 'allow rw',
                 'mon', 'allow r',
                 'osd', 'allow rw pool={0}, allow rw pool={1}'.format(
-                    self.backup_fs.get_data_pool_name(), self.backup_fs.get_data_pool_name()))
+                    self.backup_fs.get_data_pool_name(),
+                    self.backup_fs.get_data_pool_name()))

         log.debug(f'mounting filesystem {self.secondary_fs_name}')
         self.mount_b.umount_wait()
@@ -905,12 +908,13 @@ class TestMirroring(CephFSTestCase):

     def test_cephfs_mirror_symlink_sync(self):
         log.debug('reconfigure client auth caps')
-        self.mds_cluster.mon_manager.raw_cluster_cmd_result(
+        self.get_ceph_cmd_result(
             'auth', 'caps', "client.{0}".format(self.mount_b.client_id),
                 'mds', 'allow rw',
                 'mon', 'allow r',
                 'osd', 'allow rw pool={0}, allow rw pool={1}'.format(
-                    self.backup_fs.get_data_pool_name(), self.backup_fs.get_data_pool_name()))
+                    self.backup_fs.get_data_pool_name(),
+                    self.backup_fs.get_data_pool_name()))

         log.debug(f'mounting filesystem {self.secondary_fs_name}')
         self.mount_b.umount_wait()
@@ -1066,12 +1070,13 @@ class TestMirroring(CephFSTestCase):
     def test_cephfs_mirror_incremental_sync(self):
         """ Test incremental snapshot synchronization (based on mtime differences)."""
         log.debug('reconfigure client auth caps')
-        self.mds_cluster.mon_manager.raw_cluster_cmd_result(
+        self.get_ceph_cmd_result(
             'auth', 'caps', "client.{0}".format(self.mount_b.client_id),
                 'mds', 'allow rw',
                 'mon', 'allow r',
                 'osd', 'allow rw pool={0}, allow rw pool={1}'.format(
-                    self.backup_fs.get_data_pool_name(), self.backup_fs.get_data_pool_name()))
+                    self.backup_fs.get_data_pool_name(),
+                    self.backup_fs.get_data_pool_name()))
         log.debug(f'mounting filesystem {self.secondary_fs_name}')
         self.mount_b.umount_wait()
         self.mount_b.mount_wait(cephfs_name=self.secondary_fs_name)
@@ -1157,12 +1162,13 @@ class TestMirroring(CephFSTestCase):
        file_z  | sym dir reg sym
         """
         log.debug('reconfigure client auth caps')
-        self.mds_cluster.mon_manager.raw_cluster_cmd_result(
+        self.get_ceph_cmd_result(
             'auth', 'caps', "client.{0}".format(self.mount_b.client_id),
                 'mds', 'allow rw',
                 'mon', 'allow r',
                 'osd', 'allow rw pool={0}, allow rw pool={1}'.format(
-                    self.backup_fs.get_data_pool_name(), self.backup_fs.get_data_pool_name()))
+                    self.backup_fs.get_data_pool_name(),
+                    self.backup_fs.get_data_pool_name()))
         log.debug(f'mounting filesystem {self.secondary_fs_name}')
         self.mount_b.umount_wait()
         self.mount_b.mount_wait(cephfs_name=self.secondary_fs_name)
@@ -1235,12 +1241,13 @@ class TestMirroring(CephFSTestCase):
         """

         log.debug('reconfigure client auth caps')
-        self.mds_cluster.mon_manager.raw_cluster_cmd_result(
+        self.get_ceph_cmd_result(
             'auth', 'caps', "client.{0}".format(self.mount_b.client_id),
                 'mds', 'allow rw',
                 'mon', 'allow r',
                 'osd', 'allow rw pool={0}, allow rw pool={1}'.format(
-                    self.backup_fs.get_data_pool_name(), self.backup_fs.get_data_pool_name()))
+                    self.backup_fs.get_data_pool_name(),
+                    self.backup_fs.get_data_pool_name()))
         log.debug(f'mounting filesystem {self.secondary_fs_name}')
         self.mount_b.umount_wait()
         self.mount_b.mount_wait(cephfs_name=self.secondary_fs_name)
@@ -1324,12 +1331,13 @@ class TestMirroring(CephFSTestCase):
         that all replayer threads (3 by default) in the mirror daemon are busy.
         """
         log.debug('reconfigure client auth caps')
-        self.mds_cluster.mon_manager.raw_cluster_cmd_result(
+        self.get_ceph_cmd_result(
             'auth', 'caps', "client.{0}".format(self.mount_b.client_id),
                 'mds', 'allow rw',
                 'mon', 'allow r',
                 'osd', 'allow rw pool={0}, allow rw pool={1}'.format(
-                    self.backup_fs.get_data_pool_name(), self.backup_fs.get_data_pool_name()))
+                    self.backup_fs.get_data_pool_name(),
+                    self.backup_fs.get_data_pool_name()))

         log.debug(f'mounting filesystem {self.secondary_fs_name}')
         self.mount_b.umount_wait()
@@ -1428,7 +1436,7 @@ class TestMirroring(CephFSTestCase):
         log.debug('reconfigure client auth caps')
         cid = self.mount_b.client_id
         data_pool = self.backup_fs.get_data_pool_name()
-        self.mds_cluster.mon_manager.raw_cluster_cmd_result(
+        self.get_ceph_cmd_result(
             'auth', 'caps', f"client.{cid}",
             'mds', 'allow rw',
             'mon', 'allow r',
diff --git a/qa/tasks/cephfs/test_nfs.py b/qa/tasks/cephfs/test_nfs.py
index 68f67237f80..a43f206e705 100644
--- a/qa/tasks/cephfs/test_nfs.py
+++ b/qa/tasks/cephfs/test_nfs.py
@@ -142,7 +142,7 @@ class TestNFS(MgrTestCase):
         :param cmd_args: nfs command arguments to be run
         '''
         cmd_func()
-        ret = self.mgr_cluster.mon_manager.raw_cluster_cmd_result(*cmd_args)
+        ret = self.get_ceph_cmd_result(*cmd_args)
         if ret != 0:
             self.fail("Idempotency test failed")

diff --git a/qa/tasks/cephfs/test_pool_perm.py b/qa/tasks/cephfs/test_pool_perm.py
index 9912debedee..b55052b826e 100644
--- a/qa/tasks/cephfs/test_pool_perm.py
+++ b/qa/tasks/cephfs/test_pool_perm.py
@@ -30,9 +30,9 @@ class TestPoolPerm(CephFSTestCase):
         client_name = "client.{0}".format(self.mount_a.client_id)

         # set data pool read only
-        self.fs.mon_manager.raw_cluster_cmd_result(
-            'auth', 'caps', client_name, 'mds', 'allow', 'mon', 'allow r', 'osd',
-            'allow r pool={0}'.format(self.fs.get_data_pool_name()))
+        self.get_ceph_cmd_result(
+            'auth', 'caps', client_name, 'mds', 'allow', 'mon', 'allow r',
+            'osd', 'allow r pool={0}'.format(self.fs.get_data_pool_name()))

         self.mount_a.umount_wait()
         self.mount_a.mount_wait()
@@ -41,9 +41,9 @@ class TestPoolPerm(CephFSTestCase):
         self.mount_a.run_python(remote_script.format(path=file_path, check_read=str(False)))

         # set data pool write only
-        self.fs.mon_manager.raw_cluster_cmd_result(
-            'auth', 'caps', client_name, 'mds', 'allow', 'mon', 'allow r', 'osd',
-            'allow w pool={0}'.format(self.fs.get_data_pool_name()))
+        self.get_ceph_cmd_result(
+            'auth', 'caps', client_name, 'mds', 'allow', 'mon', 'allow r',
+            'osd', 'allow w pool={0}'.format(self.fs.get_data_pool_name()))

         self.mount_a.umount_wait()
         self.mount_a.mount_wait()
@@ -66,7 +66,7 @@ class TestPoolPerm(CephFSTestCase):
         self.mount_a.run_shell(["mkdir", "layoutdir"])

         # Set MDS 'rw' perms: missing 'p' means no setting pool layouts
-        self.fs.mon_manager.raw_cluster_cmd_result(
+        self.get_ceph_cmd_result(
             'auth', 'caps', client_name, 'mds', 'allow rw', 'mon',
             'allow r', 'osd',
             'allow rw pool={0},allow rw pool={1}'.format(
@@ -86,7 +86,7 @@ class TestPoolPerm(CephFSTestCase):
         self.mount_a.umount_wait()

         # Set MDS 'rwp' perms: should now be able to set layouts
-        self.fs.mon_manager.raw_cluster_cmd_result(
+        self.get_ceph_cmd_result(
             'auth', 'caps', client_name, 'mds', 'allow rwp', 'mon',
             'allow r', 'osd',
             'allow rw pool={0},allow rw pool={1}'.format(
@@ -101,7 +101,7 @@
         self.mount_a.umount_wait()

     def tearDown(self):
-        self.fs.mon_manager.raw_cluster_cmd_result(
+        self.get_ceph_cmd_result(
             'auth', 'caps', "client.{0}".format(self.mount_a.client_id),
             'mds', 'allow', 'mon', 'allow r', 'osd',
             'allow rw pool={0}'.format(self.fs.get_data_pool_names()[0]))
diff --git a/qa/tasks/cephfs/test_snapshots.py b/qa/tasks/cephfs/test_snapshots.py
index 608dcc81f15..29dc06066ad 100644
--- a/qa/tasks/cephfs/test_snapshots.py
+++ b/qa/tasks/cephfs/test_snapshots.py
@@ -566,40 +566,40 @@ class TestMonSnapsAndFsPools(CephFSTestCase):
         """
         test_pool_name = 'snap-test-pool'
         base_cmd = f'osd pool create {test_pool_name}'
-        ret = self.run_cluster_cmd_result(base_cmd)
+        ret = self.get_ceph_cmd_result(args=base_cmd, check_status=False)
         self.assertEqual(ret, 0)

         self.fs.rados(["mksnap", "snap3"], pool=test_pool_name)

         base_cmd = f'fs add_data_pool {self.fs.name} {test_pool_name}'
-        ret = self.run_cluster_cmd_result(base_cmd)
+        ret = self.get_ceph_cmd_result(args=base_cmd, check_status=False)
         self.assertEqual(ret, errno.EOPNOTSUPP)

         # cleanup
         self.fs.rados(["rmsnap", "snap3"], pool=test_pool_name)
         base_cmd = f'osd pool delete {test_pool_name}'
-        ret = self.run_cluster_cmd_result(base_cmd)
+        ret = self.get_ceph_cmd_result(args=base_cmd, check_status=False)

     def test_using_pool_with_snap_fails_fs_creation(self):
         """
         Test that using a pool with snaps for fs creation fails
         """
         base_cmd = 'osd pool create test_data_pool'
-        ret = self.run_cluster_cmd_result(base_cmd)
+        ret = self.get_ceph_cmd_result(args=base_cmd, check_status=False)
         self.assertEqual(ret, 0)
         base_cmd = 'osd pool create test_metadata_pool'
-        ret = self.run_cluster_cmd_result(base_cmd)
+        ret = self.get_ceph_cmd_result(args=base_cmd, check_status=False)
         self.assertEqual(ret, 0)

         self.fs.rados(["mksnap", "snap4"], pool='test_data_pool')

         base_cmd = 'fs new testfs test_metadata_pool test_data_pool'
-        ret = self.run_cluster_cmd_result(base_cmd)
+        ret = self.get_ceph_cmd_result(args=base_cmd, check_status=False)
         self.assertEqual(ret, errno.EOPNOTSUPP)

         # cleanup
         self.fs.rados(["rmsnap", "snap4"], pool='test_data_pool')
         base_cmd = 'osd pool delete test_data_pool'
-        ret = self.run_cluster_cmd_result(base_cmd)
+        ret = self.get_ceph_cmd_result(args=base_cmd, check_status=False)
         base_cmd = 'osd pool delete test_metadata_pool'
-        ret = self.run_cluster_cmd_result(base_cmd)
+        ret = self.get_ceph_cmd_result(args=base_cmd, check_status=False)
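
A usage sketch (illustrative only; the test class, test name and pool name
below are hypothetical, not taken from this patch): with the RunCephCmd mixin
in place, a CephFSTestCase-based test can read a command's exit status
directly instead of reaching through the mon manager.

    from tasks.cephfs.cephfs_test_case import CephFSTestCase

    class TestExample(CephFSTestCase):
        def test_pool_create(self):
            # Old spelling:
            #   ret = self.fs.mon_manager.raw_cluster_cmd_result(
            #       'osd', 'pool', 'create', 'example-pool')
            # New helper: positional args are forwarded as the command to
            # mon_manager.run_cluster_cmd() and its exitstatus is returned.
            ret = self.get_ceph_cmd_result('osd', 'pool', 'create',
                                           'example-pool')
            self.assertEqual(ret, 0)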