From 1f5c839d0e7b78c7ca4208eb1ddcf4660bf5a2d4 Mon Sep 17 00:00:00 2001
From: Patrick Donnelly
Date: Wed, 10 Mar 2021 14:30:32 -0800
Subject: [PATCH] qa: improve usability of do_rados helper

Signed-off-by: Patrick Donnelly
(cherry picked from commit 24bb1aa31ba690b405e17856855b9bf9148a9b19)
---
 qa/tasks/ceph_manager.py | 55 ++++++++++++++++------------------------
 qa/tasks/repair_test.py  | 10 ++------
 qa/tasks/scrub_test.py   | 10 +++-----
 3 files changed, 28 insertions(+), 47 deletions(-)

diff --git a/qa/tasks/ceph_manager.py b/qa/tasks/ceph_manager.py
index 2b5a8a64450d6..e67ca06ddf351 100644
--- a/qa/tasks/ceph_manager.py
+++ b/qa/tasks/ceph_manager.py
@@ -1669,10 +1669,13 @@ class CephManager:
     def flush_all_pg_stats(self):
         self.flush_pg_stats(range(len(self.get_osd_dump())))
 
-    def do_rados(self, remote, cmd, check_status=True):
+    def do_rados(self, cmd, pool=None, namespace=None, remote=None, **kwargs):
         """
         Execute a remote rados command.
         """
+        if remote is None:
+            remote = self.controller
+
         testdir = teuthology.get_testdir(self.ctx)
         pre = [
             'adjust-ulimits',
@@ -1682,11 +1685,15 @@ class CephManager:
             '--cluster',
             self.cluster,
             ]
+        if pool is not None:
+            pre += ['--pool', pool]
+        if namespace is not None:
+            pre += ['--namespace', namespace]
         pre.extend(cmd)
         proc = remote.run(
             args=pre,
             wait=True,
-            check_status=check_status
+            **kwargs
             )
         return proc
 
@@ -1697,7 +1704,6 @@ class CephManager:
         Threads not used yet.
         """
         args = [
-            '-p', pool,
             '--num-objects', num_objects,
             '-b', size,
             'bench', timelimit,
@@ -1705,59 +1711,42 @@ class CephManager:
             ]
         if not cleanup:
             args.append('--no-cleanup')
-        return self.do_rados(self.controller, map(str, args))
+        return self.do_rados(map(str, args), pool=pool)
 
     def do_put(self, pool, obj, fname, namespace=None):
         """
         Implement rados put operation
         """
-        args = ['-p', pool]
-        if namespace is not None:
-            args += ['-N', namespace]
-        args += [
-            'put',
-            obj,
-            fname
-        ]
+        args = ['put', obj, fname]
         return self.do_rados(
-            self.controller,
             args,
-            check_status=False
+            check_status=False,
+            pool=pool,
+            namespace=namespace
        ).exitstatus
 
     def do_get(self, pool, obj, fname='/dev/null', namespace=None):
         """
         Implement rados get operation
         """
-        args = ['-p', pool]
-        if namespace is not None:
-            args += ['-N', namespace]
-        args += [
-            'get',
-            obj,
-            fname
-        ]
+        args = ['get', obj, fname]
         return self.do_rados(
-            self.controller,
             args,
-            check_status=False
+            check_status=False,
+            pool=pool,
+            namespace=namespace,
        ).exitstatus
 
     def do_rm(self, pool, obj, namespace=None):
         """
         Implement rados rm operation
         """
-        args = ['-p', pool]
-        if namespace is not None:
-            args += ['-N', namespace]
-        args += [
-            'rm',
-            obj
-        ]
+        args = ['rm', obj]
         return self.do_rados(
-            self.controller,
             args,
-            check_status=False
+            check_status=False,
+            pool=pool,
+            namespace=namespace
        ).exitstatus
 
     def osd_admin_socket(self, osd_id, command, check_status=True, timeout=0, stdout=None):
diff --git a/qa/tasks/repair_test.py b/qa/tasks/repair_test.py
index 0fbe6cb7799c1..cfd6ef79186a8 100644
--- a/qa/tasks/repair_test.py
+++ b/qa/tasks/repair_test.py
@@ -4,8 +4,6 @@ Test pool repairing after objects are damaged.
 import logging
 import time
 
-from teuthology import misc as teuthology
-
 log = logging.getLogger(__name__)
 
 
@@ -123,20 +121,16 @@ def repair_test_2(ctx, manager, config, chooser):
     with manager.pool(pool, 1):
         log.info("starting repair test type 2")
         victim_osd = chooser(manager, pool, 0)
-        first_mon = teuthology.get_first_mon(ctx, config)
-        (mon,) = ctx.cluster.only(first_mon).remotes.keys()
 
         # create object
         log.info("doing put and setomapval")
         manager.do_put(pool, 'file1', '/etc/hosts')
-        manager.do_rados(mon, ['-p', pool, 'setomapval', 'file1',
-                               'key', 'val'])
+        manager.do_rados(['setomapval', 'file1', 'key', 'val'], pool=pool)
         manager.do_put(pool, 'file2', '/etc/hosts')
         manager.do_put(pool, 'file3', '/etc/hosts')
         manager.do_put(pool, 'file4', '/etc/hosts')
         manager.do_put(pool, 'file5', '/etc/hosts')
-        manager.do_rados(mon, ['-p', pool, 'setomapval', 'file5',
-                               'key', 'val'])
+        manager.do_rados(['setomapval', 'file5', 'key', 'val'], pool=pool)
         manager.do_put(pool, 'file6', '/etc/hosts')
 
         # corrupt object
diff --git a/qa/tasks/scrub_test.py b/qa/tasks/scrub_test.py
index d301bea0be1b9..3d629e9d746cb 100644
--- a/qa/tasks/scrub_test.py
+++ b/qa/tasks/scrub_test.py
@@ -285,8 +285,7 @@ def test_list_inconsistent_obj(ctx, manager, osd_remote, pg, acting, osd_id,
     pool = 'rbd'
     omap_key = 'key'
     omap_val = 'val'
-    manager.do_rados(mon, ['-p', pool, 'setomapval', obj_name,
-                           omap_key, omap_val])
+    manager.do_rados(['setomapval', obj_name, omap_key, omap_val], pool=pool)
     # Update missing digests, requires "osd deep scrub update digest min age: 0"
     pgnum = get_pgnum(pg)
     manager.do_pg_scrub(pool, pgnum, 'deep-scrub')
@@ -370,8 +369,7 @@ def task(ctx, config):
     manager.wait_for_clean()
 
     # write some data
-    p = manager.do_rados(mon, ['-p', 'rbd', 'bench', '--no-cleanup', '1',
-                               'write', '-b', '4096'])
+    p = manager.do_rados(['bench', '--no-cleanup', '1', 'write', '-b', '4096'], pool='rbd')
     log.info('err is %d' % p.exitstatus)
 
     # wait for some PG to have data that we can mess with
@@ -379,9 +377,9 @@ def task(ctx, config):
     osd = acting[0]
 
     osd_remote, obj_path, obj_name = find_victim_object(ctx, pg, osd)
-    manager.do_rados(mon, ['-p', 'rbd', 'setomapval', obj_name, 'key', 'val'])
+    manager.do_rados(['setomapval', obj_name, 'key', 'val'], pool='rbd')
    log.info('err is %d' % p.exitstatus)
-    manager.do_rados(mon, ['-p', 'rbd', 'setomapheader', obj_name, 'hdr'])
+    manager.do_rados(['setomapheader', obj_name, 'hdr'], pool='rbd')
     log.info('err is %d' % p.exitstatus)
 
     # Update missing digests, requires "osd deep scrub update digest min age: 0"
-- 
2.39.5
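
Not part of the patch: a minimal sketch of how callers change with the reworked helper. It assumes `manager` is a CephManager instance from qa/tasks/ceph_manager.py and uses made-up pool/object names purely for illustration; only calls shown in the diff above are relied on.

# Illustrative sketch only -- not part of the patch. `manager` is assumed to be
# a CephManager instance; 'rbd' and 'obj1' are placeholder names.

def demo_new_do_rados_usage(manager):
    # Old convention (removed by this patch): the caller passed a remote and
    # spliced '-p <pool>' into the rados argument list by hand, e.g.:
    #   manager.do_rados(mon, ['-p', 'rbd', 'setomapval', 'obj1', 'key', 'val'])

    # New convention: the remote defaults to manager.controller, and pool /
    # namespace are keyword arguments that the helper turns into the
    # '--pool' / '--namespace' flags of the rados CLI.
    manager.do_rados(['setomapval', 'obj1', 'key', 'val'], pool='rbd')

    # Remaining keyword arguments are forwarded to remote.run(), so per-call
    # options such as check_status still work the same way as before.
    proc = manager.do_rados(['get', 'obj1', '/dev/null'],
                            pool='rbd', check_status=False)
    return proc.exitstatus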