From ea02fb74b99268454f0f65011a01cdba886431eb Mon Sep 17 00:00:00 2001
From: Sam Lang
Date: Tue, 6 Nov 2012 20:28:56 -0800
Subject: [PATCH] workunit: Add option to use specified subdir

Signed-off-by: Sam Lang
---
 teuthology/task/workunit.py | 26 +++++++++++++++-----------
 1 file changed, 15 insertions(+), 11 deletions(-)

diff --git a/teuthology/task/workunit.py b/teuthology/task/workunit.py
index 06ff6664b8821..03a893edf4a62 100644
--- a/teuthology/task/workunit.py
+++ b/teuthology/task/workunit.py
@@ -76,7 +76,7 @@ def task(ctx, config):
             continue
         PREFIX = 'client.'
         assert role.startswith(PREFIX)
-        _make_scratch_dir(ctx, role)
+        _make_scratch_dir(ctx, role, config.get('subdir'))
     all_spec = False #is there an all grouping?
     with parallel() as p:
         for role, tests in clients.iteritems():
@@ -87,15 +87,16 @@ def task(ctx, config):
 
     if all_spec:
         all_tasks = clients["all"]
-        _spawn_on_all_clients(ctx, refspec, all_tasks, config.get('env'))
+        _spawn_on_all_clients(ctx, refspec, all_tasks, config.get('env'), config.get('subdir'))
 
-def _make_scratch_dir(ctx, role):
+def _make_scratch_dir(ctx, role, subdir):
     PREFIX = 'client.'
     id_ = role[len(PREFIX):]
     log.debug("getting remote for {id} role {role_}".format(id=id_, role_=role))
     (remote,) = ctx.cluster.only(role).remotes.iterkeys()
     dir_owner = remote.shortname.split('@', 1)[0]
     mnt = os.path.join('/tmp/cephtest', 'mnt.{id}'.format(id=id_))
+    if not subdir: subdir = 'client.{id}'.format(id=id_)
     remote.run(
         args=[
             # cd first so this will fail if the mount point does
@@ -109,26 +110,26 @@
             'install',
             '-d',
             '-m', '0755',
-            '--owner={user}'.format(user=dir_owner),
+            '--owner={user}'.format(user=dir_owner),
             '--',
-            'client.{id}'.format(id=id_),
+            subdir,
             ],
         )
 
-def _spawn_on_all_clients(ctx, refspec, tests, env):
+def _spawn_on_all_clients(ctx, refspec, tests, env, subdir):
     client_generator = teuthology.all_roles_of_type(ctx.cluster, 'client')
     client_remotes = list()
     for client in client_generator:
         (client_remote,) = ctx.cluster.only('client.{id}'.format(id=client)).remotes.iterkeys()
         client_remotes.append((client_remote, 'client.{id}'.format(id=client)))
-        _make_scratch_dir(ctx, "client.{id}".format(id=client))
-
+        _make_scratch_dir(ctx, "client.{id}".format(id=client), subdir)
+
     for unit in tests:
         with parallel() as p:
             for remote, role in client_remotes:
-                p.spawn(_run_tests, ctx, refspec, role, [unit], env)
+                p.spawn(_run_tests, ctx, refspec, role, [unit], env, subdir)
 
-def _run_tests(ctx, refspec, role, tests, env):
+def _run_tests(ctx, refspec, role, tests, env, subdir):
     assert isinstance(role, basestring)
     PREFIX = 'client.'
     assert role.startswith(PREFIX)
@@ -136,7 +137,10 @@
     (remote,) = ctx.cluster.only(role).remotes.iterkeys()
     mnt = os.path.join('/tmp/cephtest', 'mnt.{id}'.format(id=id_))
     # subdir so we can remove and recreate this a lot without sudo
-    scratch_tmp = os.path.join(mnt, 'client.{id}'.format(id=id_), 'tmp')
+    if subdir:
+        scratch_tmp = os.path.join(mnt, subdir)
+    else:
+        scratch_tmp = os.path.join(mnt, 'client.{id}'.format(id=id_), 'tmp')
     srcdir = '/tmp/cephtest/workunit.{role}'.format(role=role)
     secretfile = '/tmp/cephtest/data/{role}.secret'.format(role=role)
     teuthology.write_secret_file(remote, role, secretfile)
-- 
2.39.5
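
For reference, a minimal standalone sketch of the scratch-path selection this
patch adds to _run_tests. The scratch_path helper name and the example paths
and subdir value are illustrative only, not part of the patch; the branching
logic itself is taken directly from the hunk above.

    import os

    def scratch_path(mnt, id_, subdir=None):
        # Mirrors the patched _run_tests logic: an explicit subdir is used
        # directly under the mount point; with no subdir configured, the
        # old client.{id}/tmp layout is kept.
        if subdir:
            return os.path.join(mnt, subdir)
        return os.path.join(mnt, 'client.{id}'.format(id=id_), 'tmp')

    # Default behavior is unchanged:
    assert scratch_path('/tmp/cephtest/mnt.0', '0') == \
        '/tmp/cephtest/mnt.0/client.0/tmp'
    # With e.g. "subdir: mytestdir" in the workunit task config:
    assert scratch_path('/tmp/cephtest/mnt.0', '0', 'mytestdir') == \
        '/tmp/cephtest/mnt.0/mytestdir'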