workunit: Add option to use specified subdir
author Sam Lang <sam.lang@inktank.com>
Wed, 7 Nov 2012 04:28:56 +0000 (20:28 -0800)
committer Sam Lang <sam.lang@inktank.com>
Thu, 8 Nov 2012 15:09:23 +0000 (09:09 -0600)
Signed-off-by: Sam Lang <sam.lang@inktank.com>
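
With this change the workunit task honors an optional subdir key in its config, which is passed down to _make_scratch_dir, _spawn_on_all_clients, and _run_tests; when the key is absent the previous per-client client.<id> default still applies. A rough sketch of a config dict using the new key follows; it assumes the task's usual clients mapping, and the client role and test path are illustrative placeholders, not part of this commit:

# Illustrative sketch only: the kind of dict the workunit task reads.
# The 'clients' mapping and test path are hypothetical placeholders;
# 'subdir' is the optional key added by this commit (config.get('subdir')).
config = {
    'clients': {
        'client.0': ['suites/some_workunit.sh'],  # hypothetical workunit
    },
    'subdir': 'shared',  # scratch dir becomes mnt.<id>/shared instead of mnt.<id>/client.<id>
}

When subdir is omitted, behaviour is unchanged: each client gets a client.<id> directory, with client.<id>/tmp as its scratch tmp.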
teuthology/task/workunit.py

index 06ff6664b8821dafaf4dcfffe0a3e983f6bc751a..03a893edf4a6275ebeb89ea32c196e56854d63ec 100644 (file)
@@ -76,7 +76,7 @@ def task(ctx, config):
             continue
         PREFIX = 'client.'
         assert role.startswith(PREFIX)
-        _make_scratch_dir(ctx, role)
+        _make_scratch_dir(ctx, role, config.get('subdir'))
     all_spec = False #is there an all grouping?
     with parallel() as p:
         for role, tests in clients.iteritems():
@@ -87,15 +87,16 @@ def task(ctx, config):
 
     if all_spec:
         all_tasks = clients["all"]
-        _spawn_on_all_clients(ctx, refspec, all_tasks, config.get('env'))
+        _spawn_on_all_clients(ctx, refspec, all_tasks, config.get('env'), config.get('subdir'))
 
-def _make_scratch_dir(ctx, role):
+def _make_scratch_dir(ctx, role, subdir):
     PREFIX = 'client.'
     id_ = role[len(PREFIX):]
     log.debug("getting remote for {id} role {role_}".format(id=id_, role_=role))
     (remote,) = ctx.cluster.only(role).remotes.iterkeys()
     dir_owner = remote.shortname.split('@', 1)[0]
     mnt = os.path.join('/tmp/cephtest', 'mnt.{id}'.format(id=id_))
+    if not subdir: subdir = 'client.{id}'.format(id=id_)
     remote.run(
         args=[
             # cd first so this will fail if the mount point does
@@ -109,26 +110,26 @@ def _make_scratch_dir(ctx, role):
             'install',
             '-d',
             '-m', '0755',
-            '--owner={user}'.format(user=dir_owner), 
+            '--owner={user}'.format(user=dir_owner),
             '--',
-            'client.{id}'.format(id=id_),
+            subdir,
             ],
         )
 
-def _spawn_on_all_clients(ctx, refspec, tests, env):
+def _spawn_on_all_clients(ctx, refspec, tests, env, subdir):
     client_generator = teuthology.all_roles_of_type(ctx.cluster, 'client')
     client_remotes = list()
     for client in client_generator:
         (client_remote,) = ctx.cluster.only('client.{id}'.format(id=client)).remotes.iterkeys()
         client_remotes.append((client_remote, 'client.{id}'.format(id=client)))
-        _make_scratch_dir(ctx, "client.{id}".format(id=client))
-        
+        _make_scratch_dir(ctx, "client.{id}".format(id=client), subdir)
+
     for unit in tests:
         with parallel() as p:
             for remote, role in client_remotes:
-                p.spawn(_run_tests, ctx, refspec, role, [unit], env)
+                p.spawn(_run_tests, ctx, refspec, role, [unit], env, subdir)
 
-def _run_tests(ctx, refspec, role, tests, env):
+def _run_tests(ctx, refspec, role, tests, env, subdir):
     assert isinstance(role, basestring)
     PREFIX = 'client.'
     assert role.startswith(PREFIX)
@@ -136,7 +137,10 @@ def _run_tests(ctx, refspec, role, tests, env):
     (remote,) = ctx.cluster.only(role).remotes.iterkeys()
     mnt = os.path.join('/tmp/cephtest', 'mnt.{id}'.format(id=id_))
     # subdir so we can remove and recreate this a lot without sudo
-    scratch_tmp = os.path.join(mnt, 'client.{id}'.format(id=id_), 'tmp')
+    if subdir:
+        scratch_tmp = os.path.join(mnt, subdir)
+    else:
+        scratch_tmp = os.path.join(mnt, 'client.{id}'.format(id=id_), 'tmp')
     srcdir = '/tmp/cephtest/workunit.{role}'.format(role=role)
     secretfile = '/tmp/cephtest/data/{role}.secret'.format(role=role)
     teuthology.write_secret_file(remote, role, secretfile)
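
For reference, this is how the scratch path now resolves on a client mount, mirroring the logic in _make_scratch_dir and _run_tests above (standalone Python sketch, not part of the commit):

import os

def scratch_path(client_id, subdir=None):
    # Mount point used throughout workunit.py: /tmp/cephtest/mnt.<id>
    mnt = os.path.join('/tmp/cephtest', 'mnt.{id}'.format(id=client_id))
    if subdir:
        # New behaviour: run inside the caller-specified subdir
        return os.path.join(mnt, subdir)
    # Default (unchanged): per-client client.<id>/tmp scratch dir
    return os.path.join(mnt, 'client.{id}'.format(id=client_id), 'tmp')

print(scratch_path('0'))            # -> /tmp/cephtest/mnt.0/client.0/tmp
print(scratch_path('0', 'mydir'))   # -> /tmp/cephtest/mnt.0/mydir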