log = logging.getLogger(__name__)
+CLIENT_PREFIX = 'client.'
+
+
def task(ctx, config):
"""
Run ceph on all workunits found under the specified path.
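+
+    A sketch of the expected config shape, based on the keys this task reads
+    (the script names and values below are placeholders)::
+
+        clients:
+          all: [<workunit.sh>]
+          client.0: [<other-workunit.sh>]
+        env:
+          SOME_VAR: some_value
+        subdir: <optional scratch subdir>
+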
log.info('Pulling workunits from ref %s', refspec)
- created_dir_dict = {}
+ created_mountpoint = {}
if config.get('env') is not None:
assert isinstance(config['env'], dict), 'env must be a dictionary'
clients = config['clients']
+
+ # Create scratch dirs for any non-all workunits
log.info('Making a separate scratch dir for every client...')
for role in clients.iterkeys():
assert isinstance(role, basestring)
if role == "all":
continue
- PREFIX = 'client.'
- assert role.startswith(PREFIX)
+
+ assert role.startswith(CLIENT_PREFIX)
created_mnt_dir = _make_scratch_dir(ctx, role, config.get('subdir'))
- created_dir_dict[role] = created_mnt_dir
+ created_mountpoint[role] = created_mnt_dir
- all_spec = False #is there an all grouping?
+ # Execute any non-all workunits
with parallel() as p:
for role, tests in clients.iteritems():
if role != "all":
p.spawn(_run_tests, ctx, refspec, role, tests,
config.get('env'), timeout=timeout)
- else:
- all_spec = True
- if all_spec:
+ # Clean up dirs from any non-all workunits
+ for role, created in created_mountpoint.items():
+ _delete_dir(ctx, role, created)
+
+ # Execute any 'all' workunits
+ if 'all' in clients:
all_tasks = clients["all"]
_spawn_on_all_clients(ctx, refspec, all_tasks, config.get('env'),
config.get('subdir'), timeout=timeout)
- for role in clients.iterkeys():
- assert isinstance(role, basestring)
- if role == "all":
- continue
- PREFIX = 'client.'
- assert role.startswith(PREFIX)
- if created_dir_dict[role]:
- _delete_dir(ctx, role)
-
-def _delete_dir(ctx, role):
+def _delete_dir(ctx, role, created_mountpoint):
"""
-    Delete file used by this role, and delete the directory that this
-    role appeared in.
+    Delete the scratch directory used by this role inside its mount point,
+    and remove the mount point itself if it was created for this run.
    :param ctx: Context
-    :param role: "role.#" where # is used for the role id.
+    :param role: "client.#" where # is the client id.
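+    :param created_mountpoint: True if the mount point directory was created
+        by _make_scratch_dir, in which case it is removed here as well.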
"""
- PREFIX = 'client.'
testdir = teuthology.get_testdir(ctx)
- id_ = role[len(PREFIX):]
+ id_ = role[len(CLIENT_PREFIX):]
(remote,) = ctx.cluster.only(role).remotes.iterkeys()
mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_))
# Is there any reason why this is not: join(mnt, role) ?
client = os.path.join(mnt, 'client.{id}'.format(id=id_))
- try:
- remote.run(
- args=[
- 'rm',
- '-rf',
- '--',
- client,
- ],
- )
- log.info("Deleted dir {dir}".format(dir=client))
- except Exception:
- log.exception("Caught an exception deleting dir {dir}".format(dir=client))
- try:
+ # Remove the directory inside the mount where the workunit ran
+ remote.run(
+ args=[
+ 'rm',
+ '-rf',
+ '--',
+ client,
+ ],
+ )
+    log.info("Deleted dir {dir}".format(dir=client))
+
+ # If the mount was an artificially created dir, delete that too
+ if created_mountpoint:
remote.run(
args=[
'rmdir',
'--',
mnt,
- ],
- )
- log.info("Deleted dir {dir}".format(dir=mnt))
- except Exception:
- log.exception("Caught an exception deleting dir {dir}".format(dir=mnt))
+ ],
+ )
+        log.info("Deleted artificial mount point {dir}".format(dir=mnt))
+
def _make_scratch_dir(ctx, role, subdir):
"""
-    :param role: "role.#" where # is used for the role id.
-    :param subdir: use this subdir (False if not used)
+    :param role: "client.#" where # is the client id.
+    :param subdir: scratch subdir to use; if empty, 'client.{id}' is used
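+    :returns: True if the mount point directory was created by this function
+        (and should later be removed via _delete_dir), False if it already
+        existed.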
"""
- retVal = False
- PREFIX = 'client.'
- id_ = role[len(PREFIX):]
+ created_mountpoint = False
+ id_ = role[len(CLIENT_PREFIX):]
log.debug("getting remote for {id} role {role_}".format(id=id_, role_=role))
(remote,) = ctx.cluster.only(role).remotes.iterkeys()
dir_owner = remote.user
],
)
log.info('Created dir {dir}'.format(dir=mnt))
- retVal = True
+ created_mountpoint = True
-    if not subdir: subdir = 'client.{id}'.format(id=id_)
+
+    if not subdir:
+        subdir = 'client.{id}'.format(id=id_)
- if retVal:
+ if created_mountpoint:
remote.run(
args=[
'cd',
],
)
- return retVal
+ return created_mountpoint
def _spawn_on_all_clients(ctx, refspec, tests, env, subdir, timeout=None):
"""
client_generator = teuthology.all_roles_of_type(ctx.cluster, 'client')
client_remotes = list()
+
+ created_mountpoint = {}
for client in client_generator:
(client_remote,) = ctx.cluster.only('client.{id}'.format(id=client)).remotes.iterkeys()
client_remotes.append((client_remote, 'client.{id}'.format(id=client)))
- _make_scratch_dir(ctx, "client.{id}".format(id=client), subdir)
+ created_mountpoint[client] = _make_scratch_dir(ctx, "client.{id}".format(id=client), subdir)
for unit in tests:
with parallel() as p:
# cleanup the generated client directories
client_generator = teuthology.all_roles_of_type(ctx.cluster, 'client')
for client in client_generator:
- _delete_dir(ctx, 'client.{id}'.format(id=client))
+ _delete_dir(ctx, 'client.{id}'.format(id=client), created_mountpoint[client])
def _run_tests(ctx, refspec, role, tests, env, subdir=None, timeout=None):
"""
testdir = teuthology.get_testdir(ctx)
assert isinstance(role, basestring)
- PREFIX = 'client.'
- assert role.startswith(PREFIX)
- id_ = role[len(PREFIX):]
+ assert role.startswith(CLIENT_PREFIX)
+ id_ = role[len(CLIENT_PREFIX):]
(remote,) = ctx.cluster.only(role).remotes.iterkeys()
mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_))
# subdir so we can remove and recreate this a lot without sudo