From 9bfca879800fc1deb7774bc663294176baca33b7 Mon Sep 17 00:00:00 2001
From: Josh Durgin
Date: Wed, 6 Jul 2011 14:22:43 -0700
Subject: [PATCH] Check that all machines are locked, and add an option to
 lock machines instead of providing targets.

---
 teuthology/run.py           | 35 ++++++++++++++++++++---------------
 teuthology/task/internal.py | 47 +++++++++++++++++++++++++++++++++++++
 2 files changed, 67 insertions(+), 15 deletions(-)

diff --git a/teuthology/run.py b/teuthology/run.py
index d96f71f1fee43..8e75cc3a1ae3e 100644
--- a/teuthology/run.py
+++ b/teuthology/run.py
@@ -48,6 +48,12 @@ def parse_args():
         '--owner',
         help='job owner'
         )
+    parser.add_argument(
+        '--lock',
+        action='store_true',
+        default=False,
+        help='lock machines for the duration of the run',
+        )
     args = parser.parse_args()
     return args
 
@@ -87,24 +93,13 @@ def main():
             yaml.safe_dump(ctx.config, f, default_flow_style=False)
 
     log.debug('\n  '.join(['Config:', ] + yaml.safe_dump(ctx.config, default_flow_style=False).splitlines()))
-    log.info('Opening connections...')
-
-    from orchestra import connection, remote
-    import orchestra.cluster
-
-    remotes = [remote.Remote(name=t, ssh=connection.connect(t))
-               for t in ctx.config['targets']]
-    ctx.cluster = orchestra.cluster.Cluster()
-    for rem, roles in zip(remotes, ctx.config['roles']):
-        ctx.cluster.add(rem, roles)
 
     ctx.summary = {}
 
-    if ctx.owner is not None:
-        ctx.summary['owner'] = ctx.owner
-    else:
+    if ctx.owner is None:
         from teuthology.misc import get_user
-        ctx.summary['owner'] = get_user()
+        ctx.owner = get_user()
+    ctx.summary['owner'] = ctx.owner
 
     if ctx.description is not None:
         ctx.summary['description'] = ctx.description
@@ -113,7 +108,17 @@ def main():
         assert 'kernel' not in task, \
             'kernel installation shouldn be a base-level item, not part of the tasks list'
 
-    init_tasks = [{'internal.check_conflict': None}]
+    init_tasks = []
+    if ctx.lock:
+        assert 'targets' not in ctx.config, \
+            'You cannot specify targets in a config file when using the --lock option'
+        init_tasks.append({'internal.lock_machines': len(ctx.config['roles'])})
+
+    init_tasks.extend([
+        {'internal.check_lock': None},
+        {'internal.connect': None},
+        {'internal.check_conflict': None},
+        ])
     if 'kernel' in ctx.config:
         init_tasks.append({'kernel': ctx.config['kernel']})
     init_tasks.extend([
diff --git a/teuthology/task/internal.py b/teuthology/task/internal.py
index 305bf11163e41..bbc31e76a29db 100644
--- a/teuthology/task/internal.py
+++ b/teuthology/task/internal.py
@@ -5,6 +5,7 @@
 import logging
 import os
 import tarfile
+from teuthology import lock
 from teuthology import misc as teuthology
 from teuthology import safepath
 from orchestra import run
@@ -42,6 +43,52 @@ def base(ctx, config):
         )
 
+@contextlib.contextmanager
+def lock_machines(ctx, config):
+    log.info('Locking machines...')
+    assert isinstance(config, int), 'config must be an integer'
+    newly_locked = lock.lock_many(config, ctx.owner)
+    if len(newly_locked) != config:
+        log.error('Could not lock enough machines, unlocking and exiting...')
+        for machine in newly_locked:
+            lock.unlock(machine, ctx.owner)
+        assert 0
+    ctx.config['targets'] = newly_locked
+    try:
+        yield
+    finally:
+        log.info('Unlocking machines...')
+        for machine in newly_locked:
+            lock.unlock(machine, ctx.owner)
+
+def check_lock(ctx, config):
+    log.info('Checking locks...')
+    for machine in ctx.config['targets']:
+        status = lock.get_status(machine)
+        log.debug('machine status is %s', repr(status))
+        assert status is not None, \
+            'could not read lock status for {name}'.format(name=machine)
+        assert status['up'], 'machine {name} is marked down'.format(name=machine)
+        assert status['locked'], \
+            'machine {name} is not locked'.format(name=machine)
+        assert status['locked_by'] == ctx.owner, \
+            'machine {name} is locked by {user}, not {owner}'.format(
+            name=machine,
+            user=status['locked_by'],
+            owner=ctx.owner,
+            )
+
+def connect(ctx, config):
+    log.info('Opening connections...')
+    from orchestra import connection, remote
+    import orchestra.cluster
+
+    remotes = [remote.Remote(name=t, ssh=connection.connect(t))
+               for t in ctx.config['targets']]
+    ctx.cluster = orchestra.cluster.Cluster()
+    for rem, roles in zip(remotes, ctx.config['roles']):
+        ctx.cluster.add(rem, roles)
+
 def check_conflict(ctx, config):
     log.info('Checking for old test directory...')
     processes = ctx.cluster.run(
-- 
2.39.5
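
Note: the new lock_machines() and check_lock() tasks depend on the teuthology.lock
module exposing lock_many(count, owner), unlock(machine, owner), and
get_status(machine), where get_status() returns a dict with 'up', 'locked', and
'locked_by' keys (or None for an unknown machine). That module is not part of this
patch; the sketch below is a hypothetical in-memory stand-in, included only to
illustrate the contract the tasks above rely on (the _machines inventory is
invented for illustration; the real module talks to the lock server):

    # Hypothetical stand-in for teuthology.lock, showing the interface
    # assumed by internal.lock_machines and internal.check_lock. The
    # in-memory _machines dict replaces the real lock server.
    _machines = {}  # name -> {'up': bool, 'locked': bool, 'locked_by': str or None}

    def lock_many(count, owner):
        """Lock up to `count` free machines for `owner`.

        Returns the list of machine names actually locked. Callers such
        as lock_machines() must check the length, since fewer than
        `count` machines may be available.
        """
        locked = []
        for name, status in _machines.items():
            if len(locked) == count:
                break
            if status['up'] and not status['locked']:
                status['locked'] = True
                status['locked_by'] = owner
                locked.append(name)
        return locked

    def unlock(machine, owner):
        """Release `machine` if it is currently locked by `owner`."""
        status = _machines.get(machine)
        if status is None or status['locked_by'] != owner:
            return False
        status['locked'] = False
        status['locked_by'] = None
        return True

    def get_status(machine):
        """Return the status dict for `machine`, or None if unknown."""
        return _machines.get(machine)

With that interface in place, a run started with the new --lock flag locks one
machine per entry in the config's roles list before any tasks execute, fails fast
(via the assert) if too few machines are free, and unlocks everything when the run
finishes.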