'--owner',
help='job owner'
)
+ parser.add_argument(
+ '--lock',
+ action='store_true',
+ default=False,
+ help='lock machines for the duration of the run',
+ )
args = parser.parse_args()
return args
yaml.safe_dump(ctx.config, f, default_flow_style=False)
log.debug('\n '.join(['Config:', ] + yaml.safe_dump(ctx.config, default_flow_style=False).splitlines()))
- log.info('Opening connections...')
-
- from orchestra import connection, remote
- import orchestra.cluster
-
- remotes = [remote.Remote(name=t, ssh=connection.connect(t))
- for t in ctx.config['targets']]
- ctx.cluster = orchestra.cluster.Cluster()
- for rem, roles in zip(remotes, ctx.config['roles']):
- ctx.cluster.add(rem, roles)
ctx.summary = {}
- if ctx.owner is not None:
- ctx.summary['owner'] = ctx.owner
- else:
+ if ctx.owner is None:
from teuthology.misc import get_user
- ctx.summary['owner'] = get_user()
+ ctx.owner = get_user()
+ ctx.summary['owner'] = ctx.owner
if ctx.description is not None:
ctx.summary['description'] = ctx.description
assert 'kernel' not in task, \
        'kernel installation should be a base-level item, not part of the tasks list'
- init_tasks = [{'internal.check_conflict': None}]
+ init_tasks = []
+ if ctx.lock:
+ assert 'targets' not in ctx.config, \
+ 'You cannot specify targets in a config file when using the --lock option'
+ init_tasks.append({'internal.lock_machines': len(ctx.config['roles'])})
+
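+    # check_lock and connect run before the remaining internal tasks so the
+    # rest of the run can assume the targets are reserved and reachable.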
+ init_tasks.extend([
+ {'internal.check_lock': None},
+ {'internal.connect': None},
+ {'internal.check_conflict': None},
+ ])
if 'kernel' in ctx.config:
init_tasks.append({'kernel': ctx.config['kernel']})
init_tasks.extend([
import os
import tarfile
+from teuthology import lock
from teuthology import misc as teuthology
from teuthology import safepath
from orchestra import run
)
+@contextlib.contextmanager
+def lock_machines(ctx, config):
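+    # Lock `config` machines (one per roles entry) for ctx.owner, expose them
+    # as ctx.config['targets'], and unlock them again when the run finishes.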
+ log.info('Locking machines...')
+ assert isinstance(config, int), 'config must be an integer'
+ newly_locked = lock.lock_many(config, ctx.owner)
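+    # lock_many returns only the machines it actually locked; anything short
+    # of the request means the pool could not satisfy it, so roll back.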
+ if len(newly_locked) != config:
+ log.error('Could not lock enough machines, unlocking and exiting...')
+ for machine in newly_locked:
+ lock.unlock(machine, ctx.owner)
+        assert 0, 'not enough machines were available'
+ ctx.config['targets'] = newly_locked
+ try:
+ yield
+ finally:
+ log.info('Unlocking machines...')
+ for machine in newly_locked:
+ lock.unlock(machine, ctx.owner)
+
+def check_lock(ctx, config):
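+    # Ensure every target is up, locked, and locked by ctx.owner before the
+    # run proceeds.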
+ log.info('Checking locks...')
+ for machine in ctx.config['targets']:
+ status = lock.get_status(machine)
+ log.debug('machine status is %s', repr(status))
+ assert status is not None, \
+ 'could not read lock status for {name}'.format(name=machine)
+ assert status['up'], 'machine {name} is marked down'.format(name=machine)
+ assert status['locked'], \
+ 'machine {name} is not locked'.format(name=machine)
+ assert status['locked_by'] == ctx.owner, \
+ 'machine {name} is locked by {user}, not {owner}'.format(
+ name=machine,
+ user=status['locked_by'],
+ owner=ctx.owner,
+ )
+
+def connect(ctx, config):
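+    # Open an SSH connection to each target and group them, with their roles,
+    # into ctx.cluster for later tasks to use.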
+ log.info('Opening connections...')
+ from orchestra import connection, remote
+ import orchestra.cluster
+
+ remotes = [remote.Remote(name=t, ssh=connection.connect(t))
+ for t in ctx.config['targets']]
+ ctx.cluster = orchestra.cluster.Cluster()
+ for rem, roles in zip(remotes, ctx.config['roles']):
+ ctx.cluster.add(rem, roles)
+
def check_conflict(ctx, config):
log.info('Checking for old test directory...')
processes = ctx.cluster.run(