From 8d196b001ccb36bd0916da6e03207ae65ec80116 Mon Sep 17 00:00:00 2001
From: Josh Durgin
Date: Thu, 14 Jul 2011 16:47:29 -0700
Subject: [PATCH] Make targets a dictionary mapping hosts to ssh host keys.

---
 README.rst                  | 5 +++--
 teuthology/lock.py          | 8 ++++----
 teuthology/task/internal.py | 6 +++---
 teuthology/task/s3tests.py  | 2 +-
 4 files changed, 11 insertions(+), 10 deletions(-)

diff --git a/README.rst b/README.rst
index 71930bb1fde6f..1c81ff0a4540c 100644
--- a/README.rst
+++ b/README.rst
@@ -45,8 +45,9 @@ Test configuration
 
 An integration test run takes three items of configuration:
 
-- ``targets``: what hosts to run on; this is a list of entries like
-  "username@hostname.example.com"
+- ``targets``: what hosts to run on; this is a dictionary mapping
+  hosts to ssh host keys, like:
+  "username@hostname.example.com: ssh-rsa long_hostkey_here"
 - ``roles``: how to use the hosts; this is a list of lists, where
   each entry lists all the roles to be run on a single host; for
   example, a single entry might say ``[mon.1, osd.1]``
diff --git a/teuthology/lock.py b/teuthology/lock.py
index e622002c50f0d..139df7e20641e 100644
--- a/teuthology/lock.py
+++ b/teuthology/lock.py
@@ -29,7 +29,7 @@ def lock_many(ctx, num, user=None):
                               urllib.urlencode(dict(user=user, num=num)))
     if success:
         machines = json.loads(content)
-        log.debug('locked {machines}'.format(machines=', '.join(machines)))
+        log.debug('locked {machines}'.format(machines=', '.join(machines.keys())))
         return machines
     log.warn('Could not lock %d nodes', num)
     return []
@@ -190,7 +190,7 @@ Lock, unlock, or query lock status of machines.
                 g = yaml.safe_load_all(f)
                 for new in g:
                     if 'targets' in new:
-                        for t in new['targets']:
+                        for t in new['targets'].iterkeys():
                             machines.append(t)
         except IOError, e:
             raise argparse.ArgumentTypeError(str(e))
@@ -241,7 +241,7 @@ Lock, unlock, or query lock status of machines.
         if not result:
             ret = 1
         else:
-            machines_to_update = result
+            machines_to_update = result.keys()
             print yaml.safe_dump(dict(targets=result), default_flow_style=False)
     elif ctx.update:
         assert ctx.desc is not None or ctx.status is not None, \
@@ -310,7 +310,7 @@ to run on, or use -a to check all of them automatically.
                 g = yaml.safe_load_all(f)
                 for new in g:
                     if 'targets' in new:
-                        for t in new['targets']:
+                        for t in new['targets'].iterkeys():
                             machines.append(t)
         except IOError, e:
             raise argparse.ArgumentTypeError(str(e))
diff --git a/teuthology/task/internal.py b/teuthology/task/internal.py
index f5a7cca131e5f..cf4e9f44ad3b2 100644
--- a/teuthology/task/internal.py
+++ b/teuthology/task/internal.py
@@ -72,12 +72,12 @@ def lock_machines(ctx, config):
     finally:
         if not ctx.keep_locked_on_error or ctx.summary.get('success', False):
             log.info('Unlocking machines...')
-            for machine in ctx.config['targets']:
+            for machine in ctx.config['targets'].iterkeys():
                 lock.unlock(ctx, machine, ctx.owner)
 
 def check_lock(ctx, config):
     log.info('Checking locks...')
-    for machine in ctx.config['targets']:
+    for machine in ctx.config['targets'].iterkeys():
         status = lock.get_status(ctx, machine)
         log.debug('machine status is %s', repr(status))
         assert status is not None, \
@@ -98,7 +98,7 @@ def connect(ctx, config):
     import orchestra.cluster
     remotes = [remote.Remote(name=t, ssh=connection.connect(t))
-               for t in ctx.config['targets']]
+               for t, key in ctx.config['targets'].iteritems()]
     ctx.cluster = orchestra.cluster.Cluster()
     if 'roles' in ctx.config:
         for rem, roles in zip(remotes, ctx.config['roles']):
diff --git a/teuthology/task/s3tests.py b/teuthology/task/s3tests.py
index 0d4cbad4dfa5d..9944c48950500 100644
--- a/teuthology/task/s3tests.py
+++ b/teuthology/task/s3tests.py
@@ -81,7 +81,7 @@ def configure(ctx, config):
         s3tests_conf = config['s3tests_conf'][client]
         if properties is not None and 'rgw_server' in properties:
             host = None
-            for target, roles in zip(ctx.config['targets'], ctx.config['roles']):
+            for target, roles in zip(ctx.config['targets'].iterkeys(), ctx.config['roles']):
                 log.info('roles: ' + str(roles))
                 log.info('target: ' + str(target))
                 if properties['rgw_server'] in roles:
-- 
2.39.5
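
For reference, a ``targets`` stanza in the test configuration YAML would look
roughly like the sketch below under the new format; before this patch it was a
flat list of "username@hostname.example.com" strings. The hostnames and the
host key value are placeholders in the spirit of the README example, not
values from a real cluster:

  targets:
    username@host1.example.com: ssh-rsa long_hostkey_here
    username@host2.example.com: ssh-rsa long_hostkey_here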