From eac2c2abbbbde8020e2101264d7fa2569e7a68ff Mon Sep 17 00:00:00 2001
From: Zack Cerza
Date: Fri, 30 May 2014 14:32:38 -0500
Subject: [PATCH] Update users of the teuthology.orchestra.run APIs

Signed-off-by: Zack Cerza
---
 teuthology/misc.py                      |  2 +-
 teuthology/nuke.py                      | 16 ++++++++--------
 teuthology/task/ceph.py                 |  2 +-
 teuthology/task/filestore_idempotent.py |  6 +++---
 teuthology/task/internal.py             |  9 +++------
 teuthology/task/kernel.py               |  8 ++++----
 teuthology/task/lockfile.py             | 18 +++++++++---------
 teuthology/task/locktest.py             |  8 ++++----
 teuthology/task/osd_backfill.py         |  8 ++++----
 teuthology/task/osd_failsafe_enospc.py  | 10 +++++-----
 teuthology/task/osd_recovery.py         | 12 ++++++------
 teuthology/task/parallel_example.py     |  4 ++--
 teuthology/task/radosgw_agent.py        |  2 +-
 teuthology/task/restart.py              |  2 +-
 14 files changed, 52 insertions(+), 55 deletions(-)

diff --git a/teuthology/misc.py b/teuthology/misc.py
index 15cbcf007e..45fbc8afb4 100644
--- a/teuthology/misc.py
+++ b/teuthology/misc.py
@@ -854,7 +854,7 @@ def wait_until_fuse_mounted(remote, fuse, mountpoint):
                 fstype=fstype))
 
         # it shouldn't have exited yet; exposes some trivial problems
-        assert not fuse.exitstatus.ready()
+        assert not fuse.poll()
         time.sleep(5)
     log.info('ceph-fuse is mounted on %s', mountpoint)
 
diff --git a/teuthology/nuke.py b/teuthology/nuke.py
index c971cbd1d4..eebdddb266 100644
--- a/teuthology/nuke.py
+++ b/teuthology/nuke.py
@@ -58,7 +58,7 @@ def shutdown_daemons(ctx):
 
     for name, proc in nodes.iteritems():
         log.info('Waiting for %s to finish shutdowns...', name)
-        proc.exitstatus.get()
+        proc.wait()
 
 
 def find_kernel_mounts(ctx):
@@ -77,7 +77,7 @@ def find_kernel_mounts(ctx):
     kernel_mounts = list()
     for remote, proc in nodes.iteritems():
         try:
-            proc.exitstatus.get()
+            proc.wait()
             log.debug('kernel mount exists on %s', remote.name)
             kernel_mounts.append(remote)
         except run.CommandFailedError: # no mounts!
@@ -108,7 +108,7 @@ def remove_kernel_mounts(ctx, kernel_mounts):
         nodes[remote] = proc
 
     for remote, proc in nodes:
-        proc.exitstatus.get()
+        proc.wait()
 
 
 def remove_osd_mounts(ctx):
@@ -165,7 +165,7 @@ def reboot(ctx, remotes):
    # we just ignore these procs because reboot -f doesn't actually
    # send anything back to the ssh client!
    # for remote, proc in nodes.iteritems():
-   #     proc.exitstatus.get()
+   #     proc.wait()
    if remotes:
        log.info('waiting for nodes to reboot')
        time.sleep(8)  # if we try and reconnect too quickly, it succeeds!
@@ -193,7 +193,7 @@ def reset_syslog_dir(ctx):
 
     for name, proc in nodes.iteritems():
         log.info('Waiting for %s to restart syslog...', name)
-        proc.exitstatus.get()
+        proc.wait()
 
 
 def dpkg_configure(ctx):
@@ -215,7 +215,7 @@ def dpkg_configure(ctx):
         log.info(
             'Waiting for %s to dpkg --configure -a and apt-get -f install...',
             name)
-        proc.exitstatus.get()
+        proc.wait()
 
 
 def remove_installed_packages(ctx):
@@ -251,7 +251,7 @@ def remove_testing_tree(ctx):
 
     for name, proc in nodes.iteritems():
         log.info('Waiting for %s to clear filesystem...', name)
-        proc.exitstatus.get()
+        proc.wait()
 
 
 def synch_clocks(remotes):
@@ -274,7 +274,7 @@ def synch_clocks(remotes):
         nodes[remote.name] = proc
     for name, proc in nodes.iteritems():
         log.info('Waiting for clock to synchronize on %s...', name)
-        proc.exitstatus.get()
+        proc.wait()
 
 
 def main(ctx):
diff --git a/teuthology/task/ceph.py b/teuthology/task/ceph.py
index 39507be1c5..6bba6b285c 100644
--- a/teuthology/task/ceph.py
+++ b/teuthology/task/ceph.py
@@ -348,7 +348,7 @@ def valgrind_post(ctx, config):
 
         valgrind_exception = None
         for (proc, remote) in lookup_procs:
-            proc.exitstatus.get()
+            proc.wait()
             out = proc.stdout.getvalue()
             for line in out.split('\n'):
                 if line == '':
diff --git a/teuthology/task/filestore_idempotent.py b/teuthology/task/filestore_idempotent.py
index d33ad64584..1689a3764b 100644
--- a/teuthology/task/filestore_idempotent.py
+++ b/teuthology/task/filestore_idempotent.py
@@ -65,8 +65,8 @@ def task(ctx, config):
             ],
         wait=False,
         check_status=False)
-    result = proc.exitstatus.get();
-    
+    result = proc.wait()
+
     if result != 0:
         remote.run(
             args=[
@@ -78,4 +78,4 @@ def task(ctx, config):
 
     remote.run(args=[ 'rm', '-rf', '--', dir ])
 
-    
+
diff --git a/teuthology/task/internal.py b/teuthology/task/internal.py
index 533d98a3ab..fe92f1f28f 100644
--- a/teuthology/task/internal.py
+++ b/teuthology/task/internal.py
@@ -5,7 +5,6 @@ the calls are made from other modules, most notably teuthology/run.py
 """
 from cStringIO import StringIO
 import contextlib
-import gevent
 import logging
 import os
 import time
@@ -258,9 +257,8 @@ def check_ceph_data(ctx, config):
     )
     failed = False
     for proc in processes:
-        assert isinstance(proc.exitstatus, gevent.event.AsyncResult)
         try:
-            proc.exitstatus.get()
+            proc.wait()
         except run.CommandFailedError:
             log.error('Host %s has stale /var/lib/ceph, check lock and nuke/cleanup.', proc.remote.shortname)
             failed = True
@@ -281,9 +279,8 @@ def check_conflict(ctx, config):
     )
     failed = False
     for proc in processes:
-        assert isinstance(proc.exitstatus, gevent.event.AsyncResult)
         try:
-            proc.exitstatus.get()
+            proc.wait()
         except run.CommandFailedError:
             log.error('Host %s has stale test directory %s, check lock and cleanup.', proc.remote.shortname, testdir)
             failed = True
@@ -574,7 +571,7 @@ def vm_setup(ctx, config):
             r = remote.run(args=['test', '-e', '/ceph-qa-ready',],
                            stdout=StringIO(),
                            check_status=False,)
-            if r.exitstatus != 0:
+            if r.returncode != 0:
                 p1 = subprocess.Popen(['cat', editinfo], stdout=subprocess.PIPE)
                 p2 = subprocess.Popen(['ssh', '-t', '-t', str(remote), 'sudo', 'sh'], stdin=p1.stdout, stdout=subprocess.PIPE)
                 _, err = p2.communicate()
diff --git a/teuthology/task/kernel.py b/teuthology/task/kernel.py
index 2011eb8d22..263c0669a6 100644
--- a/teuthology/task/kernel.py
+++ b/teuthology/task/kernel.py
@@ -85,7 +85,7 @@ def _find_arch_and_dist(ctx):
 
     Currently this only returns armv7l on the quantal distro
     or x86_64 on the precise distro
-    
+
     :param ctx: Context
     :returns: arch,distro
     """
@@ -293,7 +293,7 @@ def download_deb(ctx,
                  config):
     for name, proc in procs.iteritems():
         log.debug('Waiting for download/copy to %s to complete...', name)
-        proc.exitstatus.get()
+        proc.wait()
 
 
 def _no_grub_link(in_file, remote, kernel_ver):
@@ -302,7 +302,7 @@ def _no_grub_link(in_file, remote, kernel_ver):
     (as is the case in Arm kernels)
 
     :param infile: kernel file or image file to be copied.
-    :param remote: remote machine 
+    :param remote: remote machine
     :param kernel_ver: kernel version
     """
     boot1 = '/boot/%s' % in_file
@@ -469,7 +469,7 @@ def install_and_reboot(ctx, config):
     for name, proc in procs.iteritems():
         log.debug('Waiting for install on %s to complete...', name)
-        proc.exitstatus.get()
+        proc.wait()
 
 
 def enable_disable_kdb(ctx, config):
     """
diff --git a/teuthology/task/lockfile.py b/teuthology/task/lockfile.py
index 10ac1e8e91..8c18d46164 100644
--- a/teuthology/task/lockfile.py
+++ b/teuthology/task/lockfile.py
@@ -37,7 +37,7 @@ def task(ctx, config):
        {client: client.1, lockfile: testfile, holdtime: 5},
        {client: client.2, lockfile: testfile, holdtime: 5, maxwait: 1, expectfail: True}]
-    
+
     In the past this test would have failed; there was a bug where
     waitlocks weren't cleaned up if the process failed. More involved
     scenarios are also possible.
@@ -48,7 +48,7 @@ def task(ctx, config):
     try:
         assert isinstance(config, list), \
             "task lockfile got invalid config"
-        
+
         log.info("building executable on each host")
         buildprocs = list()
         # build the locker executable on each client
@@ -72,7 +72,7 @@ def task(ctx, config):
                 badconfig = True
             if badconfig:
                 raise KeyError("bad config {op_}".format(op_=op))
-        
+
         testdir = teuthology.get_testdir(ctx)
         clients = set(clients)
         files = set(files)
@@ -82,7 +82,7 @@ def task(ctx, config):
             log.info("got a client remote")
             (_, _, client_id) = client.partition('.')
             filepath = os.path.join(testdir, 'mnt.{id}'.format(id=client_id), op["lockfile"])
-            
+
             proc = client_remote.run(
                 args=[
                     'mkdir', '-p', '{tdir}/archive/lockfile'.format(tdir=testdir),
@@ -100,14 +100,14 @@ def task(ctx, config):
                     ],
                 logger=log.getChild('lockfile_client.{id}'.format(id=client_id)),
                 wait=False
-                )    
+                )
             log.info('building sclockandhold on client{id}'.format(id=client_id))
             buildprocs.append(proc)
-        
+
         # wait for builds to finish
         run.wait(buildprocs)
         log.info('finished building sclockandhold on all clients')
-        
+
         # create the files to run these locks on
         client = clients.pop()
         clients.add(client)
@@ -152,7 +152,7 @@ def task(ctx, config):
             lock_procs.append((greenlet, op))
             time.sleep(0.1) # to provide proper ordering
         #for op in config
-        
+
         for (greenlet, op) in lock_procs:
             log.debug('checking lock for op {op_}'.format(op_=op))
             result = greenlet.get()
@@ -217,7 +217,7 @@ def lock_one(op, ctx):
             stdin=run.PIPE,
             check_status=False
             )
-        result = proc.exitstatus.get()
+        result = proc.wait()
     except gevent.Timeout as tout:
         if tout is not timeout:
             raise
diff --git a/teuthology/task/locktest.py b/teuthology/task/locktest.py
index 78323696ce..d896e18ffa 100755
--- a/teuthology/task/locktest.py
+++ b/teuthology/task/locktest.py
@@ -63,7 +63,7 @@ def task(ctx, config):
             )
 
     log.info('built locktest on each client')
-    
+
     host.run(args=['sudo', 'touch',
                    '{mnt}/locktestfile'.format(mnt=hostmnt),
                    run.Raw('&&'),
@@ -96,9 +96,9 @@ def task(ctx, config):
         logger=log.getChild('locktest.client'),
         wait=False
         )
-    
-    hostresult = hostproc.exitstatus.get()
-    clientresult = clientproc.exitstatus.get()
+
+    hostresult = hostproc.wait()
+    clientresult = clientproc.wait()
     if (hostresult != 0) or (clientresult != 0):
         raise Exception("Did not pass locking test!")
     log.info('finished locktest executable with results {r} and {s}'. \
diff --git a/teuthology/task/osd_backfill.py b/teuthology/task/osd_backfill.py
index d80ea22ef2..2798084ce6 100644
--- a/teuthology/task/osd_backfill.py
+++ b/teuthology/task/osd_backfill.py
@@ -39,10 +39,10 @@ def task(ctx, config):
         'thrashosds task only accepts a dict for configuration'
     first_mon = teuthology.get_first_mon(ctx, config)
     (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
-    
+
     num_osds = teuthology.num_instances_of_type(ctx.cluster, 'osd')
     log.info('num_osds is %s' % num_osds)
-    assert num_osds == 3        
+    assert num_osds == 3
 
     manager = ceph_manager.CephManager(
         mon,
@@ -60,7 +60,7 @@ def task(ctx, config):
     # write some data
     p = rados_start(ctx, mon, ['-p', 'rbd', 'bench', '15', 'write', '-b', '4096',
                          '--no-cleanup'])
-    err = p.exitstatus.get();
+    err = p.wait()
     log.info('err is %d' % err)
 
     # mark osd.0 out to trigger a rebalance/backfill
@@ -88,7 +88,7 @@ def task(ctx, config):
     manager.revive_osd(1)
 
     # wait for our writes to complete + succeed
-    err = p.exitstatus.get()
+    err = p.wait()
     log.info('err is %d' % err)
 
     # cluster must recover
diff --git a/teuthology/task/osd_failsafe_enospc.py b/teuthology/task/osd_failsafe_enospc.py
index 39b5b5c530..8f5da58778 100644
--- a/teuthology/task/osd_failsafe_enospc.py
+++ b/teuthology/task/osd_failsafe_enospc.py
@@ -76,7 +76,7 @@ def task(ctx, config):
 
     time.sleep(sleep_time)
     proc.stdin.close() # causes daemon-helper send SIGKILL to ceph -w
-    proc.exitstatus.get()
+    proc.wait()
 
     lines = proc.stdout.getvalue().split('\n')
@@ -103,7 +103,7 @@ def task(ctx, config):
 
     time.sleep(sleep_time)
     proc.stdin.close() # causes daemon-helper send SIGKILL to ceph -w
-    proc.exitstatus.get()
+    proc.wait()
 
     lines = proc.stdout.getvalue().split('\n')
@@ -142,7 +142,7 @@ def task(ctx, config):
 
     time.sleep(sleep_time)
     proc.stdin.close() # causes daemon-helper send SIGKILL to ceph -w
-    proc.exitstatus.get()
+    proc.wait()
 
     lines = proc.stdout.getvalue().split('\n')
@@ -172,7 +172,7 @@ def task(ctx, config):
 
     time.sleep(sleep_time)
     proc.stdin.close() # causes daemon-helper send SIGKILL to ceph -w
-    proc.exitstatus.get()
+    proc.wait()
 
     lines = proc.stdout.getvalue().split('\n')
@@ -200,7 +200,7 @@ def task(ctx, config):
 
     time.sleep(sleep_time)
     proc.stdin.close() # causes daemon-helper send SIGKILL to ceph -w
-    proc.exitstatus.get()
+    proc.wait()
 
     lines = proc.stdout.getvalue().split('\n')
diff --git a/teuthology/task/osd_recovery.py b/teuthology/task/osd_recovery.py
index 1ff17335b1..7fc749d3f5 100644
--- a/teuthology/task/osd_recovery.py
+++ b/teuthology/task/osd_recovery.py
@@ -39,10 +39,10 @@ def task(ctx, config):
     testdir = teuthology.get_testdir(ctx)
     first_mon = teuthology.get_first_mon(ctx, config)
     (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
-    
+
     num_osds = teuthology.num_instances_of_type(ctx.cluster, 'osd')
     log.info('num_osds is %s' % num_osds)
-    assert num_osds == 3        
+    assert num_osds == 3
 
     manager = ceph_manager.CephManager(
         mon,
@@ -82,7 +82,7 @@ def task(ctx, config):
     manager.revive_osd(1)
 
     # wait for our writes to complete + succeed
-    err = p.exitstatus.get()
+    err = p.wait()
     log.info('err is %d' % err)
 
     # cluster must repeer
@@ -92,7 +92,7 @@ def task(ctx, config):
     # write some more (make sure osd.2 really is divergent)
     p = rados_start(testdir, mon, ['-p', 'rbd', 'bench', '15', 'write', '-b', '4096'])
-    p.exitstatus.get();
+    p.wait()
 
     # revive divergent osd
     manager.revive_osd(2)
@@ -159,13 +159,13 @@ def test_incomplete_pgs(ctx, config):
 
     p = rados_start(testdir, mon, ['-p', 'rbd',
                     'bench', '60', 'write', '-b', '1', '--no-cleanup'])
-    p.exitstatus.get()
+    p.wait()
 
     # few objects in metadata pool (with pg log, normal recovery)
     for f in range(1, 20):
         p = rados_start(testdir, mon, ['-p', 'metadata', 'put',
                         'foo.%d' % f, '/etc/passwd'])
-        p.exitstatus.get()
+        p.wait()
 
     # move it back
     manager.raw_cluster_cmd('osd', 'in', '0', '1')
diff --git a/teuthology/task/parallel_example.py b/teuthology/task/parallel_example.py
index 04babfca9b..3d871af226 100644
--- a/teuthology/task/parallel_example.py
+++ b/teuthology/task/parallel_example.py
@@ -33,7 +33,7 @@ def parallel_test(ctx, config):
         nodes[remote.name] = proc
     for name, proc in nodes.iteritems():
         """Wait for each process to finish before yielding and allowing other contextmanagers to run."""
-        proc.exitstatus.get()
+        proc.wait()
     yield
 
 @contextlib.contextmanager
@@ -45,7 +45,7 @@ def task(ctx, config):
         assert(False), "task parallel_example only supports a list or dictionary for configuration"
     if config is None:
         config = ['client.{id}'.format(id=id_)
-                  for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')] 
+                  for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
     if isinstance(config, list):
         config = dict.fromkeys(config)
     clients = config.keys()
diff --git a/teuthology/task/radosgw_agent.py b/teuthology/task/radosgw_agent.py
index e8ffe949da..2ebccae5cb 100644
--- a/teuthology/task/radosgw_agent.py
+++ b/teuthology/task/radosgw_agent.py
@@ -199,7 +199,7 @@ def task(ctx, config):
         for client, proc in procs:
             log.info("shutting down sync agent on %s", client)
             proc.stdin.close()
-            proc.exitstatus.get()
+            proc.wait()
     finally:
         for client, proc in procs:
             ctx.cluster.only(client).run(
diff --git a/teuthology/task/restart.py b/teuthology/task/restart.py
index 87ca2b099e..85e053a725 100644
--- a/teuthology/task/restart.py
+++ b/teuthology/task/restart.py
@@ -150,7 +150,7 @@ def task(ctx, config):
                 proc.stdin.writelines(['restarted\n'])
                 proc.stdin.flush()
                 try:
-                    proc.exitstatus.get()
+                    proc.wait()
                 except tor.CommandFailedError:
                     raise Exception('restart task got non-zero exit status from script: {s}'.format(s=c))
             finally:
-- 
2.39.5
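
For task authors updating their own code, here is a minimal sketch of the
calling convention this patch migrates to. It is an illustration only, not
part of the patch, and it assumes a connected
teuthology.orchestra.remote.Remote named `remote` plus the post-change
RemoteProcess behavior implied by the hunks above: wait() blocks and returns
the exit status, poll() stays falsy while the command is still running, and
the subprocess-style returncode attribute replaces exitstatus. The commands
and variable names are hypothetical.

    # Start a command without blocking; a RemoteProcess is returned at once.
    proc = remote.run(
        args=['sleep', '2'],
        wait=False,           # return immediately instead of waiting
        check_status=False,   # don't raise CommandFailedError on nonzero exit
    )

    # Old API: assert not proc.exitstatus.ready()   (gevent AsyncResult)
    # New API: poll() is falsy while the command is still running.
    assert not proc.poll()

    # Old API: err = proc.exitstatus.get()   (blocked on the AsyncResult)
    # New API: wait() blocks until the command exits and returns its status.
    err = proc.wait()
    assert err == 0

    # Old API: if r.exitstatus != 0:   (after a synchronous run)
    # New API: the subprocess-style returncode attribute.
    r = remote.run(args=['true'], check_status=False)
    assert r.returncode == 0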