From 4902bfabf306c27546a6ffcb92bccf585b20f383 Mon Sep 17 00:00:00 2001
From: Zack Cerza
Date: Mon, 30 Sep 2013 16:18:39 -0500
Subject: [PATCH] PEP-8 cleanup

Signed-off-by: Zack Cerza
---
 teuthology/nuke.py | 108 +++++++++++++++++++++++++++++----------------
 1 file changed, 71 insertions(+), 37 deletions(-)

diff --git a/teuthology/nuke.py b/teuthology/nuke.py
index 59ddb213c83fd..7ad419c4a186f 100644
--- a/teuthology/nuke.py
+++ b/teuthology/nuke.py
@@ -3,6 +3,7 @@ import yaml
 import textwrap
 from argparse import RawTextHelpFormatter
 
+
 def parse_args():
     from teuthology.run import config_file
     from teuthology.run import MergeConfig
@@ -10,10 +11,10 @@ def parse_args():
     parser = argparse.ArgumentParser(
         description='Reset test machines',
         epilog=textwrap.dedent('''
-            Examples:
-            teuthology-nuke -t target.yaml --unlock --owner user@host
-            teuthology-nuke -t target.yaml --pid 1234 --unlock --owner user@host \n
-            '''),
+        Examples:
+        teuthology-nuke -t target.yaml --unlock --owner user@host
+        teuthology-nuke -t target.yaml --pid 1234 --unlock --owner user@host \n
+        '''),
         formatter_class=RawTextHelpFormatter)
     parser.add_argument(
         '-v', '--verbose',
@@ -39,9 +40,10 @@ def parse_args():
         help='job owner',
     )
     parser.add_argument(
-        '-p','--pid',
-        type=int,
-        default=False,
+        '-p',
+        '--pid',
+        type=int,
+        default=False,
         help='pid of the process to be killed',
     )
     parser.add_argument(
@@ -76,6 +78,7 @@ def parse_args():
     args = parser.parse_args()
     return args
 
+
 def shutdown_daemons(ctx, log):
     from .orchestra import run
     nodes = {}
@@ -103,7 +106,7 @@ def shutdown_daemons(ctx, log):
                 'rados',
                 'apache2',
                 run.Raw('||'),
-                'true', # ignore errors from ceph binaries not being found
+                'true',  # ignore errors from ceph binaries not being found
             ],
             wait=False,
         )
@@ -113,6 +116,7 @@ def shutdown_daemons(ctx, log):
         log.info('Waiting for %s to finish shutdowns...', name)
         proc.exitstatus.get()
 
+
 def find_kernel_mounts(ctx, log):
     from .orchestra import run
     nodes = {}
@@ -120,9 +124,9 @@ def find_kernel_mounts(ctx, log):
     for remote in ctx.cluster.remotes.iterkeys():
         proc = remote.run(
             args=[
-                'grep', '-q', ' ceph ' , '/etc/mtab',
+                'grep', '-q', ' ceph ', '/etc/mtab',
                 run.Raw('||'),
-                'grep', '-q', '^/dev/rbd' , '/etc/mtab',
+                'grep', '-q', '^/dev/rbd', '/etc/mtab',
             ],
             wait=False,
         )
@@ -133,11 +137,12 @@ def find_kernel_mounts(ctx, log):
             proc.exitstatus.get()
             log.debug('kernel mount exists on %s', remote.name)
             kernel_mounts.append(remote)
-        except run.CommandFailedError: # no mounts!
+        except run.CommandFailedError:  # no mounts!
             log.debug('no kernel mount on %s', remote.name)
 
     return kernel_mounts
 
+
 def remove_kernel_mounts(ctx, kernel_mounts, log):
     """
     properly we should be able to just do a forced unmount,
@@ -163,6 +168,7 @@ def remove_kernel_mounts(ctx, kernel_mounts, log):
     for remote, proc in nodes:
         proc.exitstatus.get()
 
+
 def remove_osd_mounts(ctx, log):
     """
     unmount any osd data mounts (scratch disks)
@@ -181,6 +187,7 @@ def remove_osd_mounts(ctx, log):
         ],
     )
 
+
 def remove_osd_tmpfs(ctx, log):
     """
     unmount tmpfs mounts
@@ -196,20 +203,21 @@ def remove_osd_tmpfs(ctx, log):
         ],
     )
 
+
 def reboot(ctx, remotes, log):
     from .orchestra import run
     import time
     nodes = {}
     for remote in remotes:
         log.info('rebooting %s', remote.name)
-        proc = remote.run( # note use of -n to force a no-sync reboot
+        proc = remote.run(  # note use of -n to force a no-sync reboot
             args=[
                 'timeout', '5', 'sync',
                 run.Raw(';'),
                 'sudo', 'reboot', '-f', '-n'
-                ],
+            ],
             wait=False
-            )
+        )
         nodes[remote] = proc
     # we just ignore these procs because reboot -f doesn't actually
     # send anything back to the ssh client!
@@ -218,8 +226,9 @@ def reboot(ctx, remotes, log):
     from teuthology.misc import reconnect
     if remotes:
         log.info('waiting for nodes to reboot')
-        time.sleep(5) #if we try and reconnect too quickly, it succeeds!
-        reconnect(ctx, 480) #allow 8 minutes for the reboots
+        time.sleep(5)  # if we try and reconnect too quickly, it succeeds!
+        reconnect(ctx, 480)  # allow 8 minutes for the reboots
+
 
 def reset_syslog_dir(ctx, log):
     from .orchestra import run
@@ -245,6 +254,7 @@ def reset_syslog_dir(ctx, log):
         log.info('Waiting for %s to restart syslog...', name)
         proc.exitstatus.get()
 
+
 def dpkg_configure(ctx, log):
     from .orchestra import run
     nodes = {}
@@ -262,20 +272,26 @@ def dpkg_configure(ctx, log):
         nodes[remote.name] = proc
 
     for name, proc in nodes.iteritems():
-        log.info('Waiting for %s to dpkg --configure -a and apt-get -f install...', name)
+        log.info(
+            'Waiting for %s to dpkg --configure -a and apt-get -f install...',
+            name)
         proc.exitstatus.get()
 
+
 def remove_installed_packages(ctx, log):
     from teuthology.task import install as install_task
 
     dpkg_configure(ctx, log)
     config = {'project': 'ceph'}
-    install_task.remove_packages(ctx, config,
-                                 {"deb": install_task.deb_packages['ceph'],
-                                  "rpm": install_task.rpm_packages['ceph']})
+    install_task.remove_packages(
+        ctx,
+        config,
+        {"deb": install_task.deb_packages['ceph'],
+         "rpm": install_task.rpm_packages['ceph']})
     install_task.remove_sources(ctx, config)
     install_task.purge_data(ctx)
 
+
 def remove_testing_tree(ctx, log):
     from teuthology.misc import get_testdir
     from .orchestra import run
@@ -300,6 +316,7 @@ def remove_testing_tree(ctx, log):
         log.info('Waiting for %s to clear filesystem...', name)
         proc.exitstatus.get()
 
+
 def synch_clocks(remotes, log):
     from .orchestra import run
     nodes = {}
@@ -323,9 +340,12 @@ def synch_clocks(remotes, log):
         log.info('Waiting for clock to synchronize on %s...', name)
         proc.exitstatus.get()
 
+
 def main():
-    from gevent import monkey; monkey.patch_all(dns=False)
-    from .orchestra import monkey; monkey.patch_all()
+    import gevent.monkey
+    gevent.monkey.patch_all(dns=False)
+    from .orchestra import monkey
+    monkey.patch_all()
     from teuthology.run import config_file
 
     import os
@@ -363,7 +383,11 @@ def main():
     from teuthology.misc import read_config
     read_config(ctx)
 
-    log.info('\n '.join(['targets:', ] + yaml.safe_dump(ctx.config['targets'], default_flow_style=False).splitlines()))
+    log.info(
+        '\n '.join(
+            ['targets:', ] + yaml.safe_dump(
+                ctx.config['targets'],
+                default_flow_style=False).splitlines()))
 
     if ctx.owner is None:
         from teuthology.misc import get_user
@@ -373,15 +397,16 @@ def main():
         if ctx.archive:
             log.info('Killing teuthology process at pid %d', ctx.pid)
             os.system('grep -q %s /proc/%d/cmdline && sudo kill %d' % (
-                      ctx.archive,
-                      ctx.pid,
-                      ctx.pid))
+                ctx.archive,
+                ctx.pid,
+                ctx.pid))
         else:
             import subprocess
-            subprocess.check_call(["kill", "-9", str(ctx.pid)]);
+            subprocess.check_call(["kill", "-9", str(ctx.pid)])
 
     nuke(ctx, log, ctx.unlock, ctx.synch_clocks, ctx.reboot_all, ctx.noipmi)
 
+
 def nuke(ctx, log, should_unlock, sync_clocks=True, reboot_all=True,
          noipmi=False):
     from teuthology.parallel import parallel
@@ -415,7 +440,12 @@ def nuke(ctx, log, should_unlock, sync_clocks=True, reboot_all=True,
             if unnuked:
                 total_unnuked.update(unnuked)
     if total_unnuked:
-        log.error('Could not nuke the following targets:\n' + '\n '.join(['targets:', ] + yaml.safe_dump(total_unnuked, default_flow_style=False).splitlines()))
+        log.error('Could not nuke the following targets:\n' +
+                  '\n '.join(['targets:', ] +
+                             yaml.safe_dump(
+                                 total_unnuked,
+                                 default_flow_style=False).splitlines()))
+
 
 def nuke_one(ctx, targets, log, should_unlock, synch_clocks, reboot_all,
              check_locks, noipmi):
@@ -443,6 +473,7 @@ def nuke_one(ctx, targets, log, should_unlock, synch_clocks, reboot_all,
             unlock(ctx, target, ctx.owner)
     return ret
 
+
 def nuke_helper(ctx, log):
     # ensure node is up with ipmi
     from teuthology.orchestra import remote
@@ -455,11 +486,14 @@ def nuke_helper(ctx, log):
     log.debug('shortname: %s' % shortname)
     log.debug('{ctx}'.format(ctx=ctx))
     if not ctx.noipmi and 'ipmi_user' in ctx.teuthology_config:
-        console = remote.getRemoteConsole(name=host,
-                                          ipmiuser=ctx.teuthology_config['ipmi_user'],
-                                          ipmipass=ctx.teuthology_config['ipmi_password'],
-                                          ipmidomain=ctx.teuthology_config['ipmi_domain'])
-        cname = '{host}.{domain}'.format(host=shortname, domain=ctx.teuthology_config['ipmi_domain'])
+        console = remote.getRemoteConsole(
+            name=host,
+            ipmiuser=ctx.teuthology_config['ipmi_user'],
+            ipmipass=ctx.teuthology_config['ipmi_password'],
+            ipmidomain=ctx.teuthology_config['ipmi_domain'])
+        cname = '{host}.{domain}'.format(
+            host=shortname,
+            domain=ctx.teuthology_config['ipmi_domain'])
         log.info('checking console status of %s' % cname)
         if not console.check_status():
             # not powered on or can't get IPMI status. Try to power on
@@ -467,7 +501,8 @@ def nuke_helper(ctx, log):
             # try to get status again, waiting for login prompt this time
             log.info('checking console status of %s' % cname)
             if not console.check_status(100):
-                log.error('Failed to get console status for %s, disabling console...' % cname)
+                log.error('Failed to get console status for %s, ' +
+                          'disabling console...' % cname)
             log.info('console ready on %s' % cname)
         else:
             log.info('console ready on %s' % cname)
@@ -504,9 +539,8 @@ def nuke_helper(ctx, log):
         synch_clocks(need_reboot, log)
 
     log.info('Making sure firmware.git is not locked...')
-    ctx.cluster.run(args=[
-        'sudo', 'rm', '-f', '/lib/firmware/updates/.git/index.lock',
-        ])
+    ctx.cluster.run(args=['sudo', 'rm', '-f',
+                          '/lib/firmware/updates/.git/index.lock', ])
     log.info('Reseting syslog output locations...')
     reset_syslog_dir(ctx, log)
-- 
2.39.5
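
As a quick sanity check after applying the patch, one could run a PEP-8 checker over the touched file. The snippet below is not part of the commit; it assumes the third-party pep8 package (the checker in common use in 2013, later renamed pycodestyle) is installed and that it is run from the repository root.

    # Hypothetical verification snippet, not part of this patch: count any
    # remaining PEP-8 violations in the file this commit touches.
    import pep8

    style = pep8.StyleGuide()                            # default PEP-8 rules
    report = style.check_files(['teuthology/nuke.py'])   # path relative to the repo root
    print('PEP-8 violations remaining: %d' % report.total_errors)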