git.apps.os.sepia.ceph.com Git - ceph.git/commitdiff
nuke: refactor to run in parallel and add unlock option
author     Josh Durgin <josh.durgin@dreamhost.com>
           Wed, 25 Apr 2012 00:51:16 +0000 (17:51 -0700)
committer  Josh Durgin <josh.durgin@dreamhost.com>
           Wed, 25 Apr 2012 00:52:01 +0000 (17:52 -0700)
nuke-on-error already nuked targets in parallel and unlocked them; now
teuthology-nuke does the same, with unlocking behind the new -u/--unlock
flag. Also outputs targets that couldn't be nuked at the end.
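
With this refactor, nuke() in teuthology/nuke.py becomes a reusable entry
point that takes the context, a logger, and an unlock flag, and nukes every
target in ctx.config['targets'] in parallel. Below is a minimal sketch of
driving it directly; the target, host key, and owner values are placeholders,
and only the ctx attributes that nuke() and nuke_one() read in this patch are
populated, so a real run would need real lock and target configuration.

    import argparse
    import logging

    from teuthology.nuke import nuke

    logging.basicConfig(level=logging.INFO)
    log = logging.getLogger(__name__)

    # Placeholder target mapping (host -> SSH host key), in the same shape
    # teuthology keeps in ctx.config['targets'].
    targets = {'ubuntu@plana01.front.sepia.ceph.com': 'ssh-rsa AAAAB3Nza...'}

    ctx = argparse.Namespace(
        config=dict(targets=targets),
        owner='some_user@some_host',   # lock owner; placeholder value
        teuthology_config={},          # normally the loaded teuthology settings
        )

    # Nuke all targets in parallel and unlock the ones that were cleaned up
    # successfully; failures are collected and logged at the end.
    nuke(ctx, log, should_unlock=True)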

teuthology/nuke.py
teuthology/run.py

diff --git a/teuthology/nuke.py b/teuthology/nuke.py
index 8b01f29281879004b540aff99ade99def1edacc5..8cd704b62b180cf34fdd57da106ef9b493a0aeec 100644
--- a/teuthology/nuke.py
+++ b/teuthology/nuke.py
@@ -42,6 +42,13 @@ def parse_args():
         default=False,
         help='synchronize clocks on all machines',
         )
+    parser.add_argument(
+        '-u', '--unlock',
+        action='store_true',
+        default=False,
+        help='Unlock each successfully nuked machine, and output targets '
+        'that could not be nuked.'
+        )
     args = parser.parse_args()
     return args
 
@@ -255,9 +262,52 @@ def main():
         from teuthology.misc import get_user
         ctx.owner = get_user()
 
-    nuke(ctx, log)
+    nuke(ctx, log, ctx.unlock, ctx.synch_clocks, ctx.reboot_all)
+
+
+def nuke(ctx, log, should_unlock, sync_clocks=True, reboot_all=True):
+    from teuthology.parallel import parallel
+    total_unnuked = {}
+    with parallel() as p:
+        for target, hostkey in ctx.config['targets'].iteritems():
+            p.spawn(
+                nuke_one,
+                ctx,
+                {target: hostkey},
+                log,
+                should_unlock,
+                sync_clocks,
+                reboot_all,
+                )
+        for unnuked in p:
+            if unnuked:
+                total_unnuked.update(unnuked)
+    if total_unnuked:
+        log.error(
+            'Could not nuke the following targets:\n' +
+            '\n  '.join(
+                ['targets:', ] +
+                yaml.safe_dump(total_unnuked,
+                               default_flow_style=False).splitlines()))
+
+def nuke_one(ctx, targets, log, should_unlock, synch_clocks, reboot_all):
+    from teuthology.lock import unlock
+    ret = None
+    ctx = argparse.Namespace(
+        config=dict(targets=targets),
+        owner=ctx.owner,
+        synch_clocks=synch_clocks,
+        reboot_all=reboot_all,
+        teuthology_config=ctx.teuthology_config,
+        )
+    try:
+        nuke_helper(ctx, log)
+    except:
+        log.exception('Could not nuke all targets in %s', targets)
+        # not re-raising so that parallel calls aren't killed
+        ret = targets
+    else:
+        if should_unlock:
+            for target in targets.keys():
+                unlock(ctx, target, ctx.owner)
+    return ret
 
-def nuke(ctx, log):
+def nuke_helper(ctx, log):
     from teuthology.task.internal import check_lock, connect
     check_lock(ctx, None)
     connect(ctx, None)
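
The new nuke() leans on teuthology.parallel.parallel as used above: each
p.spawn() queues one nuke_one() call, and iterating the parallel block yields
each call's return value (None on success, the failed target mapping
otherwise), which is how total_unnuked is accumulated. The same fan-out and
collect shape can be sketched with only the Python 3 standard library; this
is an illustration of the pattern, not teuthology.parallel's implementation,
and nuke_one_stub plus the target names are made up for the example.

    from concurrent.futures import ThreadPoolExecutor, as_completed

    def nuke_one_stub(target, hostkey):
        # Stand-in for nuke_one(): return None on success, or the target
        # mapping on failure so the caller can report it at the end.
        try:
            if target == 'bad-host':      # simulate one unreachable node
                raise RuntimeError('ssh failed')
            # real code would reboot, kill processes, clean up test state, ...
        except Exception:
            return {target: hostkey}
        return None

    targets = {'good-host': 'key1', 'bad-host': 'key2'}  # placeholder targets

    total_unnuked = {}
    with ThreadPoolExecutor(max_workers=len(targets)) as pool:
        futures = [pool.submit(nuke_one_stub, t, k)
                   for t, k in targets.items()]
        for future in as_completed(futures):
            unnuked = future.result()
            if unnuked:
                total_unnuked.update(unnuked)

    if total_unnuked:
        print('Could not nuke the following targets: %r' % total_unnuked)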
diff --git a/teuthology/run.py b/teuthology/run.py
index 6430766e12b238d15731a4fbdc61ffab33c32afa..8c9a20584d6ec80dff5b04bba389fb540319279a 100644
--- a/teuthology/run.py
+++ b/teuthology/run.py
@@ -157,18 +157,9 @@ def main():
         run_tasks(tasks=ctx.config['tasks'], ctx=ctx)
     finally:
         if not ctx.summary.get('success') and ctx.config.get('nuke-on-error'):
-            from teuthology.parallel import parallel
-            with parallel() as p:
-                for target, hostkey in ctx.config['targets'].iteritems():
-                    p.spawn(
-                        nuke,
-                        targets={target: hostkey},
-                        owner=ctx.owner,
-                        log=log,
-                        teuth_config=ctx.teuthology_config,
-                        # only unlock if we locked them in the first place
-                        should_unlock=ctx.lock,
-                        )
+            from teuthology.nuke import nuke
+            # only unlock if we locked them in the first place
+            nuke(ctx, log, ctx.lock)
         if ctx.archive is not None:
             with file(os.path.join(ctx.archive, 'summary.yaml'), 'w') as f:
                 yaml.safe_dump(ctx.summary, f, default_flow_style=False)
@@ -177,27 +168,6 @@ def main():
         import sys
         sys.exit(1)
 
-def nuke(targets, owner, log, teuth_config, should_unlock,
-         synch_clocks=True, reboot_all=True):
-    from teuthology.nuke import nuke
-    from teuthology.lock import unlock
-    ctx = argparse.Namespace(
-        config=dict(targets=targets),
-        owner=owner,
-        synch_clocks=synch_clocks,
-        reboot_all=reboot_all,
-        teuthology_config=teuth_config,
-        )
-    try:
-        nuke(ctx, log)
-    except:
-        log.exception('Could not nuke all targets in %s', targets)
-        # not re-raising the so that parallel calls aren't killed
-    else:
-        if should_unlock:
-            for target in targets.keys():
-                unlock(ctx, target, owner)
-
 def schedule():
     parser = argparse.ArgumentParser(description='Schedule ceph integration tests')
     parser.add_argument(