git.apps.os.sepia.ceph.com Git - teuthology.git/commitdiff
put a job into a 'waiting' status while it's locking machines 392/head
author: Andrew Schoen <aschoen@redhat.com>
Thu, 11 Dec 2014 19:40:51 +0000 (13:40 -0600)
committer: Andrew Schoen <aschoen@redhat.com>
Thu, 11 Dec 2014 19:41:06 +0000 (13:41 -0600)
Signed-off-by: Andrew Schoen <aschoen@redhat.com>
teuthology/task/internal.py

index 0075d976c48c22ed30d25a6e2234b50b22be2808..4e5bebe69a0ce9a4091ff9d9008ce0bd30a31e33 100644 (file)
@@ -19,6 +19,7 @@ from teuthology.job_status import get_status, set_status
 from teuthology.config import config as teuth_config
 from teuthology.parallel import parallel
 from ..orchestra import cluster, remote, run
+from .. import report
 
 log = logging.getLogger(__name__)
 
@@ -74,6 +75,9 @@ def lock_machines(ctx, config):
     # We want to make sure there are always this many machines available
     to_reserve = 5
 
+    # change the status during the locking process
+    report.try_push_job_info(config, dict(status='waiting'))
+
     while True:
         # get a candidate list of machines
         machines = lock.list_locks(machine_type=machine_type, up=True,
@@ -138,8 +142,13 @@ def lock_machines(ctx, config):
                 ctx.config['targets'] = newscandict
             else:
                 ctx.config['targets'] = newly_locked
-            # FIXME: Ugh.
-            log.info('\n  '.join(['Locked targets:', ] + yaml.safe_dump(ctx.config['targets'], default_flow_style=False).splitlines()))
+            locked_targets = yaml.safe_dump(
+                ctx.config['targets'],
+                default_flow_style=False
+            ).splitlines()
+            log.info('\n  '.join(['Locked targets:', ] + locked_targets))
+            # successfully locked machines, change status back to running
+            report.try_push_job_info(config, dict(status='running'))
             break
         elif not ctx.block:
             assert 0, 'not enough machines are available'