git.apps.os.sepia.ceph.com Git - teuthology.git/commitdiff
Remove some duplicate code.
author	Zack Cerza <zack@cerza.org>
Mon, 2 Jun 2014 23:29:55 +0000 (18:29 -0500)
committer	Zack Cerza <zack@cerza.org>
Tue, 3 Jun 2014 00:01:09 +0000 (19:01 -0500)
Well, not exactly duplicate, but functionally compatible. No sense in
maintaining two "walk the filesystem to assemble job information"
utilities.

Signed-off-by: Zack Cerza <zack.cerza@inktank.com>
teuthology/kill.py

index d090ff40c7c2ba2af2f619062d96e5c1cbd6b5aa..c8f746e3eaebceae0c190d3b138f6da82ac90917 100755 (executable)
@@ -34,10 +34,11 @@ def main(args):
 def kill_run(run_name, archive_base=None, owner=None, machine_type=None,
              preserve_queue=False):
     run_info = {}
+    serializer = report.ResultsSerializer(archive_base)
     if archive_base:
         run_archive_dir = os.path.join(archive_base, run_name)
         if os.path.isdir(run_archive_dir):
-            run_info = find_run_info(run_archive_dir)
+            run_info = find_run_info(serializer, run_name)
             machine_type = run_info['machine_type']
             owner = run_info['owner']
         elif machine_type is None:
@@ -55,55 +56,37 @@ def kill_run(run_name, archive_base=None, owner=None, machine_type=None,
 
 def kill_job(run_name, job_id, archive_base=None, owner=None,
              machine_type=None):
-    job_archive_dir = os.path.join(archive_base, run_name, job_id)
-    job_info = find_job_info(job_archive_dir)
+    serializer = report.ResultsSerializer(archive_base)
+    job_info = serializer.job_info(run_name, job_id)
     owner = job_info['owner']
     kill_processes(run_name, [job_info.get('pid')])
     targets = dict(targets=job_info.get('targets', {}))
     nuke_targets(targets, owner)
 
 
-def find_run_info(run_archive_dir):
+def find_run_info(serializer, run_name):
     log.info("Assembling run information...")
     run_info_fields = [
         'machine_type',
         'owner',
     ]
 
-    run_info = dict(pids=[])
+    pids = []
+    run_info = {}
     job_info = {}
-    for job_id in os.listdir(run_archive_dir):
-        job_dir = os.path.join(run_archive_dir, job_id)
+    for (job_id, job_dir) in serializer.jobs_for_run(run_name).iteritems():
         if not os.path.isdir(job_dir):
             continue
-        job_info = find_job_info(job_dir)
+        job_info = serializer.job_info(run_name, job_id)
         for key in job_info.keys():
             if key in run_info_fields and key not in run_info:
                 run_info[key] = job_info[key]
         if 'pid' in job_info:
-            run_info['pids'].append(job_info['pid'])
+            pids.append(job_info['pid'])
+    run_info['pids'] = pids
     return run_info
 
 
-def find_job_info(job_archive_dir):
-    job_info = {}
-
-    info_file = os.path.join(job_archive_dir, 'info.yaml')
-    if os.path.isfile(info_file):
-        job_info.update(yaml.safe_load(open(info_file, 'r')))
-
-    conf_file = os.path.join(job_archive_dir, 'config.yaml')
-    if os.path.isfile(conf_file):
-        job_info.update(yaml.safe_load(open(conf_file, 'r')))
-    else:
-        conf_file = os.path.join(job_archive_dir, 'orig.config.yaml')
-        if os.path.isfile(conf_file):
-            log.debug("config.yaml not found but orig.config.yaml found")
-            job_info.update(yaml.safe_load(open(conf_file, 'r')))
-
-    return job_info
-
-
 def remove_paddles_jobs(run_name):
     jobs = report.ResultsReporter().get_jobs(run_name, fields=['status'])
     job_ids = [job['job_id'] for job in jobs if job['status'] == 'queued']