Refactor teuthology.schedule...
author Zack Cerza <zack@cerza.org>
Mon, 9 Jun 2014 18:31:11 +0000 (13:31 -0500)
committer Zack Cerza <zack@cerza.org>
Mon, 9 Jun 2014 23:31:01 +0000 (18:31 -0500)
... to separate the assembling of the job config from the scheduling of
the job. Also port its argument parsing to docopt.

Signed-off-by: Zack Cerza <zack.cerza@inktank.com>
scripts/schedule.py
teuthology/schedule.py
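
The docopt port replaces the argparse setup below with a plain usage string: docopt parses that text itself and returns a dict keyed by the option and argument names. A minimal sketch of that behaviour, using a trimmed-down usage text and an invented command line rather than the full doc from this commit:

import docopt

sketch_doc = """
usage: teuthology-schedule [options] <conf_file> [<conf_file> ...]

optional arguments:
  -n <name>, --name <name>              Name of suite run the job is part of
  -p <priority>, --priority <priority>  Job priority (lower is sooner)
                                        [default: 1000]
"""

# Hypothetical command line; docopt hands back a plain dict.
args = docopt.docopt(sketch_doc, argv=['--name', 'nightly', 'a.yaml', 'b.yaml'])
print(args['<conf_file>'])   # ['a.yaml', 'b.yaml']
print(args['--name'])        # 'nightly'
print(args['--priority'])    # '1000' -- defaults come back as strings,
                             # hence the int() cast in build_config() below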

diff --git a/scripts/schedule.py b/scripts/schedule.py
index 6a43e5e6d82586b1036853631b1ac4b26c4dfcc9..25f74940fe042f817ab138fd2d16efa885f2e659 100644
--- a/scripts/schedule.py
+++ b/scripts/schedule.py
@@ -1,85 +1,42 @@
-import argparse
+import docopt
 
 import teuthology.misc
 import teuthology.schedule
 
+doc = """
+usage: teuthology-schedule -h
+       teuthology-schedule [options] <conf_file> [<conf_file> ...]
 
-def main():
-    teuthology.schedule.main(parse_args())
+Schedule ceph integration tests
+
+positional arguments:
+  <conf_file>                          Config file to read
 
+optional arguments:
+  -h, --help                           Show this help message and exit
+  -v, --verbose                        Be more verbose
+  -n <name>, --name <name>             Name of suite run the job is part of
+  -d <desc>, --description <desc>      Job description
+  -o <owner>, --owner <owner>          Job owner
+  -w <worker>, --worker <worker>       Which worker to use (type of machine)
+                                       [default: plana]
+  -p <priority>, --priority <priority> Job priority (lower is sooner)
+                                       [default: 1000]
+  -N <num>, --num <num>                Number of times to run/queue the job
+                                       [default: 1]
 
-def parse_args():
-    parser = argparse.ArgumentParser(
-        description='Schedule ceph integration tests')
-    parser.add_argument(
-        'config',
-        metavar='CONFFILE',
-        nargs='*',
-        type=teuthology.misc.config_file,
-        action=teuthology.misc.MergeConfig,
-        default={},
-        help='config file to read',
-    )
-    parser.add_argument(
-        '--name',
-        help='name of suite run the job is part of',
-    )
-    parser.add_argument(
-        '--last-in-suite',
-        action='store_true',
-        default=False,
-        help='mark the last job in a suite so suite post-processing can be ' +
-        'run',
-    )
-    parser.add_argument(
-        '--email',
-        help='where to send the results of a suite (only applies to the ' +
-        'last job in a suite)',
-    )
-    parser.add_argument(
-        '--timeout',
-        help='how many seconds to wait for jobs to finish before emailing ' +
-        'results (only applies to the last job in a suite',
-        type=int,
-    )
-    parser.add_argument(
-        '--description',
-        help='job description',
-    )
-    parser.add_argument(
-        '--owner',
-        help='job owner',
-    )
-    parser.add_argument(
-        '-n', '--num',
-        default=1,
-        type=int,
-        help='number of times to run/queue the job'
-    )
-    parser.add_argument(
-        '-p', '--priority',
-        default=1000,
-        type=int,
-        help='beanstalk priority (lower is sooner)'
-    )
-    parser.add_argument(
-        '-v', '--verbose',
-        action='store_true',
-        default=False,
-        help='be more verbose',
-    )
-    parser.add_argument(
-        '-w', '--worker',
-        default='plana',
-        help='which worker to use (type of machine)',
-    )
+  --last-in-suite                      Mark the last job in a suite so suite
+                                       post-processing can be run
+                                       [default: False]
+  --email <email>                      Where to send the results of a suite.
+                                       Only applies to the last job in a suite.
+  --timeout <timeout>                  How many seconds to wait for jobs to
+                                       finish before emailing results. Only
+                                       applies to the last job in a suite.
 
-    args = parser.parse_args()
+"""
 
-    if not args.last_in_suite:
-        msg = '--email is only applicable to the last job in a suite'
-        assert not args.email, msg
-        msg = '--timeout is only applicable to the last job in a suite'
-        assert not args.timeout, msg
 
-    return args
+def main():
+    args = docopt.docopt(doc)
+    teuthology.schedule.main(args)
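
With the script's main() reduced to a thin shim, the --email/--timeout sanity checks that used to be asserts in parse_args() now raise ValueError inside teuthology.schedule.main(), so they apply however the args dict was produced. A quick hypothetical illustration (values invented for the example):

import teuthology.schedule

# --email without --last-in-suite is rejected before any config is built.
bad_args = {
    '--last-in-suite': False,
    '--email': 'qa@example.com',   # hypothetical address
    '--timeout': None,
}
try:
    teuthology.schedule.main(bad_args)
except ValueError as err:
    print(err)   # --email is only applicable to the last job in a suite
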
diff --git a/teuthology/schedule.py b/teuthology/schedule.py
index 6efe0518d4c30ee3bc70a6f99da637c5024c19a9..58cb77b8f9fabdbbfc9dd1d7bbfd98ed79c490c7 100644
--- a/teuthology/schedule.py
+++ b/teuthology/schedule.py
@@ -1,50 +1,80 @@
 import yaml
 
 import teuthology.beanstalk
-from teuthology.misc import get_user
-from teuthology.misc import read_config
+from teuthology.misc import config_file, deep_merge, get_user
 from teuthology import report
 
 
-def main(ctx):
-    if ctx.owner is None:
-        ctx.owner = 'scheduled_{user}'.format(user=get_user())
-    read_config(ctx)
+def main(args):
+    if not args['--last-in-suite']:
+        if args['--email']:
+            raise ValueError(
+                '--email is only applicable to the last job in a suite')
+        if args['--timeout']:
+            raise ValueError(
+                '--timeout is only applicable to the last job in a suite')
+    job_config = build_config(args)
+    schedule_job(job_config, args['--num'])
 
-    beanstalk = teuthology.beanstalk.connect()
-
-    tube = ctx.worker
-    beanstalk.use(tube)
 
+def build_config(args):
+    """
+    Given a dict of arguments, build a job config
+    """
+    config_paths = args.get('<conf_file>', list())
+    conf_dict = dict()
+    for conf_path in config_paths:
+        conf_dict = deep_merge(conf_dict, config_file(conf_path))
     # strip out targets; the worker will allocate new ones when we run
     # the job with --lock.
-    if ctx.config.get('targets'):
-        del ctx.config['targets']
+    if 'targets' in conf_dict:
+        del conf_dict['targets']
+    args['config'] = conf_dict
+
+    owner = args['--owner']
+    if owner is None:
+        owner = 'scheduled_{user}'.format(user=get_user())
 
     job_config = dict(
-        name=ctx.name,
-        last_in_suite=ctx.last_in_suite,
-        email=ctx.email,
-        description=ctx.description,
-        owner=ctx.owner,
-        verbose=ctx.verbose,
-        machine_type=ctx.worker,
+        name=args['--name'],
+        last_in_suite=args['--last-in-suite'],
+        email=args['--email'],
+        description=args['--description'],
+        owner=owner,
+        verbose=args['--verbose'],
+        machine_type=args['--worker'],
+        tube=args['--worker'],
+        priority=int(args['--priority']),
     )
-    # Merge job_config and ctx.config
-    job_config.update(ctx.config)
-    if ctx.timeout is not None:
-        job_config['results_timeout'] = ctx.timeout
+    # Update the dict we just created, and not the other way around, to let
+    # settings in the yaml override what's passed on the command line. This is
+    # primarily to accommodate jobs with multiple machine types.
+    job_config.update(conf_dict)
+    if args['--timeout'] is not None:
+        job_config['results_timeout'] = args['--timeout']
+    return job_config
 
+
+def schedule_job(job_config, num=1):
+    """
+    Schedule a job.
+
+    :param job_config: The complete job dict
+    :param num:      The number of times to schedule the job
+    """
+    num = int(num)
     job = yaml.safe_dump(job_config)
-    num = ctx.num
+    tube = job_config.pop('tube')
+    beanstalk = teuthology.beanstalk.connect()
+    beanstalk.use(tube)
     while num > 0:
         jid = beanstalk.put(
             job,
             ttr=60 * 60 * 24,
-            priority=ctx.priority,
+            priority=job_config['priority'],
         )
         print 'Job scheduled with name {name} and ID {jid}'.format(
-            name=ctx.name, jid=jid)
+            name=job_config['name'], jid=jid)
         job_config['job_id'] = str(jid)
         report.try_push_job_info(job_config, dict(status='queued'))
         num -= 1
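
With config assembly now separate from queueing, build_config() can be called on its own to preview exactly what would be submitted, while schedule_job() remains the only piece that talks to beanstalk. A sketch under those assumptions, using an invented args dict and config file name:

import yaml
from teuthology.schedule import build_config, schedule_job

args = {
    '<conf_file>': ['job.yaml'],   # hypothetical local config file
    '--name': 'smoke',
    '--description': None,
    '--owner': None,
    '--verbose': False,
    '--worker': 'plana',
    '--priority': '1000',
    '--last-in-suite': False,
    '--email': None,
    '--timeout': None,
}

job_config = build_config(args)     # merge the yaml, fill in defaults
print(yaml.safe_dump(job_config))   # inspect without queueing anything
# schedule_job(job_config, num=1)   # would connect to beanstalk and enqueue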