git.apps.os.sepia.ceph.com Git - teuthology.git/commitdiff
Ports the teuthology.run command to docopt instead of argparse.
author Andrew Schoen <aschoen@redhat.com>
Mon, 24 Nov 2014 23:01:33 +0000 (17:01 -0600)
committer Andrew Schoen <aschoen@redhat.com>
Sat, 6 Dec 2014 19:46:47 +0000 (13:46 -0600)
This is a fairly large refactor that also makes the code more testable.
Coverage on teuthology.run increased from 0% to 86%, and overall project
coverage increased to 24%.

Signed-off-by: Andrew Schoen <aschoen@redhat.com>
teuthology/run.py
teuthology/test/test_run.py [new file with mode: 0644]

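For context, main() now expects the plain dictionary that docopt produces rather than an argparse Namespace. The docopt usage string presumably lives in the scripts entry point, which is not shown in this diff, so the sketch below only illustrates the shape of the dictionary main(ctx) indexes into; the keys mirror the ones main() reads, while the values are made-up examples.

    # Sketch only: in practice this dict is built by docopt outside of run.py.
    ctx = {
        "--verbose": True,
        "--archive": "/tmp/archive",
        "--owner": None,            # main() falls back to get_user() when None
        "--name": "example-run",
        "--description": None,
        "--machine-type": None,     # get_machine_type() falls back to the config
        "--lock": False,
        "--block": False,
        "--suite-path": None,
        "<config>": ["job.yaml"],   # list of yaml paths merged by setup_config()
    }
    # teuthology.run.main(ctx) then replaces ctx["<config>"] with a JobConfig.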
index 5cb8c7cd7e325b6609579f94f6629b8db776e1aa..bd98874e0802f4ab9c1a111c3171f2cdd62b9b28 100644 (file)
@@ -9,24 +9,24 @@ from traceback import format_tb
 import teuthology
 from . import report
 from .job_status import get_status
-from .misc import get_user
-from .misc import read_config
+from .misc import get_user, merge_configs
 from .nuke import nuke
 from .run_tasks import run_tasks
 from .repo_utils import fetch_qa_suite
 from .results import email_results
+from .config import JobConfig, FakeNamespace
 
 log = logging.getLogger(__name__)
 
 
-def set_up_logging(ctx):
-    if ctx.verbose:
+def set_up_logging(verbose, archive):
+    if verbose:
         teuthology.log.setLevel(logging.DEBUG)
 
-    if ctx.archive is not None:
-        os.mkdir(ctx.archive)
+    if archive is not None:
+        os.mkdir(archive)
 
-        teuthology.setup_log_file(os.path.join(ctx.archive, 'teuthology.log'))
+        teuthology.setup_log_file(os.path.join(archive, 'teuthology.log'))
 
     install_except_hook()
 
@@ -43,27 +43,27 @@ def install_except_hook():
     sys.excepthook = log_exception
 
 
-def write_initial_metadata(ctx):
-    if ctx.archive is not None:
-        with file(os.path.join(ctx.archive, 'pid'), 'w') as f:
+def write_initial_metadata(archive, config, name, description, owner):
+    if archive is not None:
+        with file(os.path.join(archive, 'pid'), 'w') as f:
             f.write('%d' % os.getpid())
 
-        with file(os.path.join(ctx.archive, 'owner'), 'w') as f:
-            f.write(ctx.owner + '\n')
+        with file(os.path.join(archive, 'owner'), 'w') as f:
+            f.write(owner + '\n')
 
-        with file(os.path.join(ctx.archive, 'orig.config.yaml'), 'w') as f:
-            yaml.safe_dump(ctx.config, f, default_flow_style=False)
+        with file(os.path.join(archive, 'orig.config.yaml'), 'w') as f:
+            yaml.safe_dump(config, f, default_flow_style=False)
 
         info = {
-            'name': ctx.name,
-            'description': ctx.description,
-            'owner': ctx.owner,
+            'name': name,
+            'description': description,
+            'owner': owner,
             'pid': os.getpid(),
         }
-        if 'job_id' in ctx.config:
-            info['job_id'] = ctx.config['job_id']
+        if 'job_id' in config:
+            info['job_id'] = config['job_id']
 
-        with file(os.path.join(ctx.archive, 'info.yaml'), 'w') as f:
+        with file(os.path.join(archive, 'info.yaml'), 'w') as f:
             yaml.safe_dump(info, f, default_flow_style=False)
 
 
@@ -71,6 +71,9 @@ def fetch_tasks_if_needed(job_config):
     """
     Fetch the suite repo (and include it in sys.path) so that we can use its
     tasks.
+
+    Returns the suite_path. The existing suite_path is returned if the
+    tasks can be imported; otherwise a new suite_path is fetched and returned.
     """
     # Any scheduled job will already have the suite checked out and its
     # $PYTHONPATH set. We can check for this by looking for 'suite_path'
@@ -84,7 +87,8 @@ def fetch_tasks_if_needed(job_config):
     try:
         import tasks
         log.info("Found tasks at %s", os.path.dirname(tasks.__file__))
-        return
+        # tasks found with the existing suite branch, return it
+        return suite_path
     except ImportError:
         log.info("Tasks not found; will attempt to fetch")
 
@@ -92,69 +96,79 @@ def fetch_tasks_if_needed(job_config):
     suite_branch = job_config.get('suite_branch', ceph_branch)
     suite_path = fetch_qa_suite(suite_branch)
     sys.path.insert(1, suite_path)
-    job_config['suite_path'] = suite_path
-
+    return suite_path
 
-def main(ctx):
-    set_up_logging(ctx)
 
-    if ctx.owner is None:
-        ctx.owner = get_user()
+def setup_config(config_paths):
+    """ Takes a list of config yaml files and combines them
+        into a single dictionary. Processes and validates the dictionary, then
+        returns a JobConfig instance.
+    """
+    config = merge_configs(config_paths)
 
     # Older versions of teuthology stored job_id as an int. Convert it to a str
     # if necessary.
-    job_id = ctx.config.get('job_id')
+    job_id = config.get('job_id')
     if job_id is not None:
         job_id = str(job_id)
-        ctx.config['job_id'] = job_id
-
-    write_initial_metadata(ctx)
-    report.try_push_job_info(ctx.config, dict(status='running'))
+        config['job_id'] = job_id
 
-    if 'targets' in ctx.config and 'roles' in ctx.config:
-        targets = len(ctx.config['targets'])
-        roles = len(ctx.config['roles'])
+    # the number of targets must be >= the number of roles
+    if 'targets' in config and 'roles' in config:
+        targets = len(config['targets'])
+        roles = len(config['roles'])
         assert targets >= roles, \
             '%d targets are needed for all roles but found %d listed.' % (
                 roles, targets)
 
-    machine_type = ctx.machine_type
+    return JobConfig.from_dict(config)
+
+
+def get_machine_type(machine_type, config):
+    """ If no machine_type is given, find the appropriate machine_type
+        from the given config.
+    """
     if machine_type is None:
-        fallback_default = ctx.config.get('machine_type', 'plana')
-        machine_type = ctx.config.get('machine-type', fallback_default)
+        fallback_default = config.get('machine_type', 'plana')
+        machine_type = config.get('machine-type', fallback_default)
 
-    if ctx.block:
-        assert ctx.lock, \
-            'the --block option is only supported with the --lock option'
+    return machine_type
 
-    read_config(ctx)
 
-    log.debug('\n  '.join(['Config:', ] + yaml.safe_dump(
-        ctx.config, default_flow_style=False).splitlines()))
+def get_summary(owner, description):
+    summary = dict(success=True)
+    summary['owner'] = owner
 
-    ctx.summary = dict(success=True)
+    if description is not None:
+        summary['description'] = description
 
-    ctx.summary['owner'] = ctx.owner
+    return summary
 
-    if ctx.description is not None:
-        ctx.summary['description'] = ctx.description
 
-    if not 'tasks' in ctx.config:
+def validate_tasks(config):
+    """ Ensures that the config tasks do not include 'kernel'.
+
+        If there are no tasks, return an empty list.
+    """
+    if 'tasks' not in config:
         log.warning('No tasks specified. Continuing anyway...')
-        ctx.config['tasks'] = []
+        # return the default value for tasks
+        return []
 
-    for task in ctx.config['tasks']:
+    for task in config['tasks']:
         msg = ('kernel installation should be a base-level item, not part ' +
                'of the tasks list')
         assert 'kernel' not in task, msg
 
+
+def get_initial_tasks(lock, config, machine_type):
     init_tasks = []
-    if ctx.lock:
+    if lock:
         msg = ('You cannot specify targets in a config file when using the ' +
                '--lock option')
-        assert 'targets' not in ctx.config, msg
+        assert 'targets' not in config, msg
         init_tasks.append({'internal.lock_machines': (
-            len(ctx.config['roles']), machine_type)})
+            len(config['roles']), machine_type)})
 
     init_tasks.extend([
         {'internal.save_config': None},
@@ -164,13 +178,16 @@ def main(ctx):
         {'internal.serialize_remote_roles': None},
         {'internal.check_conflict': None},
     ])
-    if not ctx.config.get('use_existing_cluster', False):
+
+    if not config.get('use_existing_cluster', False):
         init_tasks.extend([
             {'internal.check_ceph_data': None},
             {'internal.vm_setup': None},
         ])
-    if 'kernel' in ctx.config:
-        init_tasks.append({'kernel': ctx.config['kernel']})
+
+    if 'kernel' in config:
+        init_tasks.append({'kernel': config['kernel']})
+
     init_tasks.extend([
         {'internal.base': None},
         {'internal.archive': None},
@@ -180,42 +197,87 @@ def main(ctx):
         {'internal.timer': None},
     ])
 
-    ctx.config['tasks'][:0] = init_tasks
+    return init_tasks
+
+
+def report_outcome(config, archive, summary, fake_ctx):
+    """ Reports on the final outcome of the command. """
+    status = get_status(summary)
+    passed = status == 'pass'
+
+    if not passed and bool(config.get('nuke-on-error')):
+        # only unlock if we locked them in the first place
+        nuke(fake_ctx, fake_ctx.lock)
+
+    if archive is not None:
+        with file(os.path.join(archive, 'summary.yaml'), 'w') as f:
+            yaml.safe_dump(summary, f, default_flow_style=False)
+
+    with contextlib.closing(StringIO.StringIO()) as f:
+        yaml.safe_dump(summary, f)
+        log.info('Summary data:\n%s' % f.getvalue())
+
+    with contextlib.closing(StringIO.StringIO()) as f:
+        if ('email-on-error' in config
+                and not passed):
+            yaml.safe_dump(summary, f)
+            yaml.safe_dump(config, f)
+            emsg = f.getvalue()
+            subject = "Teuthology error -- %s" % summary[
+                'failure_reason']
+            email_results(subject, "Teuthology", config[
+                          'email-on-error'], emsg)
+
+    report.try_push_job_info(config, summary)
+
+    if passed:
+        log.info(status)
+    else:
+        log.info(str(status).upper())
+        sys.exit(1)
+
+
+def main(ctx):
+    set_up_logging(ctx["--verbose"], ctx["--archive"])
+
+    if ctx["--owner"] is None:
+        ctx["--owner"] = get_user()
+
+    ctx["<config>"] = setup_config(ctx["<config>"])
+
+    write_initial_metadata(ctx["--archive"], ctx["<config>"], ctx["--name"], ctx["--description"], ctx["--owner"])
+    report.try_push_job_info(ctx["<config>"], dict(status='running'))
+
+    machine_type = get_machine_type(ctx["--machine-type"], ctx["<config>"])
+
+    if ctx["--block"]:
+        assert ctx["--lock"], \
+            'the --block option is only supported with the --lock option'
+
+    log.debug('\n  '.join(['Config:', ] + yaml.safe_dump(
+        ctx["<config>"], default_flow_style=False).splitlines()))
+
+    ctx["summary"] = get_summary(ctx["--owner"], ctx["--description"])
+
+    ctx["<config>"]["tasks"] = validate_tasks(ctx["<config>"])
+
+    init_tasks = get_initial_tasks(ctx["--lock"], ctx["<config>"], machine_type)
+
+    ctx["<config>"]['tasks'][:0] = init_tasks
+
+    if ctx["--suite-path"] is not None:
+        ctx["<config>"]['suite_path'] = ctx["--suite-path"]
 
-    if ctx.suite_path is not None:
-        ctx.config['suite_path'] = ctx.suite_path
+    # fetches the tasks and returns a new suite_path if needed
+    ctx["<config>"]["suite_path"] = fetch_tasks_if_needed(ctx["<config>"])
 
-    fetch_tasks_if_needed(ctx.config)
+    # create a FakeNamespace instance that mimics the old argparse way of doing things
+    # we do this so we can pass it to run_tasks without porting those tasks to the
+    # new way of doing things right now
+    fake_ctx = FakeNamespace(ctx)
 
     try:
-        run_tasks(tasks=ctx.config['tasks'], ctx=ctx)
+        run_tasks(tasks=ctx["<config>"]['tasks'], ctx=fake_ctx)
     finally:
-        status = get_status(ctx.summary)
-        passed = status == 'pass'
-        if not passed and bool(ctx.config.get('nuke-on-error')):
-            # only unlock if we locked them in the first place
-            nuke(ctx, ctx.lock)
-        if ctx.archive is not None:
-            with file(os.path.join(ctx.archive, 'summary.yaml'), 'w') as f:
-                yaml.safe_dump(ctx.summary, f, default_flow_style=False)
-        with contextlib.closing(StringIO.StringIO()) as f:
-            yaml.safe_dump(ctx.summary, f)
-            log.info('Summary data:\n%s' % f.getvalue())
-        with contextlib.closing(StringIO.StringIO()) as f:
-            if ('email-on-error' in ctx.config
-                    and not passed):
-                yaml.safe_dump(ctx.summary, f)
-                yaml.safe_dump(ctx.config, f)
-                emsg = f.getvalue()
-                subject = "Teuthology error -- %s" % ctx.summary[
-                    'failure_reason']
-                email_results(subject, "Teuthology", ctx.config[
-                              'email-on-error'], emsg)
-
-        report.try_push_job_info(ctx.config, ctx.summary)
-
-        if passed:
-            log.info(status)
-        else:
-            log.info(str(status).upper())
-            sys.exit(1)
+        # log the summary, report the job status and email any errors
+        report_outcome(ctx["<config>"], ctx["--archive"], ctx["summary"], fake_ctx)
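FakeNamespace is imported from teuthology.config and is not part of this diff; roughly, it wraps the docopt dictionary so that older task code can keep using attribute access. A minimal sketch of that idea, not the real implementation, could look like:

    # Illustration only: the real FakeNamespace lives in teuthology/config.py.
    # The point is to map docopt keys such as "--machine-type" and "<config>"
    # onto attributes like ctx.machine_type and ctx.config.
    class FakeNamespaceSketch(object):
        def __init__(self, args):
            for key, value in args.items():
                name = key.lstrip('-<').rstrip('>').replace('-', '_')
                setattr(self, name, value)

    ns = FakeNamespaceSketch({"--verbose": True, "<config>": {"tasks": []}})
    assert ns.verbose is True
    assert ns.config == {"tasks": []}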
diff --git a/teuthology/test/test_run.py b/teuthology/test/test_run.py
new file mode 100644 (file)
index 0000000..d43d000
--- /dev/null
@@ -0,0 +1,121 @@
+import pytest
+
+from mock import patch, call, Mock
+
+from teuthology import run
+
+
+class TestRun(object):
+    """ Tests for teuthology.run """
+
+    @patch("teuthology.log.setLevel")
+    @patch("teuthology.setup_log_file")
+    @patch("os.mkdir")
+    def test_set_up_logging(self, m_mkdir, m_setup_log_file, m_setLevel):
+        run.set_up_logging(True, "path/to/archive")
+        m_mkdir.assert_called_with("path/to/archive")
+        m_setup_log_file.assert_called_with("path/to/archive/teuthology.log")
+        assert m_setLevel.called
+
+    # because of how we import things, mock merge_configs from run - where it's used
+    # see: http://www.voidspace.org.uk/python/mock/patch.html#where-to-patch
+    @patch("teuthology.run.merge_configs")
+    def test_setup_config(self, m_merge_configs):
+        config = {"job_id": 1, "foo": "bar"}
+        m_merge_configs.return_value = config
+        result = run.setup_config(["some/config.yaml"])
+        assert m_merge_configs.called
+        assert result.job_id == "1"
+        assert result["foo"] == "bar"
+
+    @patch("teuthology.run.merge_configs")
+    def test_setup_config_targets_ok(self, m_merge_configs):
+        config = {"targets": range(4), "roles": range(2)}
+        m_merge_configs.return_value = config
+        result = run.setup_config(["some/config.yaml"])
+        assert result.targets
+        assert result.roles
+
+    @patch("teuthology.run.merge_configs")
+    def test_setup_config_targets_invalid(self, m_merge_configs):
+        config = {"targets": range(2), "roles": range(4)}
+        m_merge_configs.return_value = config
+        with pytest.raises(AssertionError):
+            run.setup_config(["some/config.yaml"])
+
+    @patch("__builtin__.file")
+    def test_write_initial_metadata(self, m_file):
+        config = {"job_id": "123", "foo": "bar"}
+        run.write_initial_metadata(
+            "some/archive/dir",
+            config,
+            "the_name",
+            "the_description",
+            "the_owner",
+        )
+        expected = [
+            call('some/archive/dir/pid', 'w'),
+            call('some/archive/dir/owner', 'w'),
+            call('some/archive/dir/orig.config.yaml', 'w'),
+            call('some/archive/dir/info.yaml', 'w')
+        ]
+        assert m_file.call_args_list == expected
+
+    def test_get_machine_type(self):
+        result = run.get_machine_type(None, {"machine-type": "the_machine_type"})
+        assert result == "the_machine_type"
+
+    def test_get_summary(self):
+        result = run.get_summary("the_owner", "the_description")
+        assert result == {"owner": "the_owner", "description": "the_description", "success": True}
+        result = run.get_summary("the_owner", None)
+        assert result == {"owner": "the_owner", "success": True}
+
+    def test_validate_tasks_invalid(self):
+        config = {"tasks": {"kernel": "can't be here"}}
+        with pytest.raises(AssertionError):
+            run.validate_tasks(config)
+
+    def test_validate_task_no_tasks(self):
+        result = run.validate_tasks({})
+        assert result == []
+
+    def test_get_initial_tasks_invalid(self):
+        with pytest.raises(AssertionError):
+            run.get_initial_tasks(True, {"targets": "can't be here"}, "machine_type")
+
+    def test_get_initial_tasks(self):
+        config = {"roles": range(2), "kernel": "the_kernel", "use_existing_cluster": False}
+        result = run.get_initial_tasks(True, config, "machine_type")
+        assert {"internal.lock_machines": (2, "machine_type")} in result
+        assert {"kernel": "the_kernel"} in result
+        # added because use_existing_cluster == False
+        assert {'internal.vm_setup': None} in result
+
+    @patch("teuthology.run.fetch_qa_suite")
+    def test_fetch_tasks_if_needed(self, m_fetch_qa_suite):
+        config = {"suite_path": "/some/suite/path", "suite_branch": "feature_branch"}
+        m_fetch_qa_suite.return_value = "/some/other/suite/path"
+        result = run.fetch_tasks_if_needed(config)
+        m_fetch_qa_suite.assert_called_with("feature_branch")
+        assert result == "/some/other/suite/path"
+
+    @patch("teuthology.run.get_status")
+    @patch("teuthology.run.nuke")
+    @patch("yaml.safe_dump")
+    @patch("teuthology.report.try_push_job_info")
+    @patch("teuthology.run.email_results")
+    @patch("__builtin__.file")
+    @patch("sys.exit")
+    def test_report_outcome(self, m_sys_exit, m_file, m_email_results, m_try_push_job_info, m_safe_dump, m_nuke, m_get_status):
+        config = {"nuke-on-error": True, "email-on-error": True}
+        m_get_status.return_value = "fail"
+        fake_ctx = Mock()
+        summary = {"failure_reason": "reasons"}
+        run.report_outcome(config, "the/archive/path", summary, fake_ctx)
+        assert m_nuke.called
+        m_try_push_job_info.assert_called_with(config, summary)
+        m_file.assert_called_with("the/archive/path/summary.yaml", "w")
+        assert m_email_results.called
+        assert m_file.called
+        assert m_sys_exit.called
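The new tests depend on pytest and mock, both already used by the project's test suite. Assuming they are installed, one way to run just this module from the repository root:

    # Runs only the new tests; -v prints one line per test.
    # Equivalent to the command line: py.test -v teuthology/test/test_run.py
    import pytest
    pytest.main(["-v", "teuthology/test/test_run.py"])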