teuthology.git/commitdiff
Moved task.tests into its own module and created tests.test_run
author     Andrew Schoen <aschoen@redhat.com>
           Tue, 3 Feb 2015 20:58:39 +0000 (14:58 -0600)
committer  Andrew Schoen <aschoen@redhat.com>
           Thu, 5 Feb 2015 20:19:23 +0000 (14:19 -0600)
This sets up the basic structure for teuthology integration testing.
Any tests put in teuthology/task/tests will be autodiscovered and run
when 'tests' is in the task list of a job's config.

Each test will be given ctx and config, and its output will be logged
using our logger.

Signed-off-by: Andrew Schoen <aschoen@redhat.com>
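
For reference, a test dropped into the new teuthology/task/tests package only
needs to accept the injected ctx and config arguments to be discovered and run
against the job's cluster. A minimal sketch (the module, class, and test names
here are hypothetical, not part of this commit):

    # teuthology/task/tests/test_example.py  (hypothetical)
    import logging

    log = logging.getLogger(__name__)


    class TestExample(object):

        def test_can_run_a_command(self, ctx, config):
            # ctx and config are injected by the plugin in tests/__init__.py;
            # ctx.cluster.run() executes the command on the job's remotes.
            ctx.cluster.run(args=["true"])
            log.info("ran 'true' on the cluster")
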
teuthology/task/tests.py [deleted file]
teuthology/task/tests/__init__.py [new file with mode: 0644]
teuthology/task/tests/test_run.py [new file with mode: 0644]

diff --git a/teuthology/task/tests.py b/teuthology/task/tests.py
deleted file mode 100644 (file)
index 9d9e600..0000000
+++ /dev/null
@@ -1,99 +0,0 @@
-"""
-A place to put various testing functions for teuthology. Maybe at some point we
-can turn this into a proper test suite of sorts.
-"""
-import logging
-import pytest
-
-from functools import wraps
-
-from teuthology.exceptions import CommandFailedError
-
-log = logging.getLogger(__name__)
-
-
-def log_test_results(f):
-    """
-    Use this to decorate test functions to log the result of
-    the test in the teuthology.log.
-    """
-    # TODO: it'd be nice to have the output from pytest use our logger
-    # I tried a few different ways, but couldn't get it to work so I settled
-    # on using this decorator instead.
-    @wraps(f)
-    def func(ctx, config):
-        name = f.func_name
-        try:
-            log.info("running {name}...".format(name=name))
-            f(ctx, config)
-        except AssertionError:
-            log.error("***** FAILED {name}".format(name=name))
-            ctx.summary["status"] = "fail"
-            ctx.summary["failure_reason"] = "failed: {0}".format(name)
-            raise
-        except Exception:
-            log.error("***** ERROR {name}".format(name=name))
-            ctx.summary["status"] = "fail"
-            ctx.summary["failure_reason"] = "error: {0}".format(name)
-            raise
-        else:
-            log.info("***** PASSED {name}".format(name=name))
-
-    return func
-
-
-@log_test_results
-def test_command_failed_label(ctx, config):
-    result = ""
-    try:
-        force_command_failure(ctx, config)
-    except CommandFailedError as e:
-        result = str(e)
-
-    assert "working as expected" in result
-
-
-def force_command_failure(ctx, config):
-    log.info("forcing a command failure...")
-    ctx.cluster.run(
-        args=["python", "-c", "assert False"],
-        label="working as expected, nothing to see here"
-    )
-
-
-@pytest.fixture
-def ctx():
-    return {}
-
-
-@pytest.fixture
-def config():
-    return []
-
-
-class TeuthologyContextPlugin(object):
-    def __init__(self, ctx, config):
-        self.ctx = ctx
-        self.config = config
-
-    # this is pytest hook for generating tests with custom parameters
-    def pytest_generate_tests(self, metafunc):
-        # pass the teuthology ctx and config to each test method
-        metafunc.parametrize(["ctx", "config"], [(self.ctx, self.config),])
-
-
-def task(ctx, config):
-    # use pytest to find any test_* methods in this file
-    # and execute them with the teuthology ctx and config args
-    status = pytest.main(
-        args=[
-            '-s', '-q',
-            '--pyargs', __name__
-        ],
-        plugins=[TeuthologyContextPlugin(ctx, config)]
-    )
-    if status == 0:
-        log.info("OK. All tests passed!")
-    else:
-        log.error("FAIL. Saw test failures...")
-        ctx.summary["status"] = "fail"
diff --git a/teuthology/task/tests/__init__.py b/teuthology/task/tests/__init__.py
new file mode 100644 (file)
index 0000000..661cc21
--- /dev/null
@@ -0,0 +1,83 @@
+"""
+This task is used to integration test teuthology. Including this
+task in your yaml config will execute pytest which finds any tests in
+the current directory.  Each test that is discovered will be passed the
+teuthology ctx and config args that each teuthology task usually gets.
+This allows the tests to operate against the cluster.
+
+An example:
+
+tasks:
+  - tests:
+
+"""
+import logging
+import pytest
+
+
+log = logging.getLogger(__name__)
+
+
+@pytest.fixture
+def ctx():
+    return {}
+
+
+@pytest.fixture
+def config():
+    return []
+
+
+class TeuthologyContextPlugin(object):
+    def __init__(self, ctx, config):
+        self.ctx = ctx
+        self.config = config
+
+    # this is the pytest hook for generating tests with custom parameters
+    def pytest_generate_tests(self, metafunc):
+        # pass the teuthology ctx and config to each test method
+        metafunc.parametrize(["ctx", "config"], [(self.ctx, self.config),])
+
+    # log the outcome of each test
+    def pytest_runtest_makereport(self, __multicall__, item, call):
+        report = __multicall__.execute()
+
+        # after the test has been called, get its report and log it
+        if call.when == 'call':
+            # item.location[0] is a slash-delimited path to the test file
+            # being run. We only want the portion after teuthology.task.tests
+            test_path = item.location[0].replace("/", ".").split(".")
+            test_path = ".".join(test_path[4:-1])
+            # removes the string '[ctx0, config0]' after the test name
+            test_name = item.location[2].split("[")[0]
+            msg = "{path}:{name}".format(path=test_path, name=test_name)
+            if report.passed:
+                log.info("{msg} PASSED!".format(msg=msg))
+            else:
+                log.error("{msg} FAILED!".format(msg=msg))
+                log.error("{msg} failed with: '{info}'".format(
+                    msg=msg,
+                    info=call.excinfo
+                ))
+
+        return report
+
+
+def task(ctx, config):
+    """
+    Use pytest to recurse through this directory, finding any tests
+    and then executing them with the teuthology ctx and config args.
+    Your tests must follow standard pytest conventions to be discovered.
+    """
+    status = pytest.main(
+        args=[
+            '-s', '-q',
+            '--pyargs', __name__
+        ],
+        plugins=[TeuthologyContextPlugin(ctx, config)]
+    )
+    if status == 0:
+        log.info("OK. All tests passed!")
+    else:
+        log.error("FAIL. Saw test failures...")
+        ctx.summary["status"] = "fail"
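
The plugin object handed to pytest.main() above is what feeds ctx and config
into every collected test. The same technique works outside teuthology; a
standalone sketch (file and argument names hypothetical) of a plugin that
parametrizes tests through the pytest_generate_tests hook:

    # standalone_plugin_demo.py  (hypothetical, illustrates the pattern only)
    import pytest


    class InjectArgsPlugin(object):
        """Hand every collected test a fixed 'value' argument."""
        def __init__(self, value):
            self.value = value

        def pytest_generate_tests(self, metafunc):
            # Same hook used in tests/__init__.py: supply the parameter at
            # pytest.main() time instead of through a fixture.
            if "value" in metafunc.fixturenames:
                metafunc.parametrize("value", [self.value])


    def test_value_was_injected(value):
        assert value == 42


    if __name__ == "__main__":
        # pytest.main() returns 0 when all tests pass, which is how task()
        # above decides whether to mark the job's summary as failed.
        status = pytest.main(args=["-s", "-q", __file__],
                             plugins=[InjectArgsPlugin(42)])
        raise SystemExit(status)
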
diff --git a/teuthology/task/tests/test_run.py b/teuthology/task/tests/test_run.py
new file mode 100644 (file)
index 0000000..dbb03fd
--- /dev/null
@@ -0,0 +1,40 @@
+import logging
+import pytest
+
+from StringIO import StringIO
+
+from teuthology.exceptions import CommandFailedError
+
+log = logging.getLogger(__name__)
+
+
+class TestRun(object):
+    """
+    Tests to see if we can make remote procedure calls to the current cluster
+    """
+
+    def test_command_failed_label(self, ctx, config):
+        result = ""
+        try:
+            ctx.cluster.run(
+                args=["python", "-c", "assert False"],
+                label="working as expected, nothing to see here"
+            )
+        except CommandFailedError as e:
+            result = str(e)
+
+        assert "working as expected" in result
+
+    def test_command_failed_no_label(self, ctx, config):
+        with pytest.raises(CommandFailedError):
+            ctx.cluster.run(
+                args=["python", "-c", "assert False"],
+            )
+
+    def test_command_success(self, ctx, config):
+        result = StringIO()
+        ctx.cluster.run(
+            args=["python", "-c", "print 'hi'"],
+            stdout=result
+        )
+        assert result.getvalue().strip() == "hi"
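
Because pytest recurses through teuthology/task/tests, growing this suite is
just a matter of adding more test_*.py modules beside test_run.py; nothing in
__init__.py needs to change. A hypothetical follow-on module, reusing only the
calls already shown above (ctx.cluster.run with a captured stdout):

    # teuthology/task/tests/test_connectivity.py  (hypothetical)
    import logging

    from StringIO import StringIO

    log = logging.getLogger(__name__)


    class TestConnectivity(object):

        def test_hostname_returns_output(self, ctx, config):
            # Capture stdout the same way test_command_success does and make
            # sure the remote actually produced something.
            result = StringIO()
            ctx.cluster.run(args=["hostname"], stdout=result)
            assert result.getvalue().strip() != ""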