git.apps.os.sepia.ceph.com Git - teuthology.git/commitdiff
Make teuthology.suite a subpackage
author Zack Cerza <zack@redhat.com>
Wed, 1 Jun 2016 21:49:32 +0000 (15:49 -0600)
committer Zack Cerza <zack@redhat.com>
Fri, 3 Jun 2016 21:51:41 +0000 (15:51 -0600)
Signed-off-by: Zack Cerza <zack@redhat.com>
20 files changed:
teuthology/describe_tests.py
teuthology/matrix.py [deleted file]
teuthology/packaging.py
teuthology/suite.py [deleted file]
teuthology/suite/__init__.py [new file with mode: 0644]
teuthology/suite/build_matrix.py [new file with mode: 0644]
teuthology/suite/matrix.py [new file with mode: 0644]
teuthology/suite/placeholder.py [new file with mode: 0644]
teuthology/suite/run.py [new file with mode: 0644]
teuthology/suite/test/suites/noop/noop.yaml [new file with mode: 0644]
teuthology/suite/test/test_build_matrix.py [new file with mode: 0644]
teuthology/suite/test/test_init.py [new file with mode: 0644]
teuthology/suite/test/test_matrix.py [new file with mode: 0644]
teuthology/suite/test/test_placeholder.py [new file with mode: 0644]
teuthology/suite/test/test_run_.py [new file with mode: 0644]
teuthology/suite/test/test_util.py [new file with mode: 0644]
teuthology/suite/util.py [new file with mode: 0644]
teuthology/test/suites/noop/noop.yaml [deleted file]
teuthology/test/test_matrix.py [deleted file]
teuthology/test/test_suite.py [deleted file]

diff --git a/teuthology/describe_tests.py b/teuthology/describe_tests.py
index 10206a67af16e8937aaf27067553c9a2eb35b6c3..f43e14807fd6ffd2d17d039a4d77ce58484054eb 100644 (file)
@@ -8,7 +8,7 @@ import sys
 import yaml
 
 from teuthology.exceptions import ParseError
-from teuthology.suite import build_matrix, combine_path
+from teuthology.suite.build_matrix import build_matrix, combine_path
 
 
 def main(args):
diff --git a/teuthology/matrix.py b/teuthology/matrix.py
deleted file mode 100644 (file)
index f4d91a5..0000000
+++ /dev/null
@@ -1,330 +0,0 @@
-import os
-import heapq
-from fractions import gcd
-
-def lcm(a, b):
-    return a*b // gcd(a, b)
-def lcml(l):
-    return reduce(lcm, l)
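A quick sanity check of these helpers (Python 2, where reduce() is a builtin and fractions.gcd still exists); Sum below relies on lcml() to size its pseudo-index space:

    assert lcm(4, 6) == 12
    assert lcml([2, 3, 4]) == lcm(lcm(2, 3), 4) == 12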
-
-class Matrix:
-    """
-    Interface for sets
-    """
-    def size(self):
-        pass
-
-    def index(self, i):
-        """
-        index() should return a recursive structure representing the paths
-        to concatenate for index i:
-
-        Result :: (PathSegment, Result) | {Result}
-        Path :: string
-
-        {Result} is a frozenset of Results indicating that
-        the set of paths resulting from each of the contained
-        Results should be concatenated.  (PathSegment, Result)
-        indicates that PathSegment should be prepended to the
-        paths resulting from Result.
-        """
-        pass
-
-    def minscanlen(self):
-        """
-        minimum run length required to get a good sample
-        """
-        pass
-
-    def cyclicity(self):
-        """
-        A cyclicity of N means that the set represented by the Matrix
-        can be chopped into N good subsets of sequential indices.
-        """
-        return self.size() / self.minscanlen()
-
-    def tostr(self, depth):
-        pass
-
-    def __str__(self):
-        """
-        str method
-        """
-        return self.tostr(0)
-
-
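A sketch of the Result shape, using a hypothetical product directory 'thrash' holding two single-fragment facets (Base.index() returns the bare item; Product and Sum wrap their children):

    # ('thrash', frozenset(['clusters.yaml', 'workloads.yaml']))
    #
    # generate_lists() (at the bottom of this module) flattens this into the
    # path tuples ('thrash', 'clusters.yaml') and ('thrash', 'workloads.yaml'),
    # i.e. one job concatenating both fragments.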
-class Cycle(Matrix):
-    """
-    Run a matrix multiple times
-    """
-    def __init__(self, num, mat):
-        self.mat = mat
-        self.num = num
-
-    def size(self):
-        return self.mat.size() * self.num
-
-    def index(self, i):
-        return self.mat.index(i % self.mat.size())
-
-    def minscanlen(self):
-        return self.mat.minscanlen()
-
-    def tostr(self, depth):
-        return '\t'*depth + "Cycle({num}):\n".format(num=self.num) + self.mat.tostr(depth + 1)
-
-class Base(Matrix):
-    """
-    Just a single item.
-    """
-    def __init__(self, item):
-        self.item = item
-
-    def size(self):
-        return 1
-
-    def index(self, i):
-        return self.item
-
-    def minscanlen(self):
-        return 1
-
-    def tostr(self, depth):
-        return '\t'*depth + "Base({item})\n".format(item=self.item)
-
-
-class Product(Matrix):
-    """
-    Builds items by taking one item from each submatrix.  Contiguous
-    subsequences should move through all dimensions.
-    """
-    def __init__(self, item, _submats):
-        assert len(_submats) > 0, \
-            "Product requires child submats to be passed in"
-        self.item = item
-
-        submats = sorted(
-            [((i.size(), ind), i) for (i, ind) in
-             zip(_submats, range(len(_submats)))], reverse=True)
-        self.submats = []
-        self._size = 1
-        for ((size, _), submat) in submats:
-            self.submats.append((self._size, submat))
-            self._size *= size
-        self.submats.reverse()
-
-        self._minscanlen = max([i.minscanlen() for i in _submats])
-        if self._minscanlen + 1 > self._size:
-            self._minscanlen  = self._size
-        else:
-            self._minscanlen += 1
-
-    def tostr(self, depth):
-        ret = '\t'*depth + "Product({item}):\n".format(item=self.item)
-        return ret + ''.join([i[1].tostr(depth+1) for i in self.submats])
-
-    def minscanlen(self):
-        return self._minscanlen
-
-    def size(self):
-        return self._size
-
-    def _index(self, i, submats):
-        """
-        We recursively reduce the N dimension problem to a two
-        dimension problem.
-
-        index(i) = (lmat.index(i % lmat.size()), rmat.index(i %
-        rmat.size())) would simply work if lmat.size() and rmat.size()
-        are relatively prime.
-
-        In general, if the gcd(lmat.size(), rmat.size()) == N,
-        index(i) would be periodic on the interval (lmat.size() *
-        rmat.size()) / N.  To adjust, we decrement the lmat index
-        number on each repeat.  Each of the N repeats must therefore
-        be distinct from the previous ones resulting in lmat.size() *
-        rmat.size() combinations.
-        """
-        assert len(submats) > 0, \
-            "_index requires non-empty submats"
-        if len(submats) == 1:
-            return frozenset([submats[0][1].index(i)])
-
-        lmat = submats[0][1]
-        lsize = lmat.size()
-
-        rsize = submats[0][0]
-
-        cycles = gcd(rsize, lsize)
-        clen = (rsize * lsize) / cycles
-        off = (i / clen) % cycles
-
-        def combine(r, s=frozenset()):
-            if type(r) is frozenset:
-                return s | r
-            return s | frozenset([r])
-
-        litems = lmat.index((i - off) % lmat.size())
-        ritems = self._index(i, submats[1:])
-        return combine(litems, combine(ritems))
-
-    def index(self, i):
-        items = self._index(i, self.submats)
-        return (self.item, items)
-
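A worked instance of the offset trick, assuming lsize = 4 and rsize = 6 (Python 2 integer division):

    # gcd(6, 4) == 2 cycles; clen == (6 * 4) / 2 == 12, so without the offset
    # index i and index i + 12 would select the identical pair.
    #   i = 0:  off == (0 / 12) % 2 == 0 -> lmat.index(0 % 4) == lmat.index(0)
    #   i = 12: off == (12 / 12) % 2 == 1 -> lmat.index((12 - 1) % 4) == lmat.index(3)
    # The right-hand side repeats with period 12, so decrementing the left
    # index on the second cycle yields all 24 distinct combinations.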
-class Concat(Matrix):
-    """
-    Concatenates all items in child matrices
-    """
-    def __init__(self, item, submats):
-        self.submats = submats
-        self.item = item
-
-    def size(self):
-        return 1
-
-    def minscanlen(self):
-        return 1
-
-    def index(self, i):
-        out = frozenset()
-        for submat in self.submats:
-            for i in range(submat.size()):
-                out = out | frozenset([submat.index(i)])
-        return (self.item, out)
-
-    def tostr(self, depth):
-        ret = '\t'*depth + "Concat({item}):\n".format(item=self.item)
-        return ret + ''.join([i[1].tostr(depth+1) for i in self.submats])
-
-class Sum(Matrix):
-    """
-    We want to mix the subsequences proportionately to their size.
-
-    The intuition is that we map all of the subsequences uniformly
-    onto rational numbers in [0, 1).  The ith subsequence with length
-    l will have index k map onto i*<epsilon> + k*(1/l).  i*<epsilon>
-    ensures that no two subsequences have an index which shares a
-    mapping in [0, 1) as long as <epsilon> is chosen to be small
-    enough.
-
-    Rather than actually dealing with rational numbers, however, we'll
-    instead map onto whole numbers in [0, pseudo_size) where
-    pseudo_size is the lcm of the subsequence lengths * the number of
-    subsequences.  Including the number of subsequences in the product
-    allows us to use 1 as <epsilon>.  For each subsequence, we designate
-    an offset (position in input list) and a multiple (pseudo_size / size)
-    such that the pseudo_index for index i is <offset> + i*<multiple>.
-
-    I don't have a good way to map index to pseudo index, so we'll
-    precompute a mapping in the constructor (self._i_to_sis) from
-    index to (subset_index, subset).
-    """
-    def __init__(self, item, _submats):
-        assert len(_submats) > 0, \
-            "Sum requires non-empty _submats"
-        self.item = item
-
-        self._pseudo_size = lcml((i.size() for i in _submats)) * len(_submats)
-        self._size = sum((i.size() for i in _submats))
-        self._submats = [
-            ((i, self._pseudo_size / s.size()), s) for (i, s) in \
-            zip(range(len(_submats)), _submats)
-        ]
-
-        def sm_to_pmsl(((offset, multiple), submat)):
-            """
-            submat tuple to pseudo minscanlen
-            """
-            return submat.minscanlen() * multiple
-
-        def index_to_pindex_generator(submats):
-            assert len(submats) > 0, "submats must be non-empty"
-            h = []
-            for (offset, multiple), submat in submats:
-                heapq.heappush(h, (offset, 0, multiple, submat))
-            while True:
-                cur, si, multiple, submat = heapq.heappop(h)
-                heapq.heappush(
-                    h,
-                    (cur + multiple, si + 1, multiple, submat))
-                yield si, submat
-
-        self._i_to_sis = dict(
-            zip(range(self._size), index_to_pindex_generator(self._submats))
-        )
-
-        self._minscanlen = self.pseudo_index_to_index(
-            max(map(sm_to_pmsl, self._submats)))
-
-    def pi_to_sis(self, pi, (offset, multiple)):
-        """
-        max(i) s.t. offset + i*multiple <= pi
-        """
-        if pi < offset:
-            return -1
-        return (pi - offset) / multiple
-
-    def pseudo_index_to_index(self, pi):
-        """
-        Count all pseudoindex values <= pi with corresponding subset indices
-        """
-        return sum((self.pi_to_sis(pi, i) + 1 for i, _ in self._submats)) - 1
-
-    def tostr(self, depth):
-        ret = '\t'*depth + "Sum({item}):\n".format(item=self.item)
-        return ret + ''.join([i[1].tostr(depth+1) for i in self._submats])
-
-    def minscanlen(self):
-        return self._minscanlen
-
-    def size(self):
-        return self._size
-
-    def index(self, i):
-        si, submat = self._i_to_sis[i % self._size]
-        return (self.item, submat.index(si))
-
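A worked example of the pseudo-index mapping, assuming two submatrices of sizes 2 and 3:

    # pseudo_size == lcm(2, 3) * 2 == 12
    # size-2 submat: offset 0, multiple 12 / 2 == 6 -> pseudo-indices 0, 6
    # size-3 submat: offset 1, multiple 12 / 3 == 4 -> pseudo-indices 1, 5, 9
    # index_to_pindex_generator() pops these off the heap in sorted order
    # (0, 1, 5, 6, 9), so the five real indices interleave the two
    # subsequences in proportion to their sizes.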
-def generate_lists(result):
-    """
-    Generates a set of tuples representing paths to concatenate
-    """
-    if type(result) is frozenset:
-        ret = []
-        for i in result:
-            ret.extend(generate_lists(i))
-        return frozenset(ret)
-    elif type(result) is tuple:
-        ret = []
-        (item, children) = result
-        for f in generate_lists(children):
-            nf = [item]
-            nf.extend(f)
-            ret.append(tuple(nf))
-        return frozenset(ret)
-    else:
-        return frozenset([(result,)])
-
-
-def generate_paths(path, result, joinf=os.path.join):
-    """
-    Generates from the result set a list of sorted paths to concatenate
-    """
-    return [reduce(joinf, i, path) for i in sorted(generate_lists(result))]
-
-
-def generate_desc(joinf, result):
-    """
-    Generates the text description of the test represented by result
-    """
-    if type(result) is frozenset:
-        ret = []
-        for i in sorted(result):
-            ret.append(generate_desc(joinf, i))
-        return '{' + ' '.join(ret) + '}'
-    elif type(result) is tuple:
-        (item, children) = result
-        cdesc = generate_desc(joinf, children)
-        return joinf(str(item), cdesc)
-    else:
-        return str(result)
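For reference, a minimal sketch of how the deleted primitives composed (hypothetical fragment names; Python 2, as this module required):

    # Mirrors what suite.py's _build_matrix() (deleted below) builds for a
    # directory containing a magic '%' file:
    distros = Sum('distros', [Base('centos.yaml'), Base('ubuntu.yaml')])
    tasks = Sum('tasks', [Base('rados.yaml'), Base('rbd.yaml'), Base('fs.yaml')])
    mat = Product('basic', [distros, tasks])
    assert mat.size() == 6
    for i in range(mat.size()):
        # e.g. 'basic/{distros/centos.yaml tasks/rados.yaml}'
        print generate_desc(os.path.join, mat.index(i))
        # e.g. ['suites/basic/distros/centos.yaml',
        #       'suites/basic/tasks/rados.yaml']
        print generate_paths('suites', mat.index(i))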
diff --git a/teuthology/packaging.py b/teuthology/packaging.py
index 4a3bb92b8e99e575e2e2164d3cd4e97c759d428f..b62f295154f0881377ae9779b34fdbcd22992fec 100644 (file)
@@ -480,7 +480,7 @@ class GitbuilderProject(object):
             "debian",
         ) else "rpm"
         # avoiding circular imports
-        from teuthology.suite import get_install_task_flavor
+        from teuthology.suite.util import get_install_task_flavor
         # when we're initializing from a full teuthology config, not just a
         # task config we need to make sure we're looking at the flavor for
         # the install task
diff --git a/teuthology/suite.py b/teuthology/suite.py
deleted file mode 100644 (file)
index 668381d..0000000
+++ /dev/null
@@ -1,1248 +0,0 @@
-# this file is responsible for submitting tests into the queue
-# by generating combinations of facets found in
-# https://github.com/ceph/ceph-qa-suite.git
-
-import copy
-from datetime import datetime
-import logging
-import os
-import requests
-import pwd
-import re
-import subprocess
-import smtplib
-import socket
-import sys
-import time
-import yaml
-from email.mime.text import MIMEText
-from tempfile import NamedTemporaryFile
-
-import teuthology
-import matrix
-from . import lock
-from .config import config, JobConfig, YamlConfig
-from .exceptions import (BranchNotFoundError, CommitNotFoundError,
-                         ScheduleFailError)
-from .misc import deep_merge, get_results_url
-from .orchestra.opsys import OS
-from .packaging import GitbuilderProject
-from .repo_utils import fetch_qa_suite, fetch_teuthology
-from .report import ResultsReporter
-from .results import UNFINISHED_STATUSES
-from .task.install import get_flavor
-
-log = logging.getLogger(__name__)
-
-
-def process_args(args):
-    conf = YamlConfig()
-    rename_args = {
-        'ceph': 'ceph_branch',
-        'sha1': 'ceph_sha1',
-        'kernel': 'kernel_branch',
-        # FIXME: ceph flavor and kernel flavor are separate things
-        'flavor': 'kernel_flavor',
-        '<config_yaml>': 'base_yaml_paths',
-        'filter': 'filter_in',
-    }
-    for (key, value) in args.iteritems():
-        # Translate --foo-bar to foo_bar
-        key = key.lstrip('--').replace('-', '_')
-        # Rename the key if necessary
-        key = rename_args.get(key) or key
-        if key == 'suite':
-            value = value.replace('/', ':')
-        elif key in ('limit', 'priority', 'num'):
-            value = int(value)
-        conf[key] = value
-    return conf
-
-
-def main(args):
-    fn = process_args(args)
-    if fn.verbose:
-        teuthology.log.setLevel(logging.DEBUG)
-
-    if not fn.machine_type or fn.machine_type == 'None':
-        schedule_fail("Must specify a machine_type")
-    elif 'multi' in fn.machine_type:
-        schedule_fail("'multi' is not a valid machine_type. " +
-                      "Maybe you want 'plana,mira,burnupi' or similar")
-
-    if fn.email:
-        config.results_email = fn.email
-    if args['--archive-upload']:
-        config.archive_upload = args['--archive-upload']
-        log.info('Will upload archives to ' + args['--archive-upload'])
-
-    subset = None
-    if args['--subset']:
-        # take input string '2/3' and turn into (2, 3)
-        subset = tuple(map(int, args['--subset'].split('/')))
-        log.info('Passed subset=%s/%s' % (str(subset[0]), str(subset[1])))
-
-    run = Run(fn)
-    job_config = run.base_config
-    name = run.name
-
-    job_config.name = name
-    job_config.priority = fn.priority
-    if config.results_email:
-        job_config.email = config.results_email
-    if fn.owner:
-        job_config.owner = fn.owner
-
-    if fn.dry_run:
-        log.debug("Base job config:\n%s" % job_config)
-
-    with NamedTemporaryFile(prefix='schedule_suite_',
-                            delete=False) as base_yaml:
-        base_yaml.write(str(job_config))
-        base_yaml_path = base_yaml.name
-    run.base_yaml_paths.insert(0, base_yaml_path)
-    run.prepare_and_schedule()
-    os.remove(base_yaml_path)
-    if not fn.dry_run and args['--wait']:
-        return wait(name, config.max_job_time,
-                    args['--archive-upload-url'])
-
-
-class Run(object):
-    WAIT_MAX_JOB_TIME = 30 * 60
-    WAIT_PAUSE = 5 * 60
-    __slots__ = (
-        'args', 'name', 'base_config', 'suite_repo_path', 'base_yaml_paths',
-        'base_args',
-    )
-
-    def __init__(self, args):
-        """
-        args must be a config.YamlConfig object
-        """
-        self.args = args
-        self.name = self.make_run_name()
-        self.base_config = self.create_initial_config()
-
-        if self.args.suite_dir:
-            self.suite_repo_path = self.args.suite_dir
-        else:
-            self.suite_repo_path = fetch_repos(self.base_config.suite_branch,
-                                               test_name=self.name)
-
-        # Interpret any relative paths as being relative to ceph-qa-suite
-        # (absolute paths are unchanged by this)
-        self.base_yaml_paths = [os.path.join(self.suite_repo_path, b) for b in
-                                self.args.base_yaml_paths]
-
-    def make_run_name(self):
-        """
-        Generate a run name. A run name looks like:
-            teuthology-2014-06-23_19:00:37-rados-dumpling-testing-basic-plana
-        """
-        user = self.args.user or pwd.getpwuid(os.getuid()).pw_name
-        # We assume timestamp is a datetime.datetime object
-        timestamp = self.args.timestamp or \
-            datetime.now().strftime('%Y-%m-%d_%H:%M:%S')
-
-        worker = get_worker(self.args.machine_type)
-        return '-'.join(
-            [
-                user, str(timestamp), self.args.suite, self.args.ceph_branch,
-                self.args.kernel_branch or '-', self.args.kernel_flavor, worker
-            ]
-        )
-
-    def create_initial_config(self):
-        """
-        Put together the config file used as the basis for each job in the run.
-        Grabs hashes for the latest ceph, kernel and teuthology versions in the
-        branches specified and specifies them so we know exactly what we're
-        testing.
-
-        :returns: A JobConfig object
-        """
-        kernel_dict = self.choose_kernel()
-        ceph_hash = self.choose_ceph_hash()
-        # We don't store ceph_version because we don't use it yet outside of
-        # logging.
-        self.choose_ceph_version(ceph_hash)
-        teuthology_branch = self.choose_teuthology_branch()
-        suite_branch = self.choose_suite_branch()
-        suite_hash = self.choose_suite_hash(suite_branch)
-
-        config_input = dict(
-            suite=self.args.suite,
-            suite_branch=suite_branch,
-            suite_hash=suite_hash,
-            ceph_branch=self.args.ceph_branch,
-            ceph_hash=ceph_hash,
-            teuthology_branch=teuthology_branch,
-            machine_type=self.args.machine_type,
-            distro=self.args.distro,
-            archive_upload=config.archive_upload,
-            archive_upload_key=config.archive_upload_key,
-        )
-        conf_dict = substitute_placeholders(dict_templ, config_input)
-        conf_dict.update(kernel_dict)
-        job_config = JobConfig.from_dict(conf_dict)
-        return job_config
-
-    def choose_kernel(self):
-        # Put together a stanza specifying the kernel hash
-        if self.args.kernel_branch == 'distro':
-            kernel_hash = 'distro'
-        # Skip the stanza if no -k given
-        elif self.args.kernel_branch is None:
-            kernel_hash = None
-        else:
-            kernel_hash = get_gitbuilder_hash(
-                'kernel', self.args.kernel_branch, self.args.kernel_flavor,
-                self.args.machine_type, self.args.distro,
-            )
-            if not kernel_hash:
-                schedule_fail(
-                    message="Kernel branch '{branch}' not found".format(
-                        branch=self.args.kernel_branch), name=self.name
-                )
-        if kernel_hash:
-            log.info("kernel sha1: {hash}".format(hash=kernel_hash))
-            kernel_dict = dict(kernel=dict(kdb=True, sha1=kernel_hash))
-            if kernel_hash != 'distro':
-                kernel_dict['kernel']['flavor'] = self.args.kernel_flavor
-        else:
-            kernel_dict = dict()
-        return kernel_dict
-
-    def choose_ceph_hash(self):
-        """
-        Get the ceph hash: if --sha1/-S is supplied, use it if it is valid, and
-        just keep the ceph_branch around.  Otherwise use the current git branch
-        tip.
-        """
-
-        if self.args.ceph_sha1:
-            ceph_hash = git_validate_sha1('ceph', self.args.ceph_sha1)
-            if not ceph_hash:
-                exc = CommitNotFoundError(self.args.ceph_sha1, 'ceph.git')
-                schedule_fail(message=str(exc), name=self.name)
-            log.info("ceph sha1 explicitly supplied")
-
-        elif self.args.ceph_branch:
-            ceph_hash = git_ls_remote('ceph', self.args.ceph_branch)
-            if not ceph_hash:
-                exc = BranchNotFoundError(self.args.ceph_branch, 'ceph.git')
-                schedule_fail(message=str(exc), name=self.name)
-
-        log.info("ceph sha1: {hash}".format(hash=ceph_hash))
-        return ceph_hash
-
-    def choose_ceph_version(self, ceph_hash):
-        if config.suite_verify_ceph_hash:
-            # Get the ceph package version
-            ceph_version = package_version_for_hash(
-                ceph_hash, self.args.kernel_flavor, self.args.distro,
-                self.args.machine_type,
-            )
-            if not ceph_version:
-                schedule_fail(
-                    "Packages for ceph hash '{ver}' not found".format(
-                        ver=ceph_hash), self.name)
-            log.info("ceph version: {ver}".format(ver=ceph_version))
-            return ceph_version
-        else:
-            log.info('skipping ceph package verification')
-
-    def choose_teuthology_branch(self):
-        teuthology_branch = self.args.teuthology_branch
-        if teuthology_branch and teuthology_branch != 'master':
-            if not git_branch_exists('teuthology', teuthology_branch):
-                exc = BranchNotFoundError(teuthology_branch, 'teuthology.git')
-                schedule_fail(message=str(exc), name=self.name)
-        elif not teuthology_branch:
-            # Decide what branch of teuthology to use
-            if git_branch_exists('teuthology', self.args.ceph_branch):
-                teuthology_branch = self.args.ceph_branch
-            else:
-                log.info(
-                    "branch {0} not in teuthology.git; will use master for"
-                    " teuthology".format(self.args.ceph_branch))
-                teuthology_branch = 'master'
-        log.info("teuthology branch: %s", teuthology_branch)
-        return teuthology_branch
-
-    def choose_suite_branch(self):
-        suite_branch = self.args.suite_branch
-        ceph_branch = self.args.ceph_branch
-        if suite_branch and suite_branch != 'master':
-            if not git_branch_exists('ceph-qa-suite', suite_branch):
-                exc = BranchNotFoundError(suite_branch, 'ceph-qa-suite.git')
-                schedule_fail(message=str(exc), name=self.name)
-        elif not suite_branch:
-            # Decide what branch of ceph-qa-suite to use
-            if git_branch_exists('ceph-qa-suite', ceph_branch):
-                suite_branch = ceph_branch
-            else:
-                log.info(
-                    "branch {0} not in ceph-qa-suite.git; will use master for"
-                    " ceph-qa-suite".format(ceph_branch))
-                suite_branch = 'master'
-        return suite_branch
-
-    def choose_suite_hash(self, suite_branch):
-        suite_hash = git_ls_remote('ceph-qa-suite', suite_branch)
-        if not suite_hash:
-            exc = BranchNotFoundError(suite_branch, 'ceph-qa-suite.git')
-            schedule_fail(message=str(exc), name=self.name)
-        log.info("ceph-qa-suite branch: %s %s", suite_branch, suite_hash)
-        return suite_hash
-
-    def build_base_args(self):
-        base_args = [
-            '--name', self.name,
-            '--num', str(self.args.num),
-            '--worker', get_worker(self.args.machine_type),
-        ]
-        if self.args.dry_run:
-            base_args.append('--dry-run')
-        if self.args.priority is not None:
-            base_args.extend(['--priority', str(self.args.priority)])
-        if self.args.verbose:
-            base_args.append('-v')
-        if self.args.owner:
-            base_args.extend(['--owner', self.args.owner])
-        return base_args
-
-    def prepare_and_schedule(self):
-        """
-        Puts together some "base arguments" with which to execute
-        teuthology-schedule for each job, then passes them and other parameters
-        to schedule_suite(). Finally, schedules a "last-in-suite" job that
-        sends an email to the specified address (if one is configured).
-        """
-        self.base_args = self.build_base_args()
-
-        # Make sure the yaml paths are actually valid
-        for yaml_path in self.base_yaml_paths:
-            full_yaml_path = os.path.join(self.suite_repo_path, yaml_path)
-            if not os.path.exists(full_yaml_path):
-                raise IOError("File not found: " + full_yaml_path)
-
-        num_jobs = self.schedule_suite()
-
-        if self.base_config.email and num_jobs:
-            arg = copy.deepcopy(self.base_args)
-            arg.append('--last-in-suite')
-            arg.extend(['--email', self.base_config.email])
-            if self.args.timeout:
-                arg.extend(['--timeout', self.args.timeout])
-            teuthology_schedule(
-                args=arg,
-                dry_run=self.args.dry_run,
-                verbose=self.args.verbose,
-                log_prefix="Results email: ",
-            )
-            results_url = get_results_url(self.base_config.name)
-            if results_url:
-                log.info("Test results viewable at %s", results_url)
-
-    def schedule_suite(self):
-        """
-        Schedule the suite-run. Returns the number of jobs scheduled.
-        """
-        name = self.name
-        arch = get_arch(self.base_config.machine_type)
-        suite_name = self.base_config.suite
-        suite_path = os.path.join(
-            self.suite_repo_path, 'suites',
-            self.base_config.suite.replace(':', '/'))
-        log.debug('Suite %s in %s' % (suite_name, suite_path))
-        configs = [
-            (combine_path(suite_name, item[0]), item[1]) for item in
-            build_matrix(suite_path, subset=self.args.subset)
-        ]
-        log.info('Suite %s in %s generated %d jobs (not yet filtered)' % (
-            suite_name, suite_path, len(configs)))
-
-        # used as a local cache for package versions from gitbuilder
-        package_versions = dict()
-        jobs_to_schedule = []
-        jobs_missing_packages = []
-        for description, fragment_paths in configs:
-            base_frag_paths = [strip_fragment_path(x) for x in fragment_paths]
-            limit = self.args.limit
-            if limit > 0 and len(jobs_to_schedule) >= limit:
-                log.info(
-                    'Stopped after {limit} jobs due to --limit={limit}'.format(
-                        limit=limit))
-                break
-            # Break apart the filter parameter (one string) into comma
-            # separated components to be used in searches.
-            filter_in = self.args.filter_in
-            if filter_in:
-                filter_list = [x.strip() for x in filter_in.split(',')]
-                if not any([x in description for x in filter_list]):
-                    all_filt = []
-                    for filt_samp in filter_list:
-                        all_filt.extend(
-                            [x.find(filt_samp) < 0 for x in base_frag_paths]
-                        )
-                    if all(all_filt):
-                        continue
-            filter_out = self.args.filter_out
-            if filter_out:
-                filter_list = [x.strip() for x in filter_out.split(',')]
-                if any([x in description for x in filter_list]):
-                    continue
-                all_filt_val = False
-                for filt_samp in filter_list:
-                    flist = [filt_samp in x for x in base_frag_paths]
-                    if any(flist):
-                        all_filt_val = True
-                        continue
-                if all_filt_val:
-                    continue
-
-            raw_yaml = '\n'.join([file(a, 'r').read() for a in fragment_paths])
-
-            parsed_yaml = yaml.load(raw_yaml)
-            os_type = parsed_yaml.get('os_type') or self.base_config.os_type
-            exclude_arch = parsed_yaml.get('exclude_arch')
-            exclude_os_type = parsed_yaml.get('exclude_os_type')
-
-            if exclude_arch and exclude_arch == arch:
-                log.info('Skipping due to excluded_arch: %s facets %s',
-                         exclude_arch, description)
-                continue
-            if exclude_os_type and exclude_os_type == os_type:
-                log.info('Skipping due to excluded_os_type: %s facets %s',
-                         exclude_os_type, description)
-                continue
-
-            arg = copy.deepcopy(self.base_args)
-            arg.extend([
-                '--description', description,
-                '--',
-            ])
-            arg.extend(self.base_yaml_paths)
-            arg.extend(fragment_paths)
-
-            job = dict(
-                yaml=parsed_yaml,
-                desc=description,
-                sha1=self.base_config.sha1,
-                args=arg
-            )
-
-            if config.suite_verify_ceph_hash:
-                full_job_config = dict()
-                deep_merge(full_job_config, self.base_config.to_dict())
-                deep_merge(full_job_config, parsed_yaml)
-                flavor = get_install_task_flavor(full_job_config)
-                sha1 = self.base_config.sha1
-                # Get package versions for this sha1, os_type and flavor. If
-                # we've already retrieved them in a previous loop, they'll be
-                # present in package_versions and gitbuilder will not be asked
-                # again for them.
-                package_versions = get_package_versions(
-                    sha1,
-                    os_type,
-                    flavor,
-                    package_versions
-                )
-                if not has_packages_for_distro(sha1, os_type, flavor,
-                                               package_versions):
-                    m = "Packages for os_type '{os}', flavor {flavor} and " + \
-                        "ceph hash '{ver}' not found"
-                    log.error(m.format(os=os_type, flavor=flavor, ver=sha1))
-                    jobs_missing_packages.append(job)
-
-            jobs_to_schedule.append(job)
-
-        for job in jobs_to_schedule:
-            log.info(
-                'Scheduling %s', job['desc']
-            )
-
-            log_prefix = ''
-            if job in jobs_missing_packages:
-                log_prefix = "Missing Packages: "
-                if (not self.args.dry_run and not
-                        config.suite_allow_missing_packages):
-                    schedule_fail(
-                        "At least one job needs packages that don't exist for "
-                        "hash {sha1}.".format(sha1=self.base_config.sha1),
-                        name,
-                    )
-            teuthology_schedule(
-                args=job['args'],
-                dry_run=self.args.dry_run,
-                verbose=self.args.verbose,
-                log_prefix=log_prefix,
-            )
-            throttle = self.args.throttle
-            if not self.args.dry_run and throttle:
-                log.info("pause between jobs : --throttle " + str(throttle))
-                time.sleep(int(throttle))
-
-        count = len(jobs_to_schedule)
-        missing_count = len(jobs_missing_packages)
-        log.info(
-            'Suite %s in %s scheduled %d jobs.' %
-            (suite_name, suite_path, count)
-        )
-        log.info('%d/%d jobs were filtered out.',
-                 (len(configs) - count),
-                 len(configs))
-        if missing_count:
-            log.warn('Scheduled %d/%d jobs that are missing packages!',
-                     missing_count, count)
-        return count
-
-
-class Job(object):
-    pass
-
-
-class WaitException(Exception):
-    pass
-
-
-def wait(name, max_job_time, upload_url):
-    stale_job = max_job_time + Run.WAIT_MAX_JOB_TIME
-    reporter = ResultsReporter()
-    past_unfinished_jobs = []
-    progress = time.time()
-    log.info("waiting for the suite to complete")
-    log.debug("the list of unfinished jobs will be displayed "
-              "every " + str(Run.WAIT_PAUSE / 60) + " minutes")
-    exit_code = 0
-    while True:
-        jobs = reporter.get_jobs(name, fields=['job_id', 'status'])
-        unfinished_jobs = []
-        for job in jobs:
-            if job['status'] in UNFINISHED_STATUSES:
-                unfinished_jobs.append(job)
-            elif job['status'] != 'pass':
-                exit_code = 1
-        if len(unfinished_jobs) == 0:
-            log.info("wait is done")
-            break
-        if (len(past_unfinished_jobs) == len(unfinished_jobs) and
-                time.time() - progress > stale_job):
-            raise WaitException(
-                "no progress since " + str(config.max_job_time) +
-                " + " + str(Run.WAIT_PAUSE) + " seconds")
-        if len(past_unfinished_jobs) != len(unfinished_jobs):
-            past_unfinished_jobs = unfinished_jobs
-            progress = time.time()
-        time.sleep(Run.WAIT_PAUSE)
-        job_ids = [job['job_id'] for job in unfinished_jobs]
-        log.debug('wait for jobs ' + str(job_ids))
-    jobs = reporter.get_jobs(name, fields=['job_id', 'status',
-                                           'description', 'log_href'])
-    # dead, fail, pass : show fail/dead jobs first
-    jobs = sorted(jobs, lambda a, b: cmp(a['status'], b['status']))
-    for job in jobs:
-        if upload_url:
-            url = os.path.join(upload_url, name, job['job_id'])
-        else:
-            url = job['log_href']
-        log.info(job['status'] + " " + url + " " + job['description'])
-    return exit_code
-
-
-def fetch_repos(branch, test_name):
-    """
-    Fetch the suite repo (and also the teuthology repo) so that we can use it
-    to build jobs. Repos are stored in ~/src/.
-
-    The reason the teuthology repo is also fetched is that currently we use
-    subprocess to call teuthology-schedule to schedule jobs so we need to make
-    sure it is up-to-date. For that reason we always fetch the master branch
-    for test scheduling, regardless of what teuthology branch is requested for
-    testing.
-
-    :returns: The path to the suite repo on disk
-    """
-    try:
-        # When a user is scheduling a test run from their own copy of
-        # teuthology, let's not wreak havoc on it.
-        if config.automated_scheduling:
-            # We use teuthology's master branch in all cases right now
-            if config.teuthology_path is None:
-                fetch_teuthology('master')
-        suite_repo_path = fetch_qa_suite(branch)
-    except BranchNotFoundError as exc:
-        schedule_fail(message=str(exc), name=test_name)
-    return suite_repo_path
-
-
-def schedule_fail(message, name=''):
-    """
-    If an email address has been specified anywhere, send an alert there. Then
-    raise a ScheduleFailError.
-    """
-    email = config.results_email
-    if email:
-        subject = "Failed to schedule {name}".format(name=name)
-        msg = MIMEText(message)
-        msg['Subject'] = subject
-        msg['From'] = config.results_sending_email
-        msg['To'] = email
-        try:
-            smtp = smtplib.SMTP('localhost')
-            smtp.sendmail(msg['From'], [msg['To']], msg.as_string())
-            smtp.quit()
-        except socket.error:
-            log.exception("Failed to connect to mail server!")
-    raise ScheduleFailError(message, name)
-
-
-def get_worker(machine_type):
-    """
-    Map a given machine_type to a beanstalkd worker. If machine_type mentions
-    multiple machine types - e.g. 'plana,mira', then this returns 'multi'.
-    Otherwise it returns what was passed.
-    """
-    if ',' in machine_type:
-        return 'multi'
-    else:
-        return machine_type
-
-
-def get_gitbuilder_hash(project='ceph', branch='master', flavor='basic',
-                        machine_type='plana', distro='ubuntu'):
-    """
-    Find the hash representing the head of the project's repository via
-    querying a gitbuilder repo.
-
-    Will return None in the case of a 404 or any other HTTP error.
-    """
-    # Alternate method for github-hosted projects - left here for informational
-    # purposes
-    # resp = requests.get(
-    #     'https://api.github.com/repos/ceph/ceph/git/refs/heads/master')
-    # hash = resp.json()['object']['sha']
-    (arch, release, _os) = get_distro_defaults(distro, machine_type)
-    gp = GitbuilderProject(
-        project,
-        dict(
-            branch=branch,
-            flavor=flavor,
-            os_type=distro,
-            arch=arch,
-        ),
-    )
-    return gp.sha1
-
-
-def get_distro_defaults(distro, machine_type):
-    """
-    Given a distro (e.g. 'ubuntu') and machine type, return:
-        (arch, release, _os)
-
-    where _os is an OS object whose package_type gives the third value in
-    these examples:
-        ('x86_64', 'trusty', 'deb') when passed 'ubuntu' and 'plana'
-        ('armv7l', 'saucy', 'deb') when passed 'ubuntu' and 'saya'
-        ('x86_64', 'wheezy', 'deb') when passed 'debian'
-        ('x86_64', 'fedora20', 'rpm') when passed 'fedora'
-        ('x86_64', 'centos7', 'rpm') when passed anything else
-    """
-    arch = 'x86_64'
-    if distro in (None, 'None'):
-        os_type = 'centos'
-        os_version = '7'
-    elif distro in ('rhel', 'centos'):
-        os_type = 'centos'
-        os_version = '7'
-    elif distro == 'ubuntu':
-        os_type = distro
-        if machine_type == 'saya':
-            os_version = '13.10'
-            arch = 'armv7l'
-        else:
-            os_version = '14.04'
-    elif distro == 'debian':
-        os_type = distro
-        os_version = '7'
-    elif distro == 'fedora':
-        os_type = distro
-        os_version = '20'
-    else:
-        raise ValueError("Invalid distro value passed: %s" % distro)
-    _os = OS(name=os_type, version=os_version)
-    release = GitbuilderProject._get_distro(
-        _os.name,
-        _os.version,
-        _os.codename,
-    )
-    template = "Defaults for machine_type {mtype} distro {distro}: " \
-        "arch={arch}, release={release}, pkg_type={pkg}"
-    log.debug(template.format(
-        mtype=machine_type,
-        distro=_os.name,
-        arch=arch,
-        release=release,
-        pkg=_os.package_type)
-    )
-    return (
-        arch,
-        release,
-        _os,
-    )
-
-
-def package_version_for_hash(hash, kernel_flavor='basic',
-                             distro='rhel', machine_type='plana'):
-    """
-    Query gitbuilder for the version of the ceph packages built for the
-    given hash, kernel flavor, distro and machine type.
-
-    :returns: a version string.
-    """
-    (arch, release, _os) = get_distro_defaults(distro, machine_type)
-    if distro in (None, 'None'):
-        distro = _os.name
-    gp = GitbuilderProject(
-        'ceph',
-        dict(
-            flavor=kernel_flavor,
-            os_type=distro,
-            arch=arch,
-            sha1=hash,
-        ),
-    )
-    return gp.version
-
-
-def git_ls_remote(project, branch, project_owner='ceph'):
-    """
-    Find the latest sha1 for a given project's branch.
-
-    :returns: The sha1 if found; else None
-    """
-    url = build_git_url(project, project_owner)
-    cmd = "git ls-remote {} {}".format(url, branch)
-    result = subprocess.check_output(
-        cmd, shell=True).split()
-    sha1 = result[0] if result else None
-    log.debug("{} -> {}".format(cmd, sha1))
-    return sha1
-
-
-def git_validate_sha1(project, sha1, project_owner='ceph'):
-    '''
-    Use http to validate that project contains sha1
-    I can't find a way to do this with git, period, so
-    we have specific urls to HEAD for github and git.ceph.com/gitweb
-    for now
-    '''
-    url = build_git_url(project, project_owner)
-
-    if '/github.com/' in url:
-        url = '/'.join((url, 'commit', sha1))
-    elif '/git.ceph.com/' in url:
-        # kinda specific to knowing git.ceph.com is gitweb
-        url = ('http://git.ceph.com/?p=%s.git;a=blob_plain;f=.gitignore;hb=%s'
-               % (project, sha1))
-    else:
-        raise RuntimeError(
-            'git_validate_sha1: how do I check %s for a sha1?' % url
-        )
-
-    resp = requests.head(url)
-    if resp.ok:
-        return sha1
-    return None
-
-
-def build_git_url(project, project_owner='ceph'):
-    """
-    Return the git URL to clone the project
-    """
-    if project == 'ceph-qa-suite':
-        base = config.get_ceph_qa_suite_git_url()
-    elif project == 'ceph':
-        base = config.get_ceph_git_url()
-    else:
-        base = 'https://github.com/{project_owner}/{project}'
-    url_templ = re.sub('\.git$', '', base)
-    return url_templ.format(project_owner=project_owner, project=project)
-
-
-def git_branch_exists(project, branch, project_owner='ceph'):
-    """
-    Query the git repository to check the existence of a project's branch
-    """
-    return git_ls_remote(project, branch, project_owner) is not None
-
-
-def get_branch_info(project, branch, project_owner='ceph'):
-    """
-    NOTE: This is currently not being used because of GitHub's API rate
-    limiting. We use github_branch_exists() instead.
-
-    Use the GitHub API to query a project's branch. Returns:
-        {u'object': {u'sha': <a_sha_string>,
-                    u'type': <string>,
-                    u'url': <url_to_commit>},
-        u'ref': u'refs/heads/<branch>',
-        u'url': <url_to_branch>}
-
-    We mainly use this to check if a branch exists.
-    """
-    url_templ = 'https://api.github.com/repos/{project_owner}/{project}/git/refs/heads/{branch}'  # noqa
-    url = url_templ.format(project_owner=project_owner, project=project,
-                           branch=branch)
-    resp = requests.get(url)
-    if resp.ok:
-        return resp.json()
-
-
-def strip_fragment_path(original_path):
-    """
-    Given a path, remove everything up to and including '/suites/'.  Part of the fix for
-    http://tracker.ceph.com/issues/15470
-    """
-    scan_after = '/suites/'
-    scan_start = original_path.find(scan_after)
-    if scan_start > 0:
-        return original_path[scan_start + len(scan_after):]
-    return original_path
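For instance, with a hypothetical checkout path:

    # strip_fragment_path(
    #     '/home/u/src/ceph-qa-suite_master/suites/rados/basic/tasks.yaml')
    # -> 'rados/basic/tasks.yaml'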
-
-
-def teuthology_schedule(args, verbose, dry_run, log_prefix=''):
-    """
-    Run teuthology-schedule to schedule individual jobs.
-
-    If --dry-run has been passed but --verbose has been passed just once, don't
-    actually run the command - only print what would be executed.
-
-    If --dry-run has been passed and --verbose has been passed multiple times,
-    do both.
-    """
-    exec_path = os.path.join(
-        os.path.dirname(sys.argv[0]),
-        'teuthology-schedule')
-    args.insert(0, exec_path)
-    if dry_run:
-        # Quote any individual args so that individual commands can be copied
-        # and pasted in order to execute them individually.
-        printable_args = []
-        for item in args:
-            if ' ' in item:
-                printable_args.append("'%s'" % item)
-            else:
-                printable_args.append(item)
-        log.info('{0}{1}'.format(
-            log_prefix,
-            ' '.join(printable_args),
-        ))
-    if not dry_run or (dry_run and verbose > 1):
-        subprocess.check_call(args=args)
-
-
-def get_install_task_flavor(job_config):
-    """
-    Pokes through the install task's configuration (including its overrides) to
-    figure out which flavor it will want to install.
-
-    Only looks at the first instance of the install task in job_config.
-    """
-    project = job_config.get('project', 'ceph')
-    tasks = job_config.get('tasks', dict())
-    overrides = job_config.get('overrides', dict())
-    install_overrides = overrides.get('install', dict())
-    project_overrides = install_overrides.get(project, dict())
-    first_install_config = dict()
-    for task in tasks:
-        if task.keys()[0] == 'install':
-            first_install_config = task.values()[0] or dict()
-            break
-    first_install_config = copy.deepcopy(first_install_config)
-    deep_merge(first_install_config, install_overrides)
-    deep_merge(first_install_config, project_overrides)
-    return get_flavor(first_install_config)
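A minimal sketch with a hypothetical job config, assuming deep_merge() lets the overrides win and get_flavor() falls back to 'basic' when no flavor is set:

    job_config = {
        'project': 'ceph',
        'tasks': [{'install': None}, {'ceph': None}],
        'overrides': {'install': {'ceph': {'flavor': 'notcmalloc'}}},
    }
    # get_install_task_flavor(job_config) -> 'notcmalloc'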
-
-
-def get_package_versions(sha1, os_type, kernel_flavor, package_versions=None):
-    """
-    Will retrieve the package versions for the given sha1, os_type and
-    kernel_flavor from gitbuilder.
-
-    Optionally, a package_versions dict can be provided
-    from previous calls to this function to avoid calling gitbuilder for
-    information we've already retrieved.
-
-    The package_versions dict will be in the following format::
-
-        {
-            "sha1": {
-                "ubuntu": {
-                    "basic": "version",
-                    }
-                "rhel": {
-                    "basic": "version",
-                    }
-            },
-            "another-sha1": {
-                "ubuntu": {
-                    "basic": "version",
-                    }
-            }
-        }
-
-    :param sha1:             The sha1 hash of the ceph version.
-    :param os_type:          The distro we want to get packages for, given
-                             the ceph sha1. Ex. 'ubuntu', 'rhel', etc.
-    :param kernel_flavor:    The kernel flavor
-    :param package_versions: Use this optionally to use cached results of
-                             previous calls to gitbuilder.
-    :returns:                A dict of package versions. Will return versions
-                             for all hashes and distros, not just for the given
-                             hash and distro.
-    """
-    if not package_versions:
-        package_versions = dict()
-
-    os_type = str(os_type)
-
-    os_types = package_versions.get(sha1, dict())
-    package_versions_for_flavor = os_types.get(os_type, dict())
-    if kernel_flavor not in package_versions_for_flavor:
-        package_version = package_version_for_hash(
-            sha1,
-            kernel_flavor,
-            distro=os_type
-        )
-        package_versions_for_flavor[kernel_flavor] = package_version
-        os_types[os_type] = package_versions_for_flavor
-        package_versions[sha1] = os_types
-
-    return package_versions
-
-
-def has_packages_for_distro(sha1, os_type, kernel_flavor,
-                            package_versions=None):
-    """
-    Checks to see if gitbuilder has packages for the given sha1, os_type and
-    kernel_flavor.
-
-    Optionally, a package_versions dict can be provided
-    from previous calls to this function to avoid calling gitbuilder for
-    information we've already retrieved.
-
-    The package_versions dict will be in the following format::
-
-        {
-            "sha1": {
-                "ubuntu": {
-                    "basic": "version",
-                    }
-                "rhel": {
-                    "basic": "version",
-                    }
-            },
-            "another-sha1": {
-                "ubuntu": {
-                    "basic": "version",
-                    }
-            }
-        }
-
-    :param sha1:             The sha1 hash of the ceph version.
-    :param os_type:          The distro we want to get packages for, given
-                             the ceph sha1. Ex. 'ubuntu', 'rhel', etc.
-    :param kernel_flavor:    The kernel flavor
-    :param package_versions: Use this optionally to use cached results of
-                             previous calls to gitbuilder.
-    :returns:                True, if packages are found. False otherwise.
-    """
-    os_type = str(os_type)
-    if not package_versions:
-        package_versions = get_package_versions(sha1, os_type, kernel_flavor)
-
-    package_versions_for_hash = package_versions.get(sha1, dict()).get(
-        os_type, dict())
-    # we want to return a boolean here, not the actual package versions
-    return bool(package_versions_for_hash.get(kernel_flavor, None))
-
-
-def combine_path(left, right):
-    """
-    os.path.join(a, b) doesn't like it when b is None
-    """
-    if right:
-        return os.path.join(left, right)
-    return left
-
-
-def generate_combinations(path, mat, generate_from, generate_to):
-    """
-    Return a list of items described by path
-
-    The input is just a path.  The output is an array of (description,
-    [file list]) tuples.
-
-    For a normal file we generate a new item for the result list.
-
-    For a directory, we (recursively) generate a new item for each
-    file/dir.
-
-    For a directory with a magic '+' file, we generate a single item
-    that concatenates all files/subdirs.
-
-    For a directory with a magic '%' file, we generate a result set
-    for each item in the directory, and then do a product to generate
-    a result list with all combinations.
-
-    The final description (after recursion) for each item will look
-    like a relative path.  If there was a % product, that path
-    component will appear as a file with braces listing the selection
-    of chosen subitems.
-    """
-    ret = []
-    for i in range(generate_from, generate_to):
-        output = mat.index(i)
-        ret.append((
-            matrix.generate_desc(combine_path, output),
-            matrix.generate_paths(path, output, combine_path)))
-    return ret
-
-
-def build_matrix(path, subset=None):
-    """
-    Return a list of items described by path such that if the list of
-    items is chunked into mincyclicity pieces, each piece is still a
-    good subset of the suite.
-
-    A good subset of a product ensures that each facet member appears
-    at least once.  A good subset of a sum ensures that the subset of
-    each sub collection reflected in the subset is a good subset.
-
-    A mincyclicity of 0 does not attempt to enforce the good subset
-    property.
-
-    The input is just a path.  The output is an array of (description,
-    [file list]) tuples.
-
-    For a normal file we generate a new item for the result list.
-
-    For a directory, we (recursively) generate a new item for each
-    file/dir.
-
-    For a directory with a magic '+' file, we generate a single item
-    that concatenates all files/subdirs (A Sum).
-
-    For a directory with a magic '%' file, we generate a result set
-    for each item in the directory, and then do a product to generate
-    a result list with all combinations (A Product).
-
-    The final description (after recursion) for each item will look
-    like a relative path.  If there was a % product, that path
-    component will appear as a file with braces listing the selection
-    of chosen subitems.
-
-    :param path:        The path to search for yaml fragments
-    :param subset:      (index, outof)
-    """
-    mat, first, matlimit = _get_matrix(path, subset)
-    return generate_combinations(path, mat, first, matlimit)
-
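A sketch against a hypothetical fragment tree:

    # suites/rados/basic/%                       <- magic file: convolve facets
    # suites/rados/basic/clusters/fixed-2.yaml
    # suites/rados/basic/tasks/rados_api.yaml
    # suites/rados/basic/tasks/rados_cls.yaml
    #
    # build_matrix('suites/rados/basic') yields two (description, fragments)
    # tuples, with descriptions roughly:
    #   '{clusters/fixed-2.yaml tasks/rados_api.yaml}'
    #   '{clusters/fixed-2.yaml tasks/rados_cls.yaml}'
    # each pairing the shared cluster fragment with one task fragment.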
-
-def _get_matrix(path, subset=None):
-    mat = None
-    first = None
-    matlimit = None
-    if subset:
-        (index, outof) = subset
-        mat = _build_matrix(path, mincyclicity=outof)
-        first = (mat.size() / outof) * index
-        if index == outof or index == outof - 1:
-            matlimit = mat.size()
-        else:
-            matlimit = (mat.size() / outof) * (index + 1)
-    else:
-        first = 0
-        mat = _build_matrix(path)
-        matlimit = mat.size()
-    return mat, first, matlimit
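Worked subset arithmetic, assuming mat.size() == 30 and --subset 1/3:

    # (index, outof) == (1, 3)
    # first    == (30 / 3) * 1 == 10
    # matlimit == (30 / 3) * 2 == 20
    # so this run generates combinations 10..19; building the matrix with
    # mincyclicity=outof keeps each such slice a good subset.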
-
-
-def _build_matrix(path, mincyclicity=0, item=''):
-    if not os.path.exists(path):
-        raise IOError('%s does not exist' % path)
-    if os.path.isfile(path):
-        if path.endswith('.yaml'):
-            return matrix.Base(item)
-        return None
-    if os.path.isdir(path):
-        if path.endswith('.disable'):
-            return None
-        files = sorted(os.listdir(path))
-        if len(files) == 0:
-            return None
-        if '+' in files:
-            # concatenate items
-            files.remove('+')
-            submats = []
-            for fn in sorted(files):
-                submat = _build_matrix(
-                    os.path.join(path, fn),
-                    mincyclicity,
-                    fn)
-                if submat is not None:
-                    submats.append(submat)
-            return matrix.Concat(item, submats)
-        elif '%' in files:
-            # convolve items
-            files.remove('%')
-            submats = []
-            for fn in sorted(files):
-                submat = _build_matrix(
-                    os.path.join(path, fn),
-                    mincyclicity=0,
-                    item=fn)
-                if submat is not None:
-                    submats.append(submat)
-            mat = matrix.Product(item, submats)
-            if mat and mat.cyclicity() < mincyclicity:
-                mat = matrix.Cycle(
-                    (mincyclicity + mat.cyclicity() - 1) / mat.cyclicity(), mat
-                )
-            return mat
-        else:
-            # list items
-            submats = []
-            for fn in sorted(files):
-                submat = _build_matrix(
-                    os.path.join(path, fn),
-                    mincyclicity,
-                    fn)
-                if submat is None:
-                    continue
-                if submat.cyclicity() < mincyclicity:
-                    submat = matrix.Cycle(
-                        ((mincyclicity + submat.cyclicity() - 1) /
-                         submat.cyclicity()),
-                        submat)
-                submats.append(submat)
-            return matrix.Sum(item, submats)
-    assert False, "Invalid path %s seen in _build_matrix" % path
-    return None
-
-
-def get_arch(machine_type):
-    """
-    Based on a given machine_type, return its architecture by querying the lock
-    server.
-
-    :returns: A string or None
-    """
-    result = lock.list_locks(machine_type=machine_type, count=1)
-    if not result:
-        log.warn("No machines found with machine_type %s!", machine_type)
-    else:
-        return result[0]['arch']
-
-
-class Placeholder(object):
-    """
-    A placeholder for use with substitute_placeholders. Simply has a 'name'
-    attribute.
-    """
-    def __init__(self, name):
-        self.name = name
-
-
-def substitute_placeholders(input_dict, values_dict):
-    """
-    Replace any Placeholder instances with values named in values_dict. In the
-    case of None values, the key is omitted from the result.
-
-    Searches through nested dicts.
-
-    :param input_dict:  A dict which may contain one or more Placeholder
-                        instances as values.
-    :param values_dict: A dict, with keys matching the 'name' attributes of all
-                        of the Placeholder instances in the input_dict, and
-                        values to be substituted.
-    :returns:           The modified input_dict
-    """
-    input_dict = copy.deepcopy(input_dict)
-
-    def _substitute(input_dict, values_dict):
-        for key, value in input_dict.items():
-            if isinstance(value, dict):
-                _substitute(value, values_dict)
-            elif isinstance(value, Placeholder):
-                if values_dict[value.name] is None:
-                    del input_dict[key]
-                    continue
-                # If there is a Placeholder without a corresponding entry in
-                # values_dict, we will hit a KeyError - we want this.
-                input_dict[key] = values_dict[value.name]
-        return input_dict
-
-    return _substitute(input_dict, values_dict)
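A minimal sketch; keys whose values are None are dropped entirely:

    templ = {'sha1': Placeholder('ceph_hash'),
             'os_type': Placeholder('distro')}
    substitute_placeholders(templ, {'ceph_hash': 'abc123', 'distro': None})
    # -> {'sha1': 'abc123'}   ('os_type' is omitted because distro is None)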
-
-
-# Template for the config that becomes the base for each generated job config
-dict_templ = {
-    'branch': Placeholder('ceph_branch'),
-    'sha1': Placeholder('ceph_hash'),
-    'teuthology_branch': Placeholder('teuthology_branch'),
-    'archive_upload': Placeholder('archive_upload'),
-    'archive_upload_key': Placeholder('archive_upload_key'),
-    'machine_type': Placeholder('machine_type'),
-    'nuke-on-error': True,
-    'os_type': Placeholder('distro'),
-    'overrides': {
-        'admin_socket': {
-            'branch': Placeholder('ceph_branch'),
-        },
-        'ceph': {
-            'conf': {
-                'mon': {
-                    'debug mon': 20,
-                    'debug ms': 1,
-                    'debug paxos': 20},
-                'osd': {
-                    'debug filestore': 20,
-                    'debug journal': 20,
-                    'debug ms': 1,
-                    'debug osd': 25
-                }
-            },
-            'log-whitelist': ['slow request'],
-            'sha1': Placeholder('ceph_hash'),
-        },
-        'ceph-deploy': {
-            'branch': {
-                'dev-commit': Placeholder('ceph_hash'),
-            },
-            'conf': {
-                'client': {
-                    'log file': '/var/log/ceph/ceph-$name.$pid.log'
-                },
-                'mon': {
-                    'debug mon': 1,
-                    'debug ms': 20,
-                    'debug paxos': 20,
-                    'osd default pool size': 2
-                }
-            }
-        },
-        'install': {
-            'ceph': {
-                'sha1': Placeholder('ceph_hash'),
-            }
-        },
-        'workunit': {
-            'sha1': Placeholder('ceph_hash'),
-        }
-    },
-    'suite': Placeholder('suite'),
-    'suite_branch': Placeholder('suite_branch'),
-    'suite_sha1': Placeholder('suite_hash'),
-    'tasks': [],
-}
diff --git a/teuthology/suite/__init__.py b/teuthology/suite/__init__.py
new file mode 100644 (file)
index 0000000..273a56a
--- /dev/null
@@ -0,0 +1,139 @@
+# this file is responsible for submitting tests into the queue
+# by generating combinations of facets found in
+# https://github.com/ceph/ceph-qa-suite.git
+
+import logging
+import os
+import time
+from tempfile import NamedTemporaryFile
+
+import teuthology
+from ..config import config, YamlConfig
+from ..report import ResultsReporter
+from ..results import UNFINISHED_STATUSES
+
+from .run import Run
+from .util import schedule_fail
+
+log = logging.getLogger(__name__)
+
+
+def process_args(args):
+    conf = YamlConfig()
+    rename_args = {
+        'ceph': 'ceph_branch',
+        'sha1': 'ceph_sha1',
+        'kernel': 'kernel_branch',
+        # FIXME: ceph flavor and kernel flavor are separate things
+        'flavor': 'kernel_flavor',
+        '<config_yaml>': 'base_yaml_paths',
+        'filter': 'filter_in',
+    }
+    for (key, value) in args.iteritems():
+        # Translate --foo-bar to foo_bar
+        key = key.lstrip('--').replace('-', '_')
+        # Rename the key if necessary
+        key = rename_args.get(key) or key
+        if key == 'suite':
+            value = value.replace('/', ':')
+        elif key in ('limit', 'priority', 'num'):
+            value = int(value)
+        conf[key] = value
+    return conf
+
+
+def main(args):
+    fn = process_args(args)
+    if fn.verbose:
+        teuthology.log.setLevel(logging.DEBUG)
+
+    if not fn.machine_type or fn.machine_type == 'None':
+        schedule_fail("Must specify a machine_type")
+    elif 'multi' in fn.machine_type:
+        schedule_fail("'multi' is not a valid machine_type. " +
+                      "Maybe you want 'plana,mira,burnupi' or similar")
+
+    if fn.email:
+        config.results_email = fn.email
+    if args['--archive-upload']:
+        config.archive_upload = args['--archive-upload']
+        log.info('Will upload archives to ' + args['--archive-upload'])
+
+    subset = None
+    if args['--subset']:
+        # take input string '2/3' and turn into (2, 3)
+        subset = tuple(map(int, args['--subset'].split('/')))
+        log.info('Passed subset=%s/%s' % (str(subset[0]), str(subset[1])))
+
+    run = Run(fn)
+    job_config = run.base_config
+    name = run.name
+
+    job_config.name = name
+    job_config.priority = fn.priority
+    if config.results_email:
+        job_config.email = config.results_email
+    if fn.owner:
+        job_config.owner = fn.owner
+
+    if fn.dry_run:
+        log.debug("Base job config:\n%s" % job_config)
+
+    with NamedTemporaryFile(prefix='schedule_suite_',
+                            delete=False) as base_yaml:
+        base_yaml.write(str(job_config))
+        base_yaml_path = base_yaml.name
+    run.base_yaml_paths.insert(0, base_yaml_path)
+    run.prepare_and_schedule()
+    os.remove(base_yaml_path)
+    if not fn.dry_run and args['--wait']:
+        return wait(name, config.max_job_time,
+                    args['--archive-upload-url'])
+
+
+class WaitException(Exception):
+    pass
+
+
+def wait(name, max_job_time, upload_url):
+    stale_job = max_job_time + Run.WAIT_MAX_JOB_TIME
+    reporter = ResultsReporter()
+    past_unfinished_jobs = []
+    progress = time.time()
+    log.info("waiting for the suite to complete")
+    log.debug("the list of unfinished jobs will be displayed "
+              "every " + str(Run.WAIT_PAUSE / 60) + " minutes")
+    exit_code = 0
+    while True:
+        jobs = reporter.get_jobs(name, fields=['job_id', 'status'])
+        unfinished_jobs = []
+        for job in jobs:
+            if job['status'] in UNFINISHED_STATUSES:
+                unfinished_jobs.append(job)
+            elif job['status'] != 'pass':
+                exit_code = 1
+        if len(unfinished_jobs) == 0:
+            log.info("wait is done")
+            break
+        if (len(past_unfinished_jobs) == len(unfinished_jobs) and
+                time.time() - progress > stale_job):
+            raise WaitException(
+                "no progress since " + str(max_job_time) +
+                " + " + str(Run.WAIT_MAX_JOB_TIME) + " seconds")
+        if len(past_unfinished_jobs) != len(unfinished_jobs):
+            past_unfinished_jobs = unfinished_jobs
+            progress = time.time()
+        time.sleep(Run.WAIT_PAUSE)
+        job_ids = [job['job_id'] for job in unfinished_jobs]
+        log.debug('wait for jobs ' + str(job_ids))
+    jobs = reporter.get_jobs(name, fields=['job_id', 'status',
+                                           'description', 'log_href'])
+    # dead, fail, pass : show fail/dead jobs first
+    jobs = sorted(jobs, lambda a, b: cmp(a['status'], b['status']))
+    for job in jobs:
+        if upload_url:
+            url = os.path.join(upload_url, name, job['job_id'])
+        else:
+            url = job['log_href']
+        log.info(job['status'] + " " + url + " " + job['description'])
+    return exit_code
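
As a rough, standalone sketch of the argument normalization process_args()
performs above (a plain dict stands in for the YamlConfig the real code
returns; the argument values are invented for illustration):

    # Sketch only: mirrors the key translation in process_args().
    rename_args = {'ceph': 'ceph_branch', 'sha1': 'ceph_sha1'}

    def normalize(args):
        conf = {}
        for key, value in args.items():
            key = key.lstrip('-').replace('-', '_')  # --foo-bar -> foo_bar
            key = rename_args.get(key) or key        # apply any rename
            if key in ('limit', 'priority', 'num'):
                value = int(value)
            conf[key] = value
        return conf

    # normalize({'--ceph': 'master', '--num': '3'})
    # => {'ceph_branch': 'master', 'num': 3}
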
diff --git a/teuthology/suite/build_matrix.py b/teuthology/suite/build_matrix.py
new file mode 100644 (file)
index 0000000..ce85879
--- /dev/null
@@ -0,0 +1,167 @@
+import os
+
+from . import matrix
+
+
+def build_matrix(path, subset=None):
+    """
+    Return a list of items described by path such that if the list of
+    items is chunked into mincyclicity pieces, each piece is still a
+    good subset of the suite.
+
+    A good subset of a product ensures that each facet member appears
+    at least once.  A good subset of a sum ensures that the subset of
+    each sub collection reflected in the subset is a good subset.
+
+    A mincyclicity of 0 does not attempt to enforce the good subset
+    property.
+
+    The input is just a path.  The output is an array of (description,
+    [file list]) tuples.
+
+    For a normal file we generate a new item for the result list.
+
+    For a directory, we (recursively) generate a new item for each
+    file/dir.
+
+    For a directory with a magic '+' file, we generate a single item
+    that concatenates all files/subdirs (a Concat).
+
+    For a directory with a magic '%' file, we generate a result set
+    for each item in the directory, and then do a product to generate
+    a result list with all combinations (A Product).
+
+    The final description (after recursion) for each item will look
+    like a relative path.  If there was a % product, that path
+    component will appear as a file with braces listing the selection
+    of chosen subitems.
+
+    :param path:        The path to search for yaml fragments
+    :param subset:      (index, outof)
+    """
+    mat, first, matlimit = _get_matrix(path, subset)
+    return generate_combinations(path, mat, first, matlimit)
+
+
+def _get_matrix(path, subset=None):
+    mat = None
+    first = None
+    matlimit = None
+    if subset:
+        (index, outof) = subset
+        mat = _build_matrix(path, mincyclicity=outof)
+        first = (mat.size() / outof) * index
+        if index == outof or index == outof - 1:
+            matlimit = mat.size()
+        else:
+            matlimit = (mat.size() / outof) * (index + 1)
+    else:
+        first = 0
+        mat = _build_matrix(path)
+        matlimit = mat.size()
+    return mat, first, matlimit
+
+
+def _build_matrix(path, mincyclicity=0, item=''):
+    if not os.path.exists(path):
+        raise IOError('%s does not exist (abs %s)' % (path, os.path.abspath(path)))
+    if os.path.isfile(path):
+        if path.endswith('.yaml'):
+            return matrix.Base(item)
+        return None
+    if os.path.isdir(path):
+        if path.endswith('.disable'):
+            return None
+        files = sorted(os.listdir(path))
+        if len(files) == 0:
+            return None
+        if '+' in files:
+            # concatenate items
+            files.remove('+')
+            submats = []
+            for fn in sorted(files):
+                submat = _build_matrix(
+                    os.path.join(path, fn),
+                    mincyclicity,
+                    fn)
+                if submat is not None:
+                    submats.append(submat)
+            return matrix.Concat(item, submats)
+        elif '%' in files:
+            # convolve items
+            files.remove('%')
+            submats = []
+            for fn in sorted(files):
+                submat = _build_matrix(
+                    os.path.join(path, fn),
+                    mincyclicity=0,
+                    item=fn)
+                if submat is not None:
+                    submats.append(submat)
+            mat = matrix.Product(item, submats)
+            if mat and mat.cyclicity() < mincyclicity:
+                mat = matrix.Cycle(
+                    (mincyclicity + mat.cyclicity() - 1) / mat.cyclicity(), mat
+                )
+            return mat
+        else:
+            # list items
+            submats = []
+            for fn in sorted(files):
+                submat = _build_matrix(
+                    os.path.join(path, fn),
+                    mincyclicity,
+                    fn)
+                if submat is None:
+                    continue
+                if submat.cyclicity() < mincyclicity:
+                    submat = matrix.Cycle(
+                        ((mincyclicity + submat.cyclicity() - 1) /
+                         submat.cyclicity()),
+                        submat)
+                submats.append(submat)
+            return matrix.Sum(item, submats)
+    assert False, "Invalid path %s seen in _build_matrix" % path
+    return None
+
+
+def generate_combinations(path, mat, generate_from, generate_to):
+    """
+    Return a list of items described by path
+
+    The input is just a path.  The output is an array of (description,
+    [file list]) tuples.
+
+    For a normal file we generate a new item for the result list.
+
+    For a directory, we (recursively) generate a new item for each
+    file/dir.
+
+    For a directory with a magic '+' file, we generate a single item
+    that concatenates all files/subdirs.
+
+    For a directory with a magic '%' file, we generate a result set
+    for each item in the directory, and then do a product to generate
+    a result list with all combinations.
+
+    The final description (after recursion) for each item will look
+    like a relative path.  If there was a % product, that path
+    component will appear as a file with braces listing the selection
+    of chosen subitems.
+    """
+    ret = []
+    for i in range(generate_from, generate_to):
+        output = mat.index(i)
+        ret.append((
+            matrix.generate_desc(combine_path, output),
+            matrix.generate_paths(path, output, combine_path)))
+    return ret
+
+
+def combine_path(left, right):
+    """
+    os.path.join(a, b) doesn't like it when b is None
+    """
+    if right:
+        return os.path.join(left, right)
+    return left
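
As a worked example of the directory semantics build_matrix() describes
(a hypothetical layout, not one taken from ceph-qa-suite):

    suite/
        %                  # magic file: convolve the sibling entries
        clusters/
            fixed-1.yaml
            fixed-2.yaml
        tasks/
            a.yaml
            b.yaml
            c.yaml

    # build_matrix('suite') yields 2 * 3 = 6 (description, [file list])
    # tuples, one per combination of a clusters/ fragment with a tasks/
    # fragment. Replacing '%' with '+' would instead yield a single item
    # whose file list concatenates all five yaml fragments.
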
diff --git a/teuthology/suite/matrix.py b/teuthology/suite/matrix.py
new file mode 100644 (file)
index 0000000..f4d91a5
--- /dev/null
@@ -0,0 +1,330 @@
+import os
+import heapq
+from fractions import gcd
+
+def lcm(a, b):
+    return a*b // gcd(a, b)
+def lcml(l):
+    return reduce(lcm, l)
+
+class Matrix:
+    """
+    Interface for sets
+    """
+    def size(self):
+        pass
+
+    def index(self, i):
+        """
+        index() should return a recursive structure representing the paths
+        to concatenate for index i:
+
+        Result :: (PathSegment, Result) | {Result}
+        Path :: string
+
+        {Result} is a frozenset of Results indicating that
+        the set of paths resulting from each of the contained
+        Results should be concatenated.  (PathSegment, Result)
+        indicates that PathSegment should be prepended to the
+        paths resulting from Result.
+        """
+        pass
+
+    def minscanlen(self):
+        """
+        minimum run length required to get a good sample
+        """
+        pass
+
+    def cyclicity(self):
+        """
+        A cyclicity of N means that the set represented by the Matrix
+        can be chopped into N good subsets of sequential indices.
+        """
+        return self.size() / self.minscanlen()
+
+    def tostr(self, depth):
+        pass
+
+    def __str__(self):
+        """
+        str method
+        """
+        return self.tostr(0)
+
+
+class Cycle(Matrix):
+    """
+    Run a matrix multiple times
+    """
+    def __init__(self, num, mat):
+        self.mat = mat
+        self.num = num
+
+    def size(self):
+        return self.mat.size() * self.num
+
+    def index(self, i):
+        return self.mat.index(i % self.mat.size())
+
+    def minscanlen(self):
+        return self.mat.minscanlen()
+
+    def tostr(self, depth):
+        return '\t'*depth + "Cycle({num}):\n".format(num=self.num) + self.mat.tostr(depth + 1)
+
+class Base(Matrix):
+    """
+    Just a single item.
+    """
+    def __init__(self, item):
+        self.item = item
+
+    def size(self):
+        return 1
+
+    def index(self, i):
+        return self.item
+
+    def minscanlen(self):
+        return 1
+
+    def tostr(self, depth):
+        return '\t'*depth + "Base({item})\n".format(item=self.item)
+
+
+class Product(Matrix):
+    """
+    Builds items by taking one item from each submatrix.  Contiguous
+    subsequences should move through all dimensions.
+    """
+    def __init__(self, item, _submats):
+        assert len(_submats) > 0, \
+            "Product requires child submats to be passed in"
+        self.item = item
+
+        submats = sorted(
+            [((i.size(), ind), i) for (i, ind) in
+             zip(_submats, range(len(_submats)))], reverse=True)
+        self.submats = []
+        self._size = 1
+        for ((size, _), submat) in submats:
+            self.submats.append((self._size, submat))
+            self._size *= size
+        self.submats.reverse()
+
+        self._minscanlen = max([i.minscanlen() for i in _submats])
+        if self._minscanlen + 1 > self._size:
+            self._minscanlen = self._size
+        else:
+            self._minscanlen += 1
+
+    def tostr(self, depth):
+        ret = '\t'*depth + "Product({item}):\n".format(item=self.item)
+        return ret + ''.join([i[1].tostr(depth+1) for i in self.submats])
+
+    def minscanlen(self):
+        return self._minscanlen
+
+    def size(self):
+        return self._size
+
+    def _index(self, i, submats):
+        """
+        We recursively reduce the N-dimensional problem to a
+        two-dimensional problem.
+
+        index(i) = (lmat.index(i % lmat.size()), rmat.index(i %
+        rmat.size())) would simply work if lmat.size() and rmat.size()
+        are relatively prime.
+
+        In general, if the gcd(lmat.size(), rmat.size()) == N,
+        index(i) would be periodic on the interval (lmat.size() *
+        rmat.size()) / N.  To adjust, we decrement the lmat index
+        number on each repeat.  Each of the N repeats must therefore
+        be distinct from the previous ones resulting in lmat.size() *
+        rmat.size() combinations.
+        """
+        assert len(submats) > 0, \
+            "_index requires non-empty submats"
+        if len(submats) == 1:
+            return frozenset([submats[0][1].index(i)])
+
+        lmat = submats[0][1]
+        lsize = lmat.size()
+
+        rsize = submats[0][0]
+
+        cycles = gcd(rsize, lsize)
+        clen = (rsize * lsize) / cycles
+        off = (i / clen) % cycles
+
+        def combine(r, s=frozenset()):
+            if type(r) is frozenset:
+                return s | r
+            return s | frozenset([r])
+
+        litems = lmat.index((i - off) % lmat.size())
+        ritems = self._index(i, submats[1:])
+        return combine(litems, combine(ritems))
+
+    def index(self, i):
+        items = self._index(i, self.submats)
+        return (self.item, items)
+
+class Concat(Matrix):
+    """
+    Concatenates all items in child matrices
+    """
+    def __init__(self, item, submats):
+        self.submats = submats
+        self.item = item
+
+    def size(self):
+        return 1
+
+    def minscanlen(self):
+        return 1
+
+    def index(self, i):
+        out = frozenset()
+        for submat in self.submats:
+            for i in range(submat.size()):
+                out = out | frozenset([submat.index(i)])
+        return (self.item, out)
+
+    def tostr(self, depth):
+        ret = '\t'*depth + "Concat({item}):\n".format(item=self.item)
+        return ret + ''.join([i[1].tostr(depth+1) for i in self.submats])
+
+class Sum(Matrix):
+    """
+    We want to mix the subsequences proportionately to their size.
+
+    The intuition is that we map all of the subsequences uniformly
+    onto rational numbers in [0, 1).  The ith subsequence with length
+    l will have index k map onto i*<epsilon> + k*(1/l).  i*<epsilon>
+    ensures that no two subsequences have an index which shares a
+    mapping in [0, 1) as long as <epsilon> is chosen to be small
+    enough.
+
+    Rather than actually dealing with rational numbers, however, we'll
+    instead map onto whole numbers in [0, pseudo_size) where
+    pseudo_size is the lcm of the subsequence lengths * the number of
+    subsequences.  Including the number of subsequences in the product
+    allows us to use 1 as <epsilon>.  For each subsequence, we designate
+    an offset (position in input list) and a multiple (pseudo_size / size)
+    such that the pseudo_index for index i is <offset> + i*<multiple>.
+
+    I don't have a good way to map index to pseudo index, so we'll
+    precompute a mapping in the constructor (self._i_to_sis) from
+    index to (subset_index, subset).
+    """
+    def __init__(self, item, _submats):
+        assert len(_submats) > 0, \
+            "Sum requires non-empty _submats"
+        self.item = item
+
+        self._pseudo_size = lcml((i.size() for i in _submats)) * len(_submats)
+        self._size = sum((i.size() for i in _submats))
+        self._submats = [
+            ((i, self._pseudo_size / s.size()), s) for (i, s) in \
+            zip(range(len(_submats)), _submats)
+        ]
+
+        def sm_to_pmsl(((offset, multiple), submat)):
+            """
+            submat tuple to pseudo minscanlen
+            """
+            return submat.minscanlen() * multiple
+
+        def index_to_pindex_generator(submats):
+            assert len(submats) > 0, "submats must be non-empty"
+            h = []
+            for (offset, multiple), submat in submats:
+                heapq.heappush(h, (offset, 0, multiple, submat))
+            while True:
+                cur, si, multiple, submat = heapq.heappop(h)
+                heapq.heappush(
+                    h,
+                    (cur + multiple, si + 1, multiple, submat))
+                yield si, submat
+
+        self._i_to_sis = dict(
+            zip(range(self._size), index_to_pindex_generator(self._submats))
+        )
+
+        self._minscanlen = self.pseudo_index_to_index(
+            max(map(sm_to_pmsl, self._submats)))
+
+    def pi_to_sis(self, pi, (offset, multiple)):
+        """
+        max(i) s.t. offset + i*multiple <= pi
+        """
+        if pi < offset:
+            return -1
+        return (pi - offset) / multiple
+
+    def pseudo_index_to_index(self, pi):
+        """
+        Count all pseudoindex values <= pi with corresponding subset indices
+        """
+        return sum((self.pi_to_sis(pi, i) + 1 for i, _ in self._submats)) - 1
+
+    def tostr(self, depth):
+        ret = '\t'*depth + "Sum({item}):\n".format(item=self.item)
+        return ret + ''.join([i[1].tostr(depth+1) for i in self._submats])
+
+    def minscanlen(self):
+        return self._minscanlen
+
+    def size(self):
+        return self._size
+
+    def index(self, i):
+        si, submat = self._i_to_sis[i % self._size]
+        return (self.item, submat.index(si))
+
+def generate_lists(result):
+    """
+    Generates a set of tuples representing paths to concatenate
+    """
+    if type(result) is frozenset:
+        ret = []
+        for i in result:
+            ret.extend(generate_lists(i))
+        return frozenset(ret)
+    elif type(result) is tuple:
+        ret = []
+        (item, children) = result
+        for f in generate_lists(children):
+            nf = [item]
+            nf.extend(f)
+            ret.append(tuple(nf))
+        return frozenset(ret)
+    else:
+        return frozenset([(result,)])
+
+
+def generate_paths(path, result, joinf=os.path.join):
+    """
+    Generates, from the result set, a sorted list of paths to concatenate
+    """
+    return [reduce(joinf, i, path) for i in sorted(generate_lists(result))]
+
+
+def generate_desc(joinf, result):
+    """
+    Generates the text description of the test represented by result
+    """
+    if type(result) is frozenset:
+        ret = []
+        for i in sorted(result):
+            ret.append(generate_desc(joinf, i))
+        return '{' + ' '.join(ret) + '}'
+    elif type(result) is tuple:
+        (item, children) = result
+        cdesc = generate_desc(joinf, children)
+        return joinf(str(item), cdesc)
+    else:
+        return str(result)
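
A small numeric illustration of the scheme Product._index() describes:
take two facets of sizes 2 and 4, so gcd(2, 4) = 2.

    # The naive pairing (i % 2, i % 4) has period lcm(2, 4) = 4, so
    # indices 0..7 revisit the same 4 of the 8 combinations:
    #   (0,0) (1,1) (0,2) (1,3) (0,0) (1,1) (0,2) (1,3)
    # With cycles = gcd(2, 4) = 2 and clen = (2 * 4) / 2 = 4, the left
    # index is shifted back by off = (i / clen) % cycles on each repeat:
    #   (0,0) (1,1) (0,2) (1,3) (1,0) (0,1) (1,2) (0,3)
    # which covers all 2 * 4 = 8 combinations.
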
diff --git a/teuthology/suite/placeholder.py b/teuthology/suite/placeholder.py
new file mode 100644 (file)
index 0000000..8f2a072
--- /dev/null
@@ -0,0 +1,104 @@
+import copy
+
+
+class Placeholder(object):
+    """
+    A placeholder for use with substitute_placeholders. Simply has a 'name'
+    attribute.
+    """
+    def __init__(self, name):
+        self.name = name
+
+
+def substitute_placeholders(input_dict, values_dict):
+    """
+    Replace any Placeholder instances with values named in values_dict. In the
+    case of None values, the key is omitted from the result.
+
+    Searches through nested dicts.
+
+    :param input_dict:  A dict which may contain one or more Placeholder
+                        instances as values.
+    :param values_dict: A dict, with keys matching the 'name' attributes of all
+                        of the Placeholder instances in the input_dict, and
+                        values to be substituted.
+    :returns:           The modified input_dict
+    """
+    input_dict = copy.deepcopy(input_dict)
+
+    def _substitute(input_dict, values_dict):
+        for key, value in input_dict.items():
+            if isinstance(value, dict):
+                _substitute(value, values_dict)
+            elif isinstance(value, Placeholder):
+                if values_dict[value.name] is None:
+                    del input_dict[key]
+                    continue
+                # If there is a Placeholder without a corresponding entry in
+                # values_dict, we will hit a KeyError - we want this.
+                input_dict[key] = values_dict[value.name]
+        return input_dict
+
+    return _substitute(input_dict, values_dict)
+
+
+# Template for the config that becomes the base for each generated job config
+dict_templ = {
+    'branch': Placeholder('ceph_branch'),
+    'sha1': Placeholder('ceph_hash'),
+    'teuthology_branch': Placeholder('teuthology_branch'),
+    'archive_upload': Placeholder('archive_upload'),
+    'archive_upload_key': Placeholder('archive_upload_key'),
+    'machine_type': Placeholder('machine_type'),
+    'nuke-on-error': True,
+    'os_type': Placeholder('distro'),
+    'overrides': {
+        'admin_socket': {
+            'branch': Placeholder('ceph_branch'),
+        },
+        'ceph': {
+            'conf': {
+                'mon': {
+                    'debug mon': 20,
+                    'debug ms': 1,
+                    'debug paxos': 20},
+                'osd': {
+                    'debug filestore': 20,
+                    'debug journal': 20,
+                    'debug ms': 1,
+                    'debug osd': 25
+                }
+            },
+            'log-whitelist': ['slow request'],
+            'sha1': Placeholder('ceph_hash'),
+        },
+        'ceph-deploy': {
+            'branch': {
+                'dev-commit': Placeholder('ceph_hash'),
+            },
+            'conf': {
+                'client': {
+                    'log file': '/var/log/ceph/ceph-$name.$pid.log'
+                },
+                'mon': {
+                    'debug mon': 1,
+                    'debug ms': 20,
+                    'debug paxos': 20,
+                    'osd default pool size': 2
+                }
+            }
+        },
+        'install': {
+            'ceph': {
+                'sha1': Placeholder('ceph_hash'),
+            }
+        },
+        'workunit': {
+            'sha1': Placeholder('ceph_hash'),
+        }
+    },
+    'suite': Placeholder('suite'),
+    'suite_branch': Placeholder('suite_branch'),
+    'suite_sha1': Placeholder('suite_hash'),
+    'tasks': [],
+}
diff --git a/teuthology/suite/run.py b/teuthology/suite/run.py
new file mode 100644 (file)
index 0000000..48acba4
--- /dev/null
@@ -0,0 +1,411 @@
+import copy
+import logging
+import os
+import pwd
+import time
+import yaml
+
+from datetime import datetime
+
+from ..config import config, JobConfig
+from ..exceptions import (BranchNotFoundError, CommitNotFoundError,)
+from ..misc import deep_merge, get_results_url
+
+from . import util
+from .build_matrix import combine_path, build_matrix
+from .placeholder import substitute_placeholders, dict_templ
+
+log = logging.getLogger(__name__)
+
+
+class Run(object):
+    WAIT_MAX_JOB_TIME = 30 * 60
+    WAIT_PAUSE = 5 * 60
+    __slots__ = (
+        'args', 'name', 'base_config', 'suite_repo_path', 'base_yaml_paths',
+        'base_args',
+    )
+
+    def __init__(self, args):
+        """
+        args must be a config.YamlConfig object
+        """
+        self.args = args
+        self.name = self.make_run_name()
+        self.base_config = self.create_initial_config()
+
+        if self.args.suite_dir:
+            self.suite_repo_path = self.args.suite_dir
+        else:
+            self.suite_repo_path = util.fetch_repos(
+                self.base_config.suite_branch, test_name=self.name)
+
+        # Interpret any relative paths as being relative to ceph-qa-suite
+        # (absolute paths are unchanged by this)
+        self.base_yaml_paths = [os.path.join(self.suite_repo_path, b) for b in
+                                self.args.base_yaml_paths]
+
+    def make_run_name(self):
+        """
+        Generate a run name. A run name looks like:
+            teuthology-2014-06-23_19:00:37-rados-dumpling-testing-basic-plana
+        """
+        user = self.args.user or pwd.getpwuid(os.getuid()).pw_name
+        # We assume timestamp is a datetime.datetime object
+        timestamp = self.args.timestamp or \
+            datetime.now().strftime('%Y-%m-%d_%H:%M:%S')
+
+        worker = util.get_worker(self.args.machine_type)
+        return '-'.join(
+            [
+                user, str(timestamp), self.args.suite, self.args.ceph_branch,
+                self.args.kernel_branch or '-', self.args.kernel_flavor, worker
+            ]
+        )
+
+    def create_initial_config(self):
+        """
+        Put together the config file used as the basis for each job in the run.
+        Grabs hashes for the latest ceph, kernel and teuthology versions in the
+        branches specified and specifies them so we know exactly what we're
+        testing.
+
+        :returns: A JobConfig object
+        """
+        kernel_dict = self.choose_kernel()
+        ceph_hash = self.choose_ceph_hash()
+        # We don't store ceph_version because we don't use it yet outside of
+        # logging.
+        self.choose_ceph_version(ceph_hash)
+        teuthology_branch = self.choose_teuthology_branch()
+        suite_branch = self.choose_suite_branch()
+        suite_hash = self.choose_suite_hash(suite_branch)
+
+        config_input = dict(
+            suite=self.args.suite,
+            suite_branch=suite_branch,
+            suite_hash=suite_hash,
+            ceph_branch=self.args.ceph_branch,
+            ceph_hash=ceph_hash,
+            teuthology_branch=teuthology_branch,
+            machine_type=self.args.machine_type,
+            distro=self.args.distro,
+            archive_upload=config.archive_upload,
+            archive_upload_key=config.archive_upload_key,
+        )
+        conf_dict = substitute_placeholders(dict_templ, config_input)
+        conf_dict.update(kernel_dict)
+        job_config = JobConfig.from_dict(conf_dict)
+        return job_config
+
+    def choose_kernel(self):
+        # Put together a stanza specifying the kernel hash
+        if self.args.kernel_branch == 'distro':
+            kernel_hash = 'distro'
+        # Skip the stanza if no -k given
+        elif self.args.kernel_branch is None:
+            kernel_hash = None
+        else:
+            kernel_hash = util.get_gitbuilder_hash(
+                'kernel', self.args.kernel_branch, self.args.kernel_flavor,
+                self.args.machine_type, self.args.distro,
+            )
+            if not kernel_hash:
+                util.schedule_fail(
+                    message="Kernel branch '{branch}' not found".format(
+                        branch=self.args.kernel_branch), name=self.name
+                )
+        if kernel_hash:
+            log.info("kernel sha1: {hash}".format(hash=kernel_hash))
+            kernel_dict = dict(kernel=dict(kdb=True, sha1=kernel_hash))
+            if kernel_hash != 'distro':
+                kernel_dict['kernel']['flavor'] = self.args.kernel_flavor
+        else:
+            kernel_dict = dict()
+        return kernel_dict
+
+    def choose_ceph_hash(self):
+        """
+        Get the ceph hash: if --sha1/-S is supplied, use it if it is valid, and
+        just keep the ceph_branch around.  Otherwise use the current git branch
+        tip.
+        """
+
+        if self.args.ceph_sha1:
+            ceph_hash = util.git_validate_sha1('ceph', self.args.ceph_sha1)
+            if not ceph_hash:
+                exc = CommitNotFoundError(self.args.ceph_sha1, 'ceph.git')
+                util.schedule_fail(message=str(exc), name=self.name)
+            log.info("ceph sha1 explicitly supplied")
+
+        elif self.args.ceph_branch:
+            ceph_hash = util.git_ls_remote('ceph', self.args.ceph_branch)
+            if not ceph_hash:
+                exc = BranchNotFoundError(self.args.ceph_branch, 'ceph.git')
+                util.schedule_fail(message=str(exc), name=self.name)
+
+        log.info("ceph sha1: {hash}".format(hash=ceph_hash))
+        return ceph_hash
+
+    def choose_ceph_version(self, ceph_hash):
+        if config.suite_verify_ceph_hash:
+            # Get the ceph package version
+            ceph_version = util.package_version_for_hash(
+                ceph_hash, self.args.kernel_flavor, self.args.distro,
+                self.args.machine_type,
+            )
+            if not ceph_version:
+                util.schedule_fail(
+                    "Packages for ceph hash '{ver}' not found".format(
+                        ver=ceph_hash), self.name)
+            log.info("ceph version: {ver}".format(ver=ceph_version))
+            return ceph_version
+        else:
+            log.info('skipping ceph package verification')
+
+    def choose_teuthology_branch(self):
+        teuthology_branch = self.args.teuthology_branch
+        if teuthology_branch and teuthology_branch != 'master':
+            if not util.git_branch_exists('teuthology', teuthology_branch):
+                exc = BranchNotFoundError(teuthology_branch, 'teuthology.git')
+                util.schedule_fail(message=str(exc), name=self.name)
+        elif not teuthology_branch:
+            # Decide what branch of teuthology to use
+            if util.git_branch_exists('teuthology', self.args.ceph_branch):
+                teuthology_branch = self.args.ceph_branch
+            else:
+                log.info(
+                    "branch {0} not in teuthology.git; will use master for"
+                    " teuthology".format(self.args.ceph_branch))
+                teuthology_branch = 'master'
+        log.info("teuthology branch: %s", teuthology_branch)
+        return teuthology_branch
+
+    def choose_suite_branch(self):
+        suite_branch = self.args.suite_branch
+        ceph_branch = self.args.ceph_branch
+        if suite_branch and suite_branch != 'master':
+            if not util.git_branch_exists('ceph-qa-suite', suite_branch):
+                exc = BranchNotFoundError(suite_branch, 'ceph-qa-suite.git')
+                util.schedule_fail(message=str(exc), name=self.name)
+        elif not suite_branch:
+            # Decide what branch of ceph-qa-suite to use
+            if util.git_branch_exists('ceph-qa-suite', ceph_branch):
+                suite_branch = ceph_branch
+            else:
+                log.info(
+                    "branch {0} not in ceph-qa-suite.git; will use master for"
+                    " ceph-qa-suite".format(ceph_branch))
+                suite_branch = 'master'
+        return suite_branch
+
+    def choose_suite_hash(self, suite_branch):
+        suite_hash = util.git_ls_remote('ceph-qa-suite', suite_branch)
+        if not suite_hash:
+            exc = BranchNotFoundError(suite_branch, 'ceph-qa-suite.git')
+            util.schedule_fail(message=str(exc), name=self.name)
+        log.info("ceph-qa-suite branch: %s %s", suite_branch, suite_hash)
+
+    def build_base_args(self):
+        base_args = [
+            '--name', self.name,
+            '--num', str(self.args.num),
+            '--worker', util.get_worker(self.args.machine_type),
+        ]
+        if self.args.dry_run:
+            base_args.append('--dry-run')
+        if self.args.priority is not None:
+            base_args.extend(['--priority', str(self.args.priority)])
+        if self.args.verbose:
+            base_args.append('-v')
+        if self.args.owner:
+            base_args.extend(['--owner', self.args.owner])
+        return base_args
+
+    def prepare_and_schedule(self):
+        """
+        Puts together some "base arguments" with which to execute
+        teuthology-schedule for each job, then passes them and other parameters
+        to schedule_suite(). Finally, schedules a "last-in-suite" job that
+        sends an email to the specified address (if one is configured).
+        """
+        self.base_args = self.build_base_args()
+
+        # Make sure the yaml paths are actually valid
+        for yaml_path in self.base_yaml_paths:
+            full_yaml_path = os.path.join(self.suite_repo_path, yaml_path)
+            if not os.path.exists(full_yaml_path):
+                raise IOError("File not found: " + full_yaml_path)
+
+        num_jobs = self.schedule_suite()
+
+        if self.base_config.email and num_jobs:
+            arg = copy.deepcopy(self.base_args)
+            arg.append('--last-in-suite')
+            arg.extend(['--email', self.base_config.email])
+            if self.args.timeout:
+                arg.extend(['--timeout', self.args.timeout])
+            util.teuthology_schedule(
+                args=arg,
+                dry_run=self.args.dry_run,
+                verbose=self.args.verbose,
+                log_prefix="Results email: ",
+            )
+            results_url = get_results_url(self.base_config.name)
+            if results_url:
+                log.info("Test results viewable at %s", results_url)
+
+    def schedule_suite(self):
+        """
+        Schedule the suite-run. Returns the number of jobs scheduled.
+        """
+        name = self.name
+        arch = util.get_arch(self.base_config.machine_type)
+        suite_name = self.base_config.suite
+        suite_path = os.path.join(
+            self.suite_repo_path, 'suites',
+            self.base_config.suite.replace(':', '/'))
+        log.debug('Suite %s in %s' % (suite_name, suite_path))
+        configs = [
+            (combine_path(suite_name, item[0]), item[1]) for item in
+            build_matrix(suite_path, subset=self.args.subset)
+        ]
+        log.info('Suite %s in %s generated %d jobs (not yet filtered)' % (
+            suite_name, suite_path, len(configs)))
+
+        # used as a local cache for package versions from gitbuilder
+        package_versions = dict()
+        jobs_to_schedule = []
+        jobs_missing_packages = []
+        for description, fragment_paths in configs:
+            base_frag_paths = [
+                util.strip_fragment_path(x) for x in fragment_paths
+            ]
+            limit = self.args.limit
+            if limit > 0 and len(jobs_to_schedule) >= limit:
+                log.info(
+                    'Stopped after {limit} jobs due to --limit={limit}'.format(
+                        limit=limit))
+                break
+            # Break apart the filter parameter (one string) into comma
+            # separated components to be used in searches.
+            filter_in = self.args.filter_in
+            if filter_in:
+                filter_list = [x.strip() for x in filter_in.split(',')]
+                if not any([x in description for x in filter_list]):
+                    all_filt = []
+                    for filt_samp in filter_list:
+                        all_filt.extend(
+                            [x.find(filt_samp) < 0 for x in base_frag_paths]
+                        )
+                    if all(all_filt):
+                        continue
+            filter_out = self.args.filter_out
+            if filter_out:
+                filter_list = [x.strip() for x in filter_out.split(',')]
+                if any([x in description for x in filter_list]):
+                    continue
+                all_filt_val = False
+                for filt_samp in filter_list:
+                    flist = [filt_samp in x for x in base_frag_paths]
+                    if any(flist):
+                        all_filt_val = True
+                        continue
+                if all_filt_val:
+                    continue
+
+            raw_yaml = '\n'.join([file(a, 'r').read() for a in fragment_paths])
+
+            parsed_yaml = yaml.load(raw_yaml)
+            os_type = parsed_yaml.get('os_type') or self.base_config.os_type
+            exclude_arch = parsed_yaml.get('exclude_arch')
+            exclude_os_type = parsed_yaml.get('exclude_os_type')
+
+            if exclude_arch and exclude_arch == arch:
+                log.info('Skipping due to excluded_arch: %s facets %s',
+                         exclude_arch, description)
+                continue
+            if exclude_os_type and exclude_os_type == os_type:
+                log.info('Skipping due to excluded_os_type: %s facets %s',
+                         exclude_os_type, description)
+                continue
+
+            arg = copy.deepcopy(self.base_args)
+            arg.extend([
+                '--description', description,
+                '--',
+            ])
+            arg.extend(self.base_yaml_paths)
+            arg.extend(fragment_paths)
+
+            job = dict(
+                yaml=parsed_yaml,
+                desc=description,
+                sha1=self.base_config.sha1,
+                args=arg
+            )
+
+            if config.suite_verify_ceph_hash:
+                full_job_config = dict()
+                deep_merge(full_job_config, self.base_config.to_dict())
+                deep_merge(full_job_config, parsed_yaml)
+                flavor = util.get_install_task_flavor(full_job_config)
+                sha1 = self.base_config.sha1
+                # Get package versions for this sha1, os_type and flavor. If
+                # we've already retrieved them in a previous loop, they'll be
+                # present in package_versions and gitbuilder will not be asked
+                # again for them.
+                package_versions = util.get_package_versions(
+                    sha1,
+                    os_type,
+                    flavor,
+                    package_versions
+                )
+                if not util.has_packages_for_distro(sha1, os_type, flavor,
+                                                    package_versions):
+                    m = "Packages for os_type '{os}', flavor {flavor} and " + \
+                        "ceph hash '{ver}' not found"
+                    log.error(m.format(os=os_type, flavor=flavor, ver=sha1))
+                    jobs_missing_packages.append(job)
+
+            jobs_to_schedule.append(job)
+
+        for job in jobs_to_schedule:
+            log.info(
+                'Scheduling %s', job['desc']
+            )
+
+            log_prefix = ''
+            if job in jobs_missing_packages:
+                log_prefix = "Missing Packages: "
+                if (not self.args.dry_run and not
+                        config.suite_allow_missing_packages):
+                    util.schedule_fail(
+                        "At least one job needs packages that don't exist for "
+                        "hash {sha1}.".format(sha1=self.base_config.sha1),
+                        name,
+                    )
+            util.teuthology_schedule(
+                args=job['args'],
+                dry_run=self.args.dry_run,
+                verbose=self.args.verbose,
+                log_prefix=log_prefix,
+            )
+            throttle = self.args.throttle
+            if not self.args.dry_run and throttle:
+                log.info("pause between jobs : --throttle " + str(throttle))
+                time.sleep(int(throttle))
+
+        count = len(jobs_to_schedule)
+        missing_count = len(jobs_missing_packages)
+        log.info(
+            'Suite %s in %s scheduled %d jobs.' %
+            (suite_name, suite_path, count)
+        )
+        log.info('%d/%d jobs were filtered out.',
+                 (len(configs) - count),
+                 len(configs))
+        if missing_count:
+            log.warn('Scheduled %d/%d jobs that are missing packages!',
+                     missing_count, count)
+        return count
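
The --filter/--filter-out handling in schedule_suite() above is easy to
misread; the matching rules amount to the following standalone sketch
(the function name is invented, and the strip_fragment_path()
normalization the real code applies to fragment paths is omitted):

    def keep_job(description, frag_paths, filter_in=None, filter_out=None):
        # Keep only jobs matching at least one --filter term, if given.
        if filter_in:
            terms = [t.strip() for t in filter_in.split(',')]
            in_desc = any(t in description for t in terms)
            in_frags = any(t in p for t in terms for p in frag_paths)
            if not (in_desc or in_frags):
                return False
        # Drop jobs matching any --filter-out term.
        if filter_out:
            terms = [t.strip() for t in filter_out.split(',')]
            if any(t in description for t in terms):
                return False
            if any(t in p for t in terms for p in frag_paths):
                return False
        return True
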
diff --git a/teuthology/suite/test/suites/noop/noop.yaml b/teuthology/suite/test/suites/noop/noop.yaml
new file mode 100644 (file)
index 0000000..fb674b1
--- /dev/null
@@ -0,0 +1,7 @@
+roles:
+- - mon.a
+  - osd.0
+tasks:
+- exec:
+    mon.a:
+      - echo "Well done !"
diff --git a/teuthology/suite/test/test_build_matrix.py b/teuthology/suite/test/test_build_matrix.py
new file mode 100644 (file)
index 0000000..ea547be
--- /dev/null
@@ -0,0 +1,507 @@
+import random
+
+from mock import patch, MagicMock
+
+from teuthology.suite import build_matrix
+from teuthology.test.fake_fs import make_fake_fstools
+
+
+class TestBuildMatrixSimple(object):
+    def test_combine_path(self):
+        result = build_matrix.combine_path("/path/to/left", "right/side")
+        assert result == "/path/to/left/right/side"
+
+    def test_combine_path_no_right(self):
+        result = build_matrix.combine_path("/path/to/left", None)
+        assert result == "/path/to/left"
+
+
+class TestBuildMatrix(object):
+
+    patchpoints = [
+        'os.path.exists',
+        'os.listdir',
+        'os.path.isfile',
+        'os.path.isdir',
+        '__builtin__.open',
+    ]
+
+    def setup(self):
+        self.mocks = dict()
+        self.patchers = dict()
+        for ppoint in self.__class__.patchpoints:
+            self.mocks[ppoint] = MagicMock()
+            self.patchers[ppoint] = patch(ppoint, self.mocks[ppoint])
+
+    def start_patchers(self, fake_fs):
+        fake_fns = make_fake_fstools(fake_fs)
+        # relies on fake_fns being in same order as patchpoints
+        for ppoint, fn in zip(self.__class__.patchpoints, fake_fns):
+            self.mocks[ppoint].side_effect = fn
+        for patcher in self.patchers.values():
+            patcher.start()
+
+    def stop_patchers(self):
+        for patcher in self.patchers.values():
+            patcher.stop()
+
+    def teardown(self):
+        self.stop_patchers()
+
+    def fragment_occurences(self, jobs, fragment):
+        # What fraction of jobs contain fragment?
+        count = 0
+        for (description, fragment_list) in jobs:
+            for item in fragment_list:
+                if item.endswith(fragment):
+                    count += 1
+        return count / float(len(jobs))
+
+    def test_concatenate_1x2x3(self):
+        fake_fs = {
+            'd0_0': {
+                '+': None,
+                'd1_0': {
+                    'd1_0_0.yaml': None,
+                },
+                'd1_1': {
+                    'd1_1_0.yaml': None,
+                    'd1_1_1.yaml': None,
+                },
+                'd1_2': {
+                    'd1_2_0.yaml': None,
+                    'd1_2_1.yaml': None,
+                    'd1_2_2.yaml': None,
+                },
+            },
+        }
+        self.start_patchers(fake_fs)
+        result = build_matrix.build_matrix('d0_0')
+        assert len(result) == 1
+
+    def test_convolve_2x2(self):
+        fake_fs = {
+            'd0_0': {
+                '%': None,
+                'd1_0': {
+                    'd1_0_0.yaml': None,
+                    'd1_0_1.yaml': None,
+                },
+                'd1_1': {
+                    'd1_1_0.yaml': None,
+                    'd1_1_1.yaml': None,
+                },
+            },
+        }
+        self.start_patchers(fake_fs)
+        result = build_matrix.build_matrix('d0_0')
+        assert len(result) == 4
+        assert self.fragment_occurences(result, 'd1_1_1.yaml') == 0.5
+
+    def test_convolve_2x2x2(self):
+        fake_fs = {
+            'd0_0': {
+                '%': None,
+                'd1_0': {
+                    'd1_0_0.yaml': None,
+                    'd1_0_1.yaml': None,
+                },
+                'd1_1': {
+                    'd1_1_0.yaml': None,
+                    'd1_1_1.yaml': None,
+                },
+                'd1_2': {
+                    'd1_2_0.yaml': None,
+                    'd1_2_1.yaml': None,
+                },
+            },
+        }
+        self.start_patchers(fake_fs)
+        result = build_matrix.build_matrix('d0_0')
+        assert len(result) == 8
+        assert self.fragment_occurences(result, 'd1_2_0.yaml') == 0.5
+
+    def test_convolve_1x2x4(self):
+        fake_fs = {
+            'd0_0': {
+                '%': None,
+                'd1_0': {
+                    'd1_0_0.yaml': None,
+                },
+                'd1_1': {
+                    'd1_1_0.yaml': None,
+                    'd1_1_1.yaml': None,
+                },
+                'd1_2': {
+                    'd1_2_0.yaml': None,
+                    'd1_2_1.yaml': None,
+                    'd1_2_2.yaml': None,
+                    'd1_2_3.yaml': None,
+                },
+            },
+        }
+        self.start_patchers(fake_fs)
+        result = build_matrix.build_matrix('d0_0')
+        assert len(result) == 8
+        assert self.fragment_occurences(result, 'd1_2_2.yaml') == 0.25
+
+    def test_convolve_with_concat(self):
+        fake_fs = {
+            'd0_0': {
+                '%': None,
+                'd1_0': {
+                    'd1_0_0.yaml': None,
+                },
+                'd1_1': {
+                    'd1_1_0.yaml': None,
+                    'd1_1_1.yaml': None,
+                },
+                'd1_2': {
+                    '+': None,
+                    'd1_2_0.yaml': None,
+                    'd1_2_1.yaml': None,
+                    'd1_2_2.yaml': None,
+                    'd1_2_3.yaml': None,
+                },
+            },
+        }
+        self.start_patchers(fake_fs)
+        result = build_matrix.build_matrix('d0_0')
+        assert len(result) == 2
+        for i in result:
+            assert 'd0_0/d1_2/d1_2_0.yaml' in i[1]
+            assert 'd0_0/d1_2/d1_2_1.yaml' in i[1]
+            assert 'd0_0/d1_2/d1_2_2.yaml' in i[1]
+            assert 'd0_0/d1_2/d1_2_3.yaml' in i[1]
+
+    def test_emulate_teuthology_noceph(self):
+        fake_fs = {
+            'teuthology': {
+                'no-ceph': {
+                    '%': None,
+                    'clusters': {
+                        'single.yaml': None,
+                    },
+                    'distros': {
+                        'baremetal.yaml': None,
+                        'rhel7.0.yaml': None,
+                        'ubuntu12.04.yaml': None,
+                        'ubuntu14.04.yaml': None,
+                        'vps.yaml': None,
+                        'vps_centos6.5.yaml': None,
+                        'vps_debian7.yaml': None,
+                        'vps_rhel6.4.yaml': None,
+                        'vps_rhel6.5.yaml': None,
+                        'vps_rhel7.0.yaml': None,
+                        'vps_ubuntu14.04.yaml': None,
+                    },
+                    'tasks': {
+                        'teuthology.yaml': None,
+                    },
+                },
+            },
+        }
+        self.start_patchers(fake_fs)
+        result = build_matrix.build_matrix('teuthology/no-ceph')
+        assert len(result) == 11
+        assert self.fragment_occurences(result, 'vps.yaml') == 1 / 11.0
+
+    def test_empty_dirs(self):
+        fake_fs = {
+            'teuthology': {
+                'no-ceph': {
+                    '%': None,
+                    'clusters': {
+                        'single.yaml': None,
+                    },
+                    'distros': {
+                        'baremetal.yaml': None,
+                        'rhel7.0.yaml': None,
+                        'ubuntu12.04.yaml': None,
+                        'ubuntu14.04.yaml': None,
+                        'vps.yaml': None,
+                        'vps_centos6.5.yaml': None,
+                        'vps_debian7.yaml': None,
+                        'vps_rhel6.4.yaml': None,
+                        'vps_rhel6.5.yaml': None,
+                        'vps_rhel7.0.yaml': None,
+                        'vps_ubuntu14.04.yaml': None,
+                    },
+                    'tasks': {
+                        'teuthology.yaml': None,
+                    },
+                },
+            },
+        }
+        self.start_patchers(fake_fs)
+        result = build_matrix.build_matrix('teuthology/no-ceph')
+        self.stop_patchers()
+
+        fake_fs2 = {
+            'teuthology': {
+                'no-ceph': {
+                    '%': None,
+                    'clusters': {
+                        'single.yaml': None,
+                    },
+                    'distros': {
+                        'empty': {},
+                        'baremetal.yaml': None,
+                        'rhel7.0.yaml': None,
+                        'ubuntu12.04.yaml': None,
+                        'ubuntu14.04.yaml': None,
+                        'vps.yaml': None,
+                        'vps_centos6.5.yaml': None,
+                        'vps_debian7.yaml': None,
+                        'vps_rhel6.4.yaml': None,
+                        'vps_rhel6.5.yaml': None,
+                        'vps_rhel7.0.yaml': None,
+                        'vps_ubuntu14.04.yaml': None,
+                    },
+                    'tasks': {
+                        'teuthology.yaml': None,
+                    },
+                    'empty': {},
+                },
+            },
+        }
+        self.start_patchers(fake_fs2)
+        result2 = build_matrix.build_matrix('teuthology/no-ceph')
+        assert len(result) == 11
+        assert len(result2) == len(result)
+
+    def test_disable_extension(self):
+        fake_fs = {
+            'teuthology': {
+                'no-ceph': {
+                    '%': None,
+                    'clusters': {
+                        'single.yaml': None,
+                    },
+                    'distros': {
+                        'baremetal.yaml': None,
+                        'rhel7.0.yaml': None,
+                        'ubuntu12.04.yaml': None,
+                        'ubuntu14.04.yaml': None,
+                        'vps.yaml': None,
+                        'vps_centos6.5.yaml': None,
+                        'vps_debian7.yaml': None,
+                        'vps_rhel6.4.yaml': None,
+                        'vps_rhel6.5.yaml': None,
+                        'vps_rhel7.0.yaml': None,
+                        'vps_ubuntu14.04.yaml': None,
+                    },
+                    'tasks': {
+                        'teuthology.yaml': None,
+                    },
+                },
+            },
+        }
+        self.start_patchers(fake_fs)
+        result = build_matrix.build_matrix('teuthology/no-ceph')
+        self.stop_patchers()
+
+        fake_fs2 = {
+            'teuthology': {
+                'no-ceph': {
+                    '%': None,
+                    'clusters': {
+                        'single.yaml': None,
+                    },
+                    'distros': {
+                        'baremetal.yaml': None,
+                        'rhel7.0.yaml': None,
+                        'ubuntu12.04.yaml': None,
+                        'ubuntu14.04.yaml': None,
+                        'vps.yaml': None,
+                        'vps_centos6.5.yaml': None,
+                        'vps_debian7.yaml': None,
+                        'vps_rhel6.4.yaml': None,
+                        'vps_rhel6.5.yaml': None,
+                        'vps_rhel7.0.yaml': None,
+                        'vps_ubuntu14.04.yaml': None,
+                        'forcefilevps_ubuntu14.04.yaml.disable': None,
+                        'forcefilevps_ubuntu14.04.yaml.anotherextension': None,
+                    },
+                    'tasks': {
+                        'teuthology.yaml': None,
+                        'forcefilevps_ubuntu14.04notyaml': None,
+                    },
+                    'forcefilevps_ubuntu14.04notyaml': None,
+                    'tasks.disable': {
+                        'teuthology2.yaml': None,
+                        'forcefilevps_ubuntu14.04notyaml': None,
+                    },
+                },
+            },
+        }
+        self.start_patchers(fake_fs2)
+        result2 = build_matrix.build_matrix('teuthology/no-ceph')
+        assert len(result) == 11
+        assert len(result2) == len(result)
+
+    def test_sort_order(self):
+        # This test ensures that 'ceph' comes before 'ceph-thrash' when yaml
+        # fragments are sorted.
+        fake_fs = {
+            'thrash': {
+                '%': None,
+                'ceph-thrash': {'default.yaml': None},
+                'ceph': {'base.yaml': None},
+                'clusters': {'mds-1active-1standby.yaml': None},
+                'debug': {'mds_client.yaml': None},
+                'fs': {'btrfs.yaml': None},
+                'msgr-failures': {'none.yaml': None},
+                'overrides': {'whitelist_wrongly_marked_down.yaml': None},
+                'tasks': {'cfuse_workunit_suites_fsstress.yaml': None},
+            },
+        }
+        self.start_patchers(fake_fs)
+        result = build_matrix.build_matrix('thrash')
+        assert len(result) == 1
+        assert self.fragment_occurences(result, 'base.yaml') == 1
+        fragments = result[0][1]
+        assert fragments[0] == 'thrash/ceph/base.yaml'
+        assert fragments[1] == 'thrash/ceph-thrash/default.yaml'
+
+class TestSubset(object):
+    patchpoints = [
+        'os.path.exists',
+        'os.listdir',
+        'os.path.isfile',
+        'os.path.isdir',
+        '__builtin__.open',
+    ]
+
+    def setup(self):
+        self.mocks = dict()
+        self.patchers = dict()
+        for ppoint in self.__class__.patchpoints:
+            self.mocks[ppoint] = MagicMock()
+            self.patchers[ppoint] = patch(ppoint, self.mocks[ppoint])
+
+    def start_patchers(self, fake_fs):
+        fake_fns = make_fake_fstools(fake_fs)
+        # relies on fake_fns being in same order as patchpoints
+        for ppoint, fn in zip(self.__class__.patchpoints, fake_fns):
+            self.mocks[ppoint].side_effect = fn
+        for patcher in self.patchers.values():
+            patcher.start()
+
+    def stop_patchers(self):
+        for patcher in self.patchers.values():
+            patcher.stop()
+
+    # test_random() manages start/stop patchers on its own; no teardown
+
+    MAX_FACETS = 10
+    MAX_FANOUT = 3
+    MAX_DEPTH = 3
+    MAX_SUBSET = 10
+    @staticmethod
+    def generate_fake_fs(max_facets, max_fanout, max_depth):
+        def yamilify(name):
+            return name + ".yaml"
+        def name_generator():
+            x = 0
+            while True:
+                yield(str(x))
+                x += 1
+        def generate_tree(
+                max_facets, max_fanout, max_depth, namegen, top=True):
+            if max_depth == 0:
+                return None
+            if max_facets == 0:
+                return None
+            items = random.choice(range(max_fanout))
+            if items == 0 and top:
+                items = 1
+            if items == 0:
+                return None
+            sub_max_facets = max_facets / items
+            tree = {}
+            for i in range(items):
+                subtree = generate_tree(
+                    sub_max_facets, max_fanout,
+                    max_depth - 1, namegen, top=False)
+                if subtree is not None:
+                    tree[namegen.next()] = subtree
+                else:
+                    tree[yamilify(namegen.next())] = None
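+            # Half the time add a '%' entry, which marks the directory for
+            # convolution (cross product) rather than concatenation when
+            # build_matrix walks it.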
+            random.choice([
+                lambda: tree.update({'%': None}),
+                lambda: None])()
+            return tree
+        return {
+            'root':  generate_tree(
+                max_facets, max_fanout, max_depth, name_generator())
+        }
+
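+    # Return a random (index, outof) pair with 0 <= index < outof < maxsub,
+    # shaped like teuthology-suite's subset argument.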
+    @staticmethod
+    def generate_subset(maxsub):
+        den = random.choice(range(maxsub-1))+1
+        return (random.choice(range(den)), den)
+
+    @staticmethod
+    def generate_description_list(tree, subset):
+        mat, first, matlimit = build_matrix._get_matrix(
+            'root', subset=subset)
+        return [i[0] for i in build_matrix.generate_combinations(
+            'root', mat, first, matlimit)], mat, first, matlimit
+
+    @staticmethod
+    def verify_facets(tree, description_list, subset, mat, first, matlimit):
+        def flatten(tree):
+            for k,v in tree.iteritems():
+                if v is None and '.yaml' in k:
+                    yield k
+                elif v is not None and '.disable' not in k:
+                    for x in flatten(v):
+                        yield x
+
+        def pptree(tree, tabs=0):
+            ret = ""
+            for k, v in tree.iteritems():
+                if v is None:
+                    ret += ('\t'*tabs) + k.ljust(10) + "\n"
+                else:
+                    ret += ('\t'*tabs) + (k + ':').ljust(10) + "\n"
+                    ret += pptree(v, tabs+1)
+            return ret
+        for facet in flatten(tree):
+            found = False
+            for i in description_list:
+                if facet in i:
+                    found = True
+                    break
+            if not found:
+                print "tree\n{tree}\ngenerated list\n{desc}\n\nfrom matrix\n\n{matrix}\nsubset {subset} without facet {fac}".format(
+                    tree=pptree(tree),
+                    desc='\n'.join(description_list),
+                    subset=subset,
+                    matrix=str(mat),
+                    fac=facet)
+                all_desc = build_matrix.generate_combinations(
+                    'root',
+                    mat,
+                    0,
+                    mat.size())
+                for i, desc in zip(xrange(mat.size()), all_desc):
+                    if i == first:
+                        print '=========='
+                    print i, desc
+                    if i + 1 == matlimit:
+                        print '=========='
+            assert found
+
+    def test_random(self):
+        for i in xrange(10000):
+            tree = self.generate_fake_fs(
+                self.MAX_FACETS,
+                self.MAX_FANOUT,
+                self.MAX_DEPTH)
+            subset = self.generate_subset(self.MAX_SUBSET)
+            self.start_patchers(tree)
+            dlist, mat, first, matlimit = self.generate_description_list(tree, subset)
+            self.verify_facets(tree, dlist, subset, mat, first, matlimit)
+            self.stop_patchers()
diff --git a/teuthology/suite/test/test_init.py b/teuthology/suite/test/test_init.py
new file mode 100644 (file)
index 0000000..84c929d
--- /dev/null
@@ -0,0 +1,180 @@
+import os
+
+from copy import deepcopy
+
+from mock import patch, Mock, DEFAULT
+
+from teuthology import suite
+from scripts.suite import main
+from teuthology.config import config
+
+import pytest
+import time
+
+
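+# Stand-ins for time.time() and time.sleep(): the mock clock advances 0.1s
+# per time() call and jumps by the requested amount on sleep(), so wait
+# loops finish without real delays.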
+def get_fake_time_and_sleep():
+    m_time = Mock()
+    m_time.return_value = time.time()
+
+    def m_time_side_effect():
+        # Fake the slow passage of time
+        m_time.return_value += 0.1
+        return m_time.return_value
+    m_time.side_effect = m_time_side_effect
+
+    def f_sleep(seconds):
+        m_time.return_value += seconds
+    m_sleep = Mock(wraps=f_sleep)
+    return m_time, m_sleep
+
+
+def setup_module():
+    global m_time
+    global m_sleep
+    m_time, m_sleep = get_fake_time_and_sleep()
+    global patcher_time_sleep
+    patcher_time_sleep = patch.multiple(
+        'teuthology.suite.time',
+        time=m_time,
+        sleep=m_sleep,
+    )
+    patcher_time_sleep.start()
+
+
+def teardown_module():
+    patcher_time_sleep.stop()
+
+
+@patch.object(suite.ResultsReporter, 'get_jobs')
+def test_wait_success(m_get_jobs, caplog):
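+    # Two polls for unfinished jobs (one queued job, then none); 'final' is
+    # the per-job summary fetched once no jobs remain.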
+    results = [
+        [{'status': 'queued', 'job_id': '2'}],
+        [],
+    ]
+    final = [
+        {'status': 'pass', 'job_id': '1',
+         'description': 'DESC1', 'log_href': 'http://URL1'},
+        {'status': 'fail', 'job_id': '2',
+         'description': 'DESC2', 'log_href': 'http://URL2'},
+        {'status': 'pass', 'job_id': '3',
+         'description': 'DESC3', 'log_href': 'http://URL3'},
+    ]
+
+    def get_jobs(name, **kwargs):
+        if kwargs['fields'] == ['job_id', 'status']:
+            return in_progress.pop(0)
+        else:
+            return final
+    m_get_jobs.side_effect = get_jobs
+    suite.Run.WAIT_PAUSE = 1
+
+    in_progress = deepcopy(results)
+    assert 0 == suite.wait('name', 1, 'http://UPLOAD_URL')
+    m_get_jobs.assert_any_call('name', fields=['job_id', 'status'])
+    assert 0 == len(in_progress)
+    assert 'fail http://UPLOAD_URL/name/2' in caplog.text()
+
+    in_progress = deepcopy(results)
+    assert 0 == suite.wait('name', 1, None)
+    m_get_jobs.assert_any_call('name', fields=['job_id', 'status'])
+    assert 0 == len(in_progress)
+    assert 'fail http://URL2' in caplog.text()
+
+
+@patch.object(suite.ResultsReporter, 'get_jobs')
+def test_wait_fails(m_get_jobs):
+    results = []
+    results.append([{'status': 'queued', 'job_id': '2'}])
+    results.append([{'status': 'queued', 'job_id': '2'}])
+    results.append([{'status': 'queued', 'job_id': '2'}])
+
+    def get_jobs(name, **kwargs):
+        return results.pop(0)
+    m_get_jobs.side_effect = get_jobs
+    suite.Run.WAIT_PAUSE = 1
+    suite.Run.WAIT_MAX_JOB_TIME = 1
+    with pytest.raises(suite.WaitException) as error:
+        suite.wait('name', 1, None)
+    # the assertion must follow the block; inside it, it would never run
+    assert 'abc' in str(error)
+
+
+class TestSuiteMain(object):
+    def test_main(self):
+        suite_name = 'SUITE'
+        throttle = '3'
+        machine_type = 'burnupi'
+
+        def prepare_and_schedule(obj):
+            assert obj.base_config.suite == suite_name
+            assert obj.args.throttle == throttle
+
+        def fake_str(*args, **kwargs):
+            return 'fake'
+
+        def fake_bool(*args, **kwargs):
+            return True
+
+        with patch.multiple(
+                'teuthology.suite.run.util',
+                fetch_repos=DEFAULT,
+                package_version_for_hash=fake_str,
+                git_branch_exists=fake_bool,
+                git_ls_remote=fake_str,
+                ):
+            with patch.multiple(
+                'teuthology.suite.run.Run',
+                prepare_and_schedule=prepare_and_schedule,
+            ):
+                main([
+                    '--suite', suite_name,
+                    '--throttle', throttle,
+                    '--machine-type', machine_type,
+                ])
+
+    def test_schedule_suite_noverify(self):
+        suite_name = 'noop'
+        suite_dir = os.path.dirname(__file__)
+        throttle = '3'
+        machine_type = 'burnupi'
+
+        with patch.multiple(
+            'teuthology.suite.util',
+            fetch_repos=DEFAULT,
+            teuthology_schedule=DEFAULT,
+            get_arch=lambda x: 'x86_64',
+            get_gitbuilder_hash=DEFAULT,
+            git_ls_remote=lambda *args: '1234',
+            package_version_for_hash=DEFAULT,
+        ) as m:
+            m['package_version_for_hash'].return_value = 'fake-9.5'
+            config.suite_verify_ceph_hash = False
+            main(['--suite', suite_name,
+                  '--suite-dir', suite_dir,
+                  '--throttle', throttle,
+                  '--machine-type', machine_type])
+            m_sleep.assert_called_with(int(throttle))
+            m['get_gitbuilder_hash'].assert_not_called()
+
+    def test_schedule_suite(self):
+        suite_name = 'noop'
+        suite_dir = os.path.dirname(__file__)
+        throttle = '3'
+        machine_type = 'burnupi'
+
+        with patch.multiple(
+            'teuthology.suite.util',
+            fetch_repos=DEFAULT,
+            teuthology_schedule=DEFAULT,
+            get_arch=lambda x: 'x86_64',
+            get_gitbuilder_hash=DEFAULT,
+            git_ls_remote=lambda *args: '12345',
+            package_version_for_hash=DEFAULT,
+        ) as m:
+            m['package_version_for_hash'].return_value = 'fake-9.5'
+            config.suite_verify_ceph_hash = True
+            main(['--suite', suite_name,
+                  '--suite-dir', suite_dir,
+                  '--throttle', throttle,
+                  '--machine-type', machine_type])
+            m_sleep.assert_called_with(int(throttle))
diff --git a/teuthology/suite/test/test_matrix.py b/teuthology/suite/test/test_matrix.py
new file mode 100644 (file)
index 0000000..3333e52
--- /dev/null
@@ -0,0 +1,74 @@
+from teuthology.suite import matrix
+
+
+def verify_matrix_output_diversity(res):
+    """
+    Verifies that the size of the matrix passed matches the number of unique
+    outputs from res.index
+    """
+    sz = res.size()
+    s = frozenset([matrix.generate_lists(res.index(i)) for i in range(sz)])
+    assert sz == len(s)
+
+
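+# Shorthand for a Sum of Base facets; the offsets derived from 'num' keep
+# ids from different mbs() calls distinct.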
+def mbs(num, l):
+    return matrix.Sum(num*10, [matrix.Base(i + (100*num)) for i in l])
+
+
+class TestMatrix(object):
+    def test_simple(self):
+        verify_matrix_output_diversity(mbs(1, range(6)))
+
+    def test_simple2(self):
+        verify_matrix_output_diversity(mbs(1, range(5)))
+
+    # The test_product* tests differ by the degree by which dimension
+    # sizes share prime factors
+    def test_product_simple(self):
+        verify_matrix_output_diversity(
+            matrix.Product(1, [mbs(1, range(6)), mbs(2, range(2))]))
+
+    def test_product_3_facets_2_prime_factors(self):
+        verify_matrix_output_diversity(matrix.Product(1, [
+                    mbs(1, range(6)),
+                    mbs(2, range(2)),
+                    mbs(3, range(3)),
+                    ]))
+
+    def test_product_3_facets_2_prime_factors_one_larger(self):
+        verify_matrix_output_diversity(matrix.Product(1, [
+                    mbs(1, range(2)),
+                    mbs(2, range(5)),
+                    mbs(4, range(4)),
+                    ]))
+
+    def test_product_4_facets_2_prime_factors(self):
+        verify_matrix_output_diversity(matrix.Sum(1, [
+                    mbs(1, range(6)),
+                    mbs(3, range(3)),
+                    mbs(2, range(2)),
+                    mbs(4, range(9)),
+                    ]))
+
+    def test_product_2_facets_2_prime_factors(self):
+        verify_matrix_output_diversity(matrix.Sum(1, [
+                    mbs(1, range(2)),
+                    mbs(2, range(5)),
+                    ]))
+
+    def test_product_with_sum(self):
+        verify_matrix_output_diversity(matrix.Sum(
+                9,
+                [
+                    mbs(10, range(6)),
+                    matrix.Product(1, [
+                            mbs(1, range(2)),
+                            mbs(2, range(5)),
+                            mbs(4, range(4))]),
+                    matrix.Product(8, [
+                            mbs(7, range(2)),
+                            mbs(6, range(5)),
+                            mbs(5, range(4))])
+                    ]
+                ))
diff --git a/teuthology/suite/test/test_placeholder.py b/teuthology/suite/test/test_placeholder.py
new file mode 100644 (file)
index 0000000..0336870
--- /dev/null
@@ -0,0 +1,43 @@
+from teuthology.suite.placeholder import (
+    substitute_placeholders, dict_templ, Placeholder
+)
+
+
+class TestPlaceholder(object):
+    def test_substitute_placeholders(self):
+        suite_hash = 'suite_hash'
+        input_dict = dict(
+            suite='suite',
+            suite_branch='suite_branch',
+            suite_hash=suite_hash,
+            ceph_branch='ceph_branch',
+            ceph_hash='ceph_hash',
+            teuthology_branch='teuthology_branch',
+            machine_type='machine_type',
+            distro='distro',
+            archive_upload='archive_upload',
+            archive_upload_key='archive_upload_key',
+        )
+        output_dict = substitute_placeholders(dict_templ, input_dict)
+        assert output_dict['suite'] == 'suite'
+        assert output_dict['suite_sha1'] == suite_hash
+        assert isinstance(dict_templ['suite'], Placeholder)
+        assert isinstance(
+            dict_templ['overrides']['admin_socket']['branch'],
+            Placeholder)
+
+    def test_null_placeholders_dropped(self):
+        input_dict = dict(
+            suite='suite',
+            suite_branch='suite_branch',
+            suite_hash='suite_hash',
+            ceph_branch='ceph_branch',
+            ceph_hash='ceph_hash',
+            teuthology_branch='teuthology_branch',
+            machine_type='machine_type',
+            archive_upload='archive_upload',
+            archive_upload_key='archive_upload_key',
+            distro=None,
+        )
+        output_dict = substitute_placeholders(dict_templ, input_dict)
+        assert 'os_type' not in output_dict
diff --git a/teuthology/suite/test/test_run_.py b/teuthology/suite/test/test_run_.py
new file mode 100644 (file)
index 0000000..7bd99b5
--- /dev/null
@@ -0,0 +1,113 @@
+import pytest
+import requests
+
+from datetime import datetime
+from mock import patch
+
+from teuthology.config import config, YamlConfig
+from teuthology.exceptions import ScheduleFailError
+from teuthology.suite import run
+
+
+class TestRun(object):
+    klass = run.Run
+
+    def setup(self):
+        self.args_dict = dict(
+            suite='suite',
+            suite_branch='suite_branch',
+            ceph_branch='ceph_branch',
+            ceph_sha1='ceph_sha1',
+            teuthology_branch='teuthology_branch',
+            kernel_branch=None,
+            kernel_flavor='kernel_flavor',
+            distro='ubuntu',
+            machine_type='machine_type',
+            base_yaml_paths=list(),
+        )
+        self.args = YamlConfig.from_dict(self.args_dict)
+
+    @patch('teuthology.suite.run.util.fetch_repos')
+    def test_name(self, m_fetch_repos):
+        stamp = datetime.now().strftime('%Y-%m-%d_%H:%M:%S')
+        with patch.object(run.Run, 'create_initial_config',
+                          return_value=run.JobConfig()):
+            name = run.Run(self.args).name
+        assert stamp in name
+
+    @patch('teuthology.suite.run.util.fetch_repos')
+    def test_name_user(self, m_fetch_repos):
+        self.args.user = 'USER'
+        with patch.object(run.Run, 'create_initial_config',
+                          return_value=run.JobConfig()):
+            name = run.Run(self.args).name
+        assert name.startswith('USER-')
+
+    @patch('teuthology.suite.run.util.git_branch_exists')
+    @patch('teuthology.suite.run.util.package_version_for_hash')
+    @patch('teuthology.suite.run.util.git_ls_remote')
+    def test_branch_nonexistent(
+        self,
+        m_git_ls_remote,
+        m_package_version_for_hash,
+        m_git_branch_exists,
+    ):
+        config.gitbuilder_host = 'example.com'
+        m_git_ls_remote.side_effect = [
+            # First call will be for the ceph hash
+            None,
+            # Second call will be for the suite hash
+            'suite_hash',
+        ]
+        m_package_version_for_hash.return_value = 'a_version'
+        m_git_branch_exists.return_value = True
+        self.args.ceph_branch = 'ceph_sha1'
+        self.args.ceph_sha1 = None
+        with pytest.raises(ScheduleFailError):
+            self.klass(self.args)
+
+    @patch('teuthology.suite.run.util.fetch_repos')
+    @patch('requests.head')
+    @patch('teuthology.suite.run.util.git_branch_exists')
+    @patch('teuthology.suite.run.util.package_version_for_hash')
+    @patch('teuthology.suite.run.util.git_ls_remote')
+    def test_sha1_exists(
+        self,
+        m_git_ls_remote,
+        m_package_version_for_hash,
+        m_git_branch_exists,
+        m_requests_head,
+        m_fetch_repos,
+    ):
+        config.gitbuilder_host = 'example.com'
+        m_package_version_for_hash.return_value = 'ceph_hash'
+        m_git_branch_exists.return_value = True
+        resp = requests.Response()
+        resp.reason = 'OK'
+        resp.status_code = 200
+        m_requests_head.return_value = resp
+        # only one call to git_ls_remote in this case
+        m_git_ls_remote.return_value = "suite_branch"
+        run = self.klass(self.args)
+        assert run.base_config.sha1 == 'ceph_sha1'
+        assert run.base_config.branch == 'ceph_branch'
+
+    @patch('requests.head')
+    @patch('teuthology.suite.util.git_branch_exists')
+    @patch('teuthology.suite.util.package_version_for_hash')
+    def test_sha1_nonexistent(
+        self,
+        m_package_version_for_hash,
+        m_git_branch_exists,
+        m_requests_head,
+    ):
+        config.gitbuilder_host = 'example.com'
+        m_package_version_for_hash.return_value = 'ceph_hash'
+        m_git_branch_exists.return_value = True
+        resp = requests.Response()
+        resp.reason = 'Not Found'
+        resp.status_code = 404
+        m_requests_head.return_value = resp
+        self.args.ceph_sha1 = 'ceph_hash_dne'
+        with pytest.raises(ScheduleFailError):
+            self.klass(self.args)
diff --git a/teuthology/suite/test/test_util.py b/teuthology/suite/test/test_util.py
new file mode 100644 (file)
index 0000000..0624ed7
--- /dev/null
@@ -0,0 +1,291 @@
+import os
+import pytest
+import tempfile
+
+from copy import deepcopy
+from mock import Mock, patch
+
+from teuthology.orchestra.opsys import OS
+from teuthology.suite import util
+
+
+@patch('subprocess.check_output')
+def test_git_branch_exists(m_check_output):
+    m_check_output.return_value = ''
+    assert False == util.git_branch_exists('ceph', 'nobranchnowaycanthappen')
+    m_check_output.return_value = 'HHH branch'
+    assert True == util.git_branch_exists('ceph', 'master')
+
+
+@pytest.fixture
+def git_repository(request):
+    d = tempfile.mkdtemp()
+    os.system("""
+    cd {d}
+    git init
+    touch A
+    git config user.email 'you@example.com'
+    git config user.name 'Your Name'
+    git add A
+    git commit -m 'A' A
+    """.format(d=d))
+
+    def fin():
+        os.system("rm -fr " + d)
+    request.addfinalizer(fin)
+    return d
+
+
+class TestUtil(object):
+    @patch('requests.get')
+    def test_get_hash_success(self, m_get):
+        mock_resp = Mock()
+        mock_resp.ok = True
+        mock_resp.text = "the_hash"
+        m_get.return_value = mock_resp
+        result = util.get_gitbuilder_hash()
+        assert result == "the_hash"
+
+    @patch('requests.get')
+    def test_get_hash_fail(self, m_get):
+        mock_resp = Mock()
+        mock_resp.ok = False
+        m_get.return_value = mock_resp
+        result = util.get_gitbuilder_hash()
+        assert result is None
+
+    @patch('requests.get')
+    def test_package_version_for_hash(self, m_get):
+        mock_resp = Mock()
+        mock_resp.ok = True
+        mock_resp.text = "the_version"
+        m_get.return_value = mock_resp
+        result = util.package_version_for_hash("hash")
+        assert result == "the_version"
+
+    @patch('requests.get')
+    def test_get_branch_info(self, m_get):
+        mock_resp = Mock()
+        mock_resp.ok = True
+        mock_resp.json.return_value = "some json"
+        m_get.return_value = mock_resp
+        result = util.get_branch_info("teuthology", "master")
+        m_get.assert_called_with(
+            "https://api.github.com/repos/ceph/teuthology/git/refs/heads/master"
+        )
+        assert result == "some json"
+
+    @patch('teuthology.suite.util.lock')
+    def test_get_arch_fail(self, m_lock):
+        m_lock.list_locks.return_value = False
+        util.get_arch('magna')
+        m_lock.list_locks.assert_called_with(machine_type="magna", count=1)
+
+    @patch('teuthology.suite.util.lock')
+    def test_get_arch_success(self, m_lock):
+        m_lock.list_locks.return_value = [{"arch": "arch"}]
+        result = util.get_arch('magna')
+        m_lock.list_locks.assert_called_with(
+            machine_type="magna",
+            count=1
+        )
+        assert result == "arch"
+
+    def test_build_git_url_github(self):
+        assert 'project' in util.build_git_url('project')
+        owner = 'OWNER'
+        git_url = util.build_git_url('project', project_owner=owner)
+        assert owner in git_url
+
+    @patch('teuthology.config.TeuthologyConfig.get_ceph_qa_suite_git_url')
+    def test_build_git_url_ceph_qa_suite_custom(
+            self,
+            m_get_ceph_qa_suite_git_url):
+        url = 'http://foo.com/some'
+        m_get_ceph_qa_suite_git_url.return_value = url + '.git'
+        assert url == util.build_git_url('ceph-qa-suite')
+
+    @patch('teuthology.config.TeuthologyConfig.get_ceph_git_url')
+    def test_build_git_url_ceph_custom(self, m_get_ceph_git_url):
+        url = 'http://foo.com/some'
+        m_get_ceph_git_url.return_value = url + '.git'
+        assert url == util.build_git_url('ceph')
+
+    @patch('teuthology.config.TeuthologyConfig.get_ceph_git_url')
+    def test_git_ls_remote(self, m_get_ceph_git_url, git_repository):
+        m_get_ceph_git_url.return_value = git_repository
+        assert util.git_ls_remote('ceph', 'nobranch') is None
+        assert util.git_ls_remote('ceph', 'master') is not None
+
+
+class TestFlavor(object):
+    def test_get_install_task_flavor_bare(self):
+        config = dict(
+            tasks=[
+                dict(
+                    install=dict(),
+                ),
+            ],
+        )
+        assert util.get_install_task_flavor(config) == 'basic'
+
+    def test_get_install_task_flavor_simple(self):
+        config = dict(
+            tasks=[
+                dict(
+                    install=dict(
+                        flavor='notcmalloc',
+                    ),
+                ),
+            ],
+        )
+        assert util.get_install_task_flavor(config) == 'notcmalloc'
+
+    def test_get_install_task_flavor_override_simple(self):
+        config = dict(
+            tasks=[
+                dict(install=dict()),
+            ],
+            overrides=dict(
+                install=dict(
+                    flavor='notcmalloc',
+                ),
+            ),
+        )
+        assert util.get_install_task_flavor(config) == 'notcmalloc'
+
+    def test_get_install_task_flavor_override_project(self):
+        config = dict(
+            tasks=[
+                dict(install=dict()),
+            ],
+            overrides=dict(
+                install=dict(
+                    ceph=dict(
+                        flavor='notcmalloc',
+                    ),
+                ),
+            ),
+        )
+        assert util.get_install_task_flavor(config) == 'notcmalloc'
+
+
+class TestMissingPackages(object):
+    """
+    Tests the functionality that checks to see if a
+    scheduled job will have missing packages in gitbuilder.
+    """
+    def setup(self):
+        package_versions = dict(
+            sha1=dict(
+                ubuntu=dict(
+                    basic="1.0",
+                )
+            )
+        )
+        self.pv = package_versions
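+        # shape: {sha1: {os_type: {kernel_flavor: version}}}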
+
+    def test_os_in_package_versions(self):
+        assert self.pv == util.get_package_versions(
+            "sha1",
+            "ubuntu",
+            "basic",
+            package_versions=self.pv
+        )
+
+    @patch("teuthology.suite.util.package_version_for_hash")
+    def test_os_not_in_package_versions(self, m_package_versions_for_hash):
+        m_package_versions_for_hash.return_value = "1.1"
+        result = util.get_package_versions(
+            "sha1",
+            "rhel",
+            "basic",
+            package_versions=self.pv
+        )
+        expected = deepcopy(self.pv)
+        expected['sha1'].update(dict(rhel=dict(basic="1.1")))
+        assert result == expected
+
+    @patch("teuthology.suite.util.package_version_for_hash")
+    def test_package_versions_not_found(self, m_package_versions_for_hash):
+        # if gitbuilder returns a status that's not a 200, None is returned
+        m_package_versions_for_hash.return_value = None
+        result = util.get_package_versions(
+            "sha1",
+            "rhel",
+            "basic",
+            package_versions=self.pv
+        )
+        assert result == self.pv
+
+    @patch("teuthology.suite.util.package_version_for_hash")
+    def test_no_package_versions_kwarg(self, m_package_versions_for_hash):
+        m_package_versions_for_hash.return_value = "1.0"
+        result = util.get_package_versions(
+            "sha1",
+            "ubuntu",
+            "basic",
+        )
+        expected = deepcopy(self.pv)
+        assert result == expected
+
+    def test_distro_has_packages(self):
+        result = util.has_packages_for_distro(
+            "sha1",
+            "ubuntu",
+            "basic",
+            package_versions=self.pv,
+        )
+        assert result
+
+    def test_distro_does_not_have_packages(self):
+        result = util.has_packages_for_distro(
+            "sha1",
+            "rhel",
+            "basic",
+            package_versions=self.pv,
+        )
+        assert not result
+
+    @patch("teuthology.suite.util.get_package_versions")
+    def test_has_packages_no_package_versions(self, m_get_package_versions):
+        m_get_package_versions.return_value = self.pv
+        result = util.has_packages_for_distro(
+            "sha1",
+            "rhel",
+            "basic",
+        )
+        assert not result
+
+
+class TestDistroDefaults(object):
+
+    def test_distro_defaults_saya(self):
+        expected = ('armv7l', 'saucy',
+                    OS(name='ubuntu', version='13.10', codename='saucy'))
+        assert util.get_distro_defaults('ubuntu', 'saya') == expected
+
+    def test_distro_defaults_plana(self):
+        expected = ('x86_64', 'trusty',
+                    OS(name='ubuntu', version='14.04', codename='trusty'))
+        assert util.get_distro_defaults('ubuntu', 'plana') == expected
+
+    def test_distro_defaults_debian(self):
+        expected = ('x86_64', 'wheezy',
+                    OS(name='debian', version='7', codename='wheezy'))
+        assert util.get_distro_defaults('debian', 'magna') == expected
+
+    def test_distro_defaults_centos(self):
+        expected = ('x86_64', 'centos7',
+                    OS(name='centos', version='7', codename='core'))
+        assert util.get_distro_defaults('centos', 'magna') == expected
+
+    def test_distro_defaults_fedora(self):
+        expected = ('x86_64', 'fedora20',
+                    OS(name='fedora', version='20', codename='heisenbug'))
+        assert util.get_distro_defaults('fedora', 'magna') == expected
+
+    def test_distro_defaults_default(self):
+        expected = ('x86_64', 'centos7',
+                    OS(name='centos', version='7', codename='core'))
+        assert util.get_distro_defaults('rhel', 'magna') == expected
diff --git a/teuthology/suite/util.py b/teuthology/suite/util.py
new file mode 100644 (file)
index 0000000..16755ca
--- /dev/null
@@ -0,0 +1,453 @@
+import copy
+import logging
+import os
+import re
+import requests
+import smtplib
+import socket
+import subprocess
+import sys
+
+from email.mime.text import MIMEText
+
+from .. import lock
+
+from ..config import config
+from ..exceptions import BranchNotFoundError, ScheduleFailError
+from ..misc import deep_merge
+from ..repo_utils import fetch_qa_suite, fetch_teuthology
+from ..orchestra.opsys import OS
+from ..packaging import GitbuilderProject
+from ..task.install import get_flavor
+
+log = logging.getLogger(__name__)
+
+
+def fetch_repos(branch, test_name):
+    """
+    Fetch the suite repo (and also the teuthology repo) so that we can use it
+    to build jobs. Repos are stored in ~/src/.
+
+    The reason the teuthology repo is also fetched is that currently we use
+    subprocess to call teuthology-schedule to schedule jobs so we need to make
+    sure it is up-to-date. For that reason we always fetch the master branch
+    for test scheduling, regardless of what teuthology branch is requested for
+    testing.
+
+    :returns: The path to the suite repo on disk
+    """
+    try:
+        # When a user is scheduling a test run from their own copy of
+        # teuthology, let's not wreak havoc on it.
+        if config.automated_scheduling:
+            # We use teuthology's master branch in all cases right now
+            if config.teuthology_path is None:
+                fetch_teuthology('master')
+        suite_repo_path = fetch_qa_suite(branch)
+    except BranchNotFoundError as exc:
+        schedule_fail(message=str(exc), name=test_name)
+    return suite_repo_path
+
+
+def schedule_fail(message, name=''):
+    """
+    If an email address has been specified anywhere, send an alert there. Then
+    raise a ScheduleFailError.
+    """
+    email = config.results_email
+    if email:
+        subject = "Failed to schedule {name}".format(name=name)
+        msg = MIMEText(message)
+        msg['Subject'] = subject
+        msg['From'] = config.results_sending_email
+        msg['To'] = email
+        try:
+            smtp = smtplib.SMTP('localhost')
+            smtp.sendmail(msg['From'], [msg['To']], msg.as_string())
+            smtp.quit()
+        except socket.error:
+            log.exception("Failed to connect to mail server!")
+    raise ScheduleFailError(message, name)
+
+
+def get_worker(machine_type):
+    """
+    Map a given machine_type to a beanstalkd worker. If machine_type mentions
+    multiple machine types - e.g. 'plana,mira' - then this returns 'multi'.
+    Otherwise it returns what was passed.
+    """
+    if ',' in machine_type:
+        return 'multi'
+    else:
+        return machine_type
+
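+# e.g. get_worker('plana') -> 'plana'; get_worker('plana,mira') -> 'multi'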
+
+def get_gitbuilder_hash(project='ceph', branch='master', flavor='basic',
+                        machine_type='plana', distro='ubuntu'):
+    """
+    Find the hash representing the head of the project's repository via
+    querying a gitbuilder repo.
+
+    Will return None in the case of a 404 or any other HTTP error.
+    """
+    # Alternate method for github-hosted projects - left here for informational
+    # purposes
+    # resp = requests.get(
+    #     'https://api.github.com/repos/ceph/ceph/git/refs/heads/master')
+    # hash = resp.json()['object']['sha']
+    (arch, release, _os) = get_distro_defaults(distro, machine_type)
+    gp = GitbuilderProject(
+        project,
+        dict(
+            branch=branch,
+            flavor=flavor,
+            os_type=distro,
+            arch=arch,
+        ),
+    )
+    return gp.sha1
+
+
+def get_distro_defaults(distro, machine_type):
+    """
+    Given a distro (e.g. 'ubuntu') and machine type, return:
+        (arch, release, os)
+    where os is an OS instance whose package_type is 'deb' or 'rpm'.
+
+    This is used to default to:
+        ('x86_64', 'trusty', 'deb')   when passed 'ubuntu' and 'plana'
+        ('armv7l', 'saucy', 'deb')    when passed 'ubuntu' and 'saya'
+        ('x86_64', 'wheezy', 'deb')   when passed 'debian'
+        ('x86_64', 'fedora20', 'rpm') when passed 'fedora'
+        ('x86_64', 'centos7', 'rpm')  when passed anything else
+    """
+    arch = 'x86_64'
+    if distro in (None, 'None'):
+        os_type = 'centos'
+        os_version = '7'
+    elif distro in ('rhel', 'centos'):
+        os_type = 'centos'
+        os_version = '7'
+    elif distro == 'ubuntu':
+        os_type = distro
+        if machine_type == 'saya':
+            os_version = '13.10'
+            arch = 'armv7l'
+        else:
+            os_version = '14.04'
+    elif distro == 'debian':
+        os_type = distro
+        os_version = '7'
+    elif distro == 'fedora':
+        os_type = distro
+        os_version = '20'
+    else:
+        raise ValueError("Invalid distro value passed: %s" % distro)
+    _os = OS(name=os_type, version=os_version)
+    release = GitbuilderProject._get_distro(
+        _os.name,
+        _os.version,
+        _os.codename,
+    )
+    template = "Defaults for machine_type {mtype} distro {distro}: " \
+        "arch={arch}, release={release}, pkg_type={pkg}"
+    log.debug(template.format(
+        mtype=machine_type,
+        distro=_os.name,
+        arch=arch,
+        release=release,
+        pkg=_os.package_type)
+    )
+    return (
+        arch,
+        release,
+        _os,
+    )
+
+
+def git_ls_remote(project, branch, project_owner='ceph'):
+    """
+    Find the latest sha1 for a given project's branch.
+
+    :returns: The sha1 if found; else None
+    """
+    url = build_git_url(project, project_owner)
+    cmd = "git ls-remote {} {}".format(url, branch)
+    result = subprocess.check_output(
+        cmd, shell=True).split()
+    sha1 = result[0] if result else None
+    log.debug("{} -> {}".format(cmd, sha1))
+    return sha1
+
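+# For example, git_ls_remote('teuthology', 'master') runs
+# "git ls-remote https://github.com/ceph/teuthology master" and returns the
+# first whitespace-separated field of the output (the sha1), if any.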
+
+def git_validate_sha1(project, sha1, project_owner='ceph'):
+    '''
+    Use HTTP to validate that the project contains the sha1. There is no
+    way to do this with git itself, so for now we HEAD project-specific
+    URLs on github and git.ceph.com/gitweb.
+    '''
+    url = build_git_url(project, project_owner)
+
+    if '/github.com/' in url:
+        url = '/'.join((url, 'commit', sha1))
+    elif '/git.ceph.com/' in url:
+        # kinda specific to knowing git.ceph.com is gitweb
+        url = ('http://git.ceph.com/?p=%s.git;a=blob_plain;f=.gitignore;hb=%s'
+               % (project, sha1))
+    else:
+        raise RuntimeError(
+            'git_validate_sha1: how do I check %s for a sha1?' % url
+        )
+
+    resp = requests.head(url)
+    if resp.ok:
+        return sha1
+    return None
+
+
+def build_git_url(project, project_owner='ceph'):
+    """
+    Return the git URL to clone the project
+    """
+    if project == 'ceph-qa-suite':
+        base = config.get_ceph_qa_suite_git_url()
+    elif project == 'ceph':
+        base = config.get_ceph_git_url()
+    else:
+        base = 'https://github.com/{project_owner}/{project}'
+    url_templ = re.sub(r'\.git$', '', base)
+    return url_templ.format(project_owner=project_owner, project=project)
+
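+# With the default config, build_git_url('teuthology') yields
+# 'https://github.com/ceph/teuthology'; for 'ceph' and 'ceph-qa-suite' the
+# base URL comes from config, with any trailing '.git' stripped.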
+
+def git_branch_exists(project, branch, project_owner='ceph'):
+    """
+    Query the git repository to check the existence of a project's branch
+    """
+    return git_ls_remote(project, branch, project_owner) is not None
+
+
+def get_branch_info(project, branch, project_owner='ceph'):
+    """
+    NOTE: This is currently not being used because of GitHub's API rate
+    limiting. We use git_branch_exists() instead.
+
+    Use the GitHub API to query a project's branch. Returns:
+        {u'object': {u'sha': <a_sha_string>,
+                    u'type': <string>,
+                    u'url': <url_to_commit>},
+        u'ref': u'refs/heads/<branch>',
+        u'url': <url_to_branch>}
+
+    We mainly use this to check if a branch exists.
+    """
+    url_templ = 'https://api.github.com/repos/{project_owner}/{project}/git/refs/heads/{branch}'  # noqa
+    url = url_templ.format(project_owner=project_owner, project=project,
+                           branch=branch)
+    resp = requests.get(url)
+    if resp.ok:
+        return resp.json()
+
+
+def package_version_for_hash(hash, kernel_flavor='basic',
+                             distro='rhel', machine_type='plana'):
+    """
+    Query gitbuilder for the ceph package version built from the given hash.
+
+    :returns: a string.
+    """
+    (arch, release, _os) = get_distro_defaults(distro, machine_type)
+    if distro in (None, 'None'):
+        distro = _os.name
+    gp = GitbuilderProject(
+        'ceph',
+        dict(
+            flavor=kernel_flavor,
+            os_type=distro,
+            arch=arch,
+            sha1=hash,
+        ),
+    )
+    return gp.version
+
+
+def get_arch(machine_type):
+    """
+    Based on a given machine_type, return its architecture by querying the lock
+    server.
+
+    :returns: A string or None
+    """
+    result = lock.list_locks(machine_type=machine_type, count=1)
+    if not result:
+        log.warn("No machines found with machine_type %s!", machine_type)
+    else:
+        return result[0]['arch']
+
+
+def strip_fragment_path(original_path):
+    """
+    Given a path, remove the text before '/suites/'.  Part of the fix for
+    http://tracker.ceph.com/issues/15470
+    """
+    scan_after = '/suites/'
+    scan_start = original_path.find(scan_after)
+    if scan_start > 0:
+        return original_path[scan_start + len(scan_after):]
+    return original_path
+
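+# e.g. strip_fragment_path('/tmp/ceph-qa-suite/suites/rbd/basic.yaml')
+# returns 'rbd/basic.yaml'; a path without '/suites/' is returned unchanged.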
+
+def get_install_task_flavor(job_config):
+    """
+    Pokes through the install task's configuration (including its overrides) to
+    figure out which flavor it will want to install.
+
+    Only looks at the first instance of the install task in job_config.
+    """
+    project = job_config.get('project', 'ceph')
+    tasks = job_config.get('tasks', dict())
+    overrides = job_config.get('overrides', dict())
+    install_overrides = overrides.get('install', dict())
+    project_overrides = install_overrides.get(project, dict())
+    first_install_config = dict()
+    for task in tasks:
+        if task.keys()[0] == 'install':
+            first_install_config = task.values()[0] or dict()
+            break
+    first_install_config = copy.deepcopy(first_install_config)
+    deep_merge(first_install_config, install_overrides)
+    deep_merge(first_install_config, project_overrides)
+    return get_flavor(first_install_config)
+
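+# For example, with tasks=[{'install': {}}] and
+# overrides={'install': {'ceph': {'flavor': 'notcmalloc'}}}, the project-level
+# override wins and 'notcmalloc' is returned (see TestFlavor in test_util.py).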
+
+def get_package_versions(sha1, os_type, kernel_flavor, package_versions=None):
+    """
+    Will retrieve the package versions for the given sha1, os_type and
+    kernel_flavor from gitbuilder.
+
+    Optionally, a package_versions dict can be provided
+    from previous calls to this function to avoid calling gitbuilder for
+    information we've already retrieved.
+
+    The package_versions dict will be in the following format::
+
+        {
+            "sha1": {
+                "ubuntu": {
+                    "basic": "version",
+                    }
+                "rhel": {
+                    "basic": "version",
+                    }
+            },
+            "another-sha1": {
+                "ubuntu": {
+                    "basic": "version",
+                    }
+            }
+        }
+
+    :param sha1:             The sha1 hash of the ceph version.
+    :param os_type:          The distro we want to get packages for, given
+                             the ceph sha1. Ex. 'ubuntu', 'rhel', etc.
+    :param kernel_flavor:    The kernel flavor
+    :param package_versions: Use this optionally to use cached results of
+                             previous calls to gitbuilder.
+    :returns:                A dict of package versions. Will return versions
+                             for all hashes and distros, not just for the given
+                             hash and distro.
+    """
+    if not package_versions:
+        package_versions = dict()
+
+    os_type = str(os_type)
+
+    os_types = package_versions.get(sha1, dict())
+    package_versions_for_flavor = os_types.get(os_type, dict())
+    if kernel_flavor not in package_versions_for_flavor:
+        package_version = package_version_for_hash(
+            sha1,
+            kernel_flavor,
+            distro=os_type
+        )
+        package_versions_for_flavor[kernel_flavor] = package_version
+        os_types[os_type] = package_versions_for_flavor
+        package_versions[sha1] = os_types
+
+    return package_versions
+
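+# Callers typically thread the returned dict back into later calls so that
+# gitbuilder is queried only once per (sha1, os_type, kernel_flavor):
+#
+#   versions = get_package_versions(sha1, 'ubuntu', 'basic')
+#   versions = get_package_versions(sha1, 'rhel', 'basic',
+#                                   package_versions=versions)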
+
+def has_packages_for_distro(sha1, os_type, kernel_flavor,
+                            package_versions=None):
+    """
+    Checks to see if gitbuilder has packages for the given sha1, os_type and
+    kernel_flavor.
+
+    Optionally, a package_versions dict can be provided
+    from previous calls to this function to avoid calling gitbuilder for
+    information we've already retrieved.
+
+    The package_versions dict will be in the following format::
+
+        {
+            "sha1": {
+                "ubuntu": {
+                    "basic": "version",
+                    }
+                "rhel": {
+                    "basic": "version",
+                    }
+            },
+            "another-sha1": {
+                "ubuntu": {
+                    "basic": "version",
+                    }
+            }
+        }
+
+    :param sha1:             The sha1 hash of the ceph version.
+    :param os_type:          The distro we want to get packages for, given
+                             the ceph sha1. Ex. 'ubuntu', 'rhel', etc.
+    :param kernel_flavor:    The kernel flavor
+    :param package_versions: Use this optionally to use cached results of
+                             previous calls to gitbuilder.
+    :returns:                True, if packages are found. False otherwise.
+    """
+    os_type = str(os_type)
+    if not package_versions:
+        package_versions = get_package_versions(sha1, os_type, kernel_flavor)
+
+    package_versions_for_hash = package_versions.get(sha1, dict()).get(
+        os_type, dict())
+    # we want to return a boolean here, not the actual package versions
+    return bool(package_versions_for_hash.get(kernel_flavor, None))
+
+
+def teuthology_schedule(args, verbose, dry_run, log_prefix=''):
+    """
+    Run teuthology-schedule to schedule individual jobs.
+
+    If --dry-run has been passed but --verbose has been passed just once, don't
+    actually run the command - only print what would be executed.
+
+    If --dry-run has been passed and --verbose has been passed multiple times,
+    do both.
+    """
+    exec_path = os.path.join(
+        os.path.dirname(sys.argv[0]),
+        'teuthology-schedule')
+    args.insert(0, exec_path)
+    if dry_run:
+        # Quote any individual args so that individual commands can be copied
+        # and pasted in order to execute them individually.
+        printable_args = []
+        for item in args:
+            if ' ' in item:
+                printable_args.append("'%s'" % item)
+            else:
+                printable_args.append(item)
+        log.info('{0}{1}'.format(
+            log_prefix,
+            ' '.join(printable_args),
+        ))
+    if not dry_run or (dry_run and verbose > 1):
+        subprocess.check_call(args=args)
diff --git a/teuthology/test/suites/noop/noop.yaml b/teuthology/test/suites/noop/noop.yaml
deleted file mode 100644 (file)
index fb674b1..0000000
+++ /dev/null
@@ -1,7 +0,0 @@
-roles:
-- - mon.a
-  - osd.0
-tasks:
-- exec:
-    mon.a:
-      - echo "Well done !"
diff --git a/teuthology/test/test_matrix.py b/teuthology/test/test_matrix.py
deleted file mode 100644 (file)
index bad5efc..0000000
+++ /dev/null
@@ -1,71 +0,0 @@
-from .. import matrix
-
-def verify_matrix_output_diversity(res):
-    """
-    Verifies that the size of the matrix passed matches the number of unique
-    outputs from res.index
-    """
-    sz = res.size()
-    s = frozenset([matrix.generate_lists(res.index(i)) for i in range(sz)])
-    for i in range(res.size()):
-        assert sz == len(s)
-
-def mbs(num, l):
-    return matrix.Sum(num*10, [matrix.Base(i + (100*num)) for i in l])
-
-class TestMatrix(object):
-    def test_simple(self):
-        verify_matrix_output_diversity(mbs(1, range(6)))
-
-    def test_simple2(self):
-        verify_matrix_output_diversity(mbs(1, range(5)))
-
-    # The test_product* tests differ by the degree by which dimension
-    # sizes share prime factors
-    def test_product_simple(self):
-        verify_matrix_output_diversity(
-            matrix.Product(1, [mbs(1, range(6)), mbs(2, range(2))]))
-
-    def test_product_3_facets_2_prime_factors(self):
-        verify_matrix_output_diversity(matrix.Product(1, [
-                    mbs(1, range(6)),
-                    mbs(2, range(2)),
-                    mbs(3, range(3)),
-                    ]))
-
-    def test_product_3_facets_2_prime_factors_one_larger(self):
-        verify_matrix_output_diversity(matrix.Product(1, [
-                    mbs(1, range(2)),
-                    mbs(2, range(5)),
-                    mbs(4, range(4)),
-                    ]))
-
-    def test_product_4_facets_2_prime_factors(self):
-        verify_matrix_output_diversity(matrix.Sum(1, [
-                    mbs(1, range(6)),
-                    mbs(3, range(3)),
-                    mbs(2, range(2)),
-                    mbs(4, range(9)),
-                    ]))
-
-    def test_product_2_facets_2_prime_factors(self):
-        verify_matrix_output_diversity(matrix.Sum(1, [
-                    mbs(1, range(2)),
-                    mbs(2, range(5)),
-                    ]))
-
-    def test_product_with_sum(self):
-        verify_matrix_output_diversity(matrix.Sum(
-                9,
-                [
-                    mbs(10, range(6)),
-                    matrix.Product(1, [
-                            mbs(1, range(2)),
-                            mbs(2, range(5)),
-                            mbs(4, range(4))]),
-                    matrix.Product(8, [
-                            mbs(7, range(2)),
-                            mbs(6, range(5)),
-                            mbs(5, range(4))])
-                    ]
-                ))
diff --git a/teuthology/test/test_suite.py b/teuthology/test/test_suite.py
deleted file mode 100644 (file)
index 0035465..0000000
+++ /dev/null
@@ -1,1096 +0,0 @@
-from copy import deepcopy
-from datetime import datetime
-
-from mock import patch, Mock, DEFAULT, MagicMock
-
-from fake_fs import make_fake_fstools
-from teuthology import suite
-from scripts.suite import main
-from teuthology.config import config, YamlConfig
-from teuthology.orchestra.opsys import OS
-
-import os
-import pytest
-import tempfile
-import time
-import random
-import requests     # to mock a Response
-
-
-def get_fake_time_and_sleep():
-    m_time = Mock()
-    m_time.return_value = time.time()
-
-    def m_time_side_effect():
-        # Fake the slow passage of time
-        m_time.return_value += 0.1
-        return m_time.return_value
-    m_time.side_effect = m_time_side_effect
-
-    def f_sleep(seconds):
-        m_time.return_value += seconds
-    m_sleep = Mock(wraps=f_sleep)
-    return m_time, m_sleep
-
-
-def setup_module():
-    global m_time
-    global m_sleep
-    m_time, m_sleep = get_fake_time_and_sleep()
-    global patcher_time_sleep
-    patcher_time_sleep = patch.multiple(
-        'teuthology.suite.time',
-        time=m_time,
-        sleep=m_sleep,
-    )
-    patcher_time_sleep.start()
-
-
-def teardown_module():
-    patcher_time_sleep.stop()
-
-
-@pytest.fixture
-def git_repository(request):
-    d = tempfile.mkdtemp()
-    os.system("""
-    cd {d}
-    git init
-    touch A
-    git config user.email 'you@example.com'
-    git config user.name 'Your Name'
-    git add A
-    git commit -m 'A' A
-    """.format(d=d))
-    def fin():
-        os.system("rm -fr " + d)
-    request.addfinalizer(fin)
-    return d
-
-
-class TestSuiteOffline(object):
-    def test_substitute_placeholders(self):
-        suite_hash = 'suite_hash'
-        input_dict = dict(
-            suite='suite',
-            suite_branch='suite_branch',
-            suite_hash=suite_hash,
-            ceph_branch='ceph_branch',
-            ceph_hash='ceph_hash',
-            teuthology_branch='teuthology_branch',
-            machine_type='machine_type',
-            distro='distro',
-            archive_upload='archive_upload',
-            archive_upload_key='archive_upload_key',
-        )
-        output_dict = suite.substitute_placeholders(suite.dict_templ,
-                                                    input_dict)
-        assert output_dict['suite'] == 'suite'
-        assert output_dict['suite_sha1'] == suite_hash
-        assert isinstance(suite.dict_templ['suite'], suite.Placeholder)
-        assert isinstance(
-            suite.dict_templ['overrides']['admin_socket']['branch'],
-            suite.Placeholder)
-
-    def test_null_placeholders_dropped(self):
-        input_dict = dict(
-            suite='suite',
-            suite_branch='suite_branch',
-            suite_hash='suite_hash',
-            ceph_branch='ceph_branch',
-            ceph_hash='ceph_hash',
-            teuthology_branch='teuthology_branch',
-            machine_type='machine_type',
-            archive_upload='archive_upload',
-            archive_upload_key='archive_upload_key',
-            distro=None,
-        )
-        output_dict = suite.substitute_placeholders(suite.dict_templ,
-                                                    input_dict)
-        assert 'os_type' not in output_dict
-
-    @patch('requests.get')
-    def test_get_hash_success(self, m_get):
-        mock_resp = Mock()
-        mock_resp.ok = True
-        mock_resp.text = "the_hash"
-        m_get.return_value = mock_resp
-        result = suite.get_gitbuilder_hash()
-        assert result == "the_hash"
-
-    @patch('requests.get')
-    def test_get_hash_fail(self, m_get):
-        mock_resp = Mock()
-        mock_resp.ok = False
-        m_get.return_value = mock_resp
-        result = suite.get_gitbuilder_hash()
-        assert result is None
-
-    @patch('requests.get')
-    def test_package_version_for_hash(self, m_get):
-        mock_resp = Mock()
-        mock_resp.ok = True
-        mock_resp.text = "the_version"
-        m_get.return_value = mock_resp
-        result = suite.package_version_for_hash("hash")
-        assert result == "the_version"
-
-    @patch('requests.get')
-    def test_get_branch_info(self, m_get):
-        mock_resp = Mock()
-        mock_resp.ok = True
-        mock_resp.json.return_value = "some json"
-        m_get.return_value = mock_resp
-        result = suite.get_branch_info("teuthology", "master")
-        m_get.assert_called_with(
-            "https://api.github.com/repos/ceph/teuthology/git/refs/heads/master"
-        )
-        assert result == "some json"
-
-    @patch('teuthology.suite.lock')
-    def test_get_arch_fail(self, m_lock):
-        m_lock.list_locks.return_value = False
-        suite.get_arch('magna')
-        m_lock.list_locks.assert_called_with(machine_type="magna", count=1)
-
-    @patch('teuthology.suite.lock')
-    def test_get_arch_success(self, m_lock):
-        m_lock.list_locks.return_value = [{"arch": "arch"}]
-        result = suite.get_arch('magna')
-        m_lock.list_locks.assert_called_with(
-            machine_type="magna",
-            count=1
-        )
-        assert result == "arch"
-
-    def test_combine_path(self):
-        result = suite.combine_path("/path/to/left", "right/side")
-        assert result == "/path/to/left/right/side"
-
-    def test_combine_path_no_right(self):
-        result = suite.combine_path("/path/to/left", None)
-        assert result == "/path/to/left"
-
-    def test_build_git_url_github(self):
-        assert 'project' in suite.build_git_url('project')
-        owner = 'OWNER'
-        assert owner in suite.build_git_url('project', project_owner=owner)
-
-    @patch('teuthology.config.TeuthologyConfig.get_ceph_qa_suite_git_url')
-    def test_build_git_url_ceph_qa_suite_custom(self, m_get_ceph_qa_suite_git_url):
-        url = 'http://foo.com/some'
-        m_get_ceph_qa_suite_git_url.return_value = url + '.git'
-        assert url == suite.build_git_url('ceph-qa-suite')
-
-    @patch('teuthology.config.TeuthologyConfig.get_ceph_git_url')
-    def test_build_git_url_ceph_custom(self, m_get_ceph_git_url):
-        url = 'http://foo.com/some'
-        m_get_ceph_git_url.return_value = url + '.git'
-        assert url == suite.build_git_url('ceph')
-
-    @patch('teuthology.config.TeuthologyConfig.get_ceph_git_url')
-    def test_git_ls_remote(self, m_get_ceph_git_url, git_repository):
-        m_get_ceph_git_url.return_value = git_repository
-        assert None == suite.git_ls_remote('ceph', 'nobranch')
-        assert suite.git_ls_remote('ceph', 'master') is not None
-
-
-class TestFlavor(object):
-    def test_get_install_task_flavor_bare(self):
-        config = dict(
-            tasks=[
-                dict(
-                    install=dict(),
-                ),
-            ],
-        )
-        assert suite.get_install_task_flavor(config) == 'basic'
-
-    def test_get_install_task_flavor_simple(self):
-        config = dict(
-            tasks=[
-                dict(
-                    install=dict(
-                        flavor='notcmalloc',
-                    ),
-                ),
-            ],
-        )
-        assert suite.get_install_task_flavor(config) == 'notcmalloc'
-
-    def test_get_install_task_flavor_override_simple(self):
-        config = dict(
-            tasks=[
-                dict(install=dict()),
-            ],
-            overrides=dict(
-                install=dict(
-                    flavor='notcmalloc',
-                ),
-            ),
-        )
-        assert suite.get_install_task_flavor(config) == 'notcmalloc'
-
-    def test_get_install_task_flavor_override_project(self):
-        config = dict(
-            tasks=[
-                dict(install=dict()),
-            ],
-            overrides=dict(
-                install=dict(
-                    ceph=dict(
-                        flavor='notcmalloc',
-                    ),
-                ),
-            ),
-        )
-        assert suite.get_install_task_flavor(config) == 'notcmalloc'
-
-
-class TestRun(object):
-    klass = suite.Run
-
-    def setup(self):
-        self.args_dict = dict(
-            suite='suite',
-            suite_branch='suite_branch',
-            ceph_branch='ceph_branch',
-            ceph_sha1='ceph_sha1',
-            teuthology_branch='teuthology_branch',
-            kernel_branch=None,
-            kernel_flavor='kernel_flavor',
-            distro='ubuntu',
-            machine_type='machine_type',
-            base_yaml_paths=list(),
-        )
-        self.args = suite.YamlConfig.from_dict(self.args_dict)
-
-    @patch('teuthology.suite.fetch_repos')
-    def test_name(self, m_fetch_repos):
-        stamp = datetime.now().strftime('%Y-%m-%d_%H:%M:%S')
-        with patch.object(suite.Run, 'create_initial_config',
-                          return_value=suite.JobConfig()):
-            name = suite.Run(self.args).name
-        assert stamp in name
-
-    @patch('teuthology.suite.fetch_repos')
-    def test_name_user(self, m_fetch_repos):
-        self.args.user = 'USER'
-        with patch.object(suite.Run, 'create_initial_config',
-                          return_value=suite.JobConfig()):
-            name = suite.Run(self.args).name
-        assert name.startswith('USER-')
-
-    @patch('teuthology.suite.git_branch_exists')
-    @patch('teuthology.suite.package_version_for_hash')
-    @patch('teuthology.suite.git_ls_remote')
-    def test_branch_nonexistent(
-        self,
-        m_git_ls_remote,
-        m_package_version_for_hash,
-        m_git_branch_exists,
-    ):
-        config.gitbuilder_host = 'example.com'
-        m_git_ls_remote.side_effect = [
-            # First call will be for the ceph hash
-            None,
-            # Second call will be for the suite hash
-            'suite_hash',
-        ]
-        m_package_version_for_hash.return_value = 'a_version'
-        m_git_branch_exists.return_value = True
-        self.args.ceph_branch = 'ceph_sha1'
-        self.args.ceph_sha1 = None
-        with pytest.raises(suite.ScheduleFailError):
-            self.klass(self.args)
-
-    @patch('teuthology.suite.fetch_repos')
-    @patch('requests.head')
-    @patch('teuthology.suite.git_branch_exists')
-    @patch('teuthology.suite.package_version_for_hash')
-    @patch('teuthology.suite.git_ls_remote')
-    def test_sha1_exists(
-        self,
-        m_git_ls_remote,
-        m_package_version_for_hash,
-        m_git_branch_exists,
-        m_requests_head,
-        m_fetch_repos,
-    ):
-        config.gitbuilder_host = 'example.com'
-        m_package_version_for_hash.return_value = 'ceph_hash'
-        m_git_branch_exists.return_value = True
-        resp = requests.Response()
-        resp.reason = 'OK'
-        resp.status_code = 200
-        m_requests_head.return_value = resp
-        # only one call to git_ls_remote in this case
-        m_git_ls_remote.return_value = "suite_branch"
-        run = self.klass(self.args)
-        assert run.base_config.sha1 == 'ceph_sha1'
-        assert run.base_config.branch == 'ceph_branch'
-
-    @patch('requests.head')
-    @patch('teuthology.suite.git_branch_exists')
-    @patch('teuthology.suite.package_version_for_hash')
-    def test_sha1_nonexistent(
-        self,
-        m_package_version_for_hash,
-        m_git_branch_exists,
-        m_requests_head,
-    ):
-        config.gitbuilder_host = 'example.com'
-        m_package_version_for_hash.return_value = 'ceph_hash'
-        m_git_branch_exists.return_value = True
-        resp = requests.Response()
-        resp.reason = 'Not Found'
-        resp.status_code = 404
-        m_requests_head.return_value = resp
-        self.args.ceph_sha1 = 'ceph_hash_dne'
-        with pytest.raises(suite.ScheduleFailError):
-            self.klass(self.args)
-
-
-class TestMissingPackages(object):
-    """
-    Tests the functionality that checks to see if a
-    scheduled job will have missing packages in gitbuilder.
-    """
-    def setup(self):
-        package_versions = dict(
-            sha1=dict(
-                ubuntu=dict(
-                    basic="1.0",
-                )
-            )
-        )
-        self.pv = package_versions
-
-    def test_os_in_package_versions(self):
-        assert self.pv == suite.get_package_versions(
-            "sha1",
-            "ubuntu",
-            "basic",
-            package_versions=self.pv
-        )
-
-    @patch("teuthology.suite.package_version_for_hash")
-    def test_os_not_in_package_versions(self, m_package_versions_for_hash):
-        m_package_versions_for_hash.return_value = "1.1"
-        result = suite.get_package_versions(
-            "sha1",
-            "rhel",
-            "basic",
-            package_versions=self.pv
-        )
-        expected = deepcopy(self.pv)
-        expected['sha1'].update(dict(rhel=dict(basic="1.1")))
-        assert result == expected
-
-    @patch("teuthology.suite.package_version_for_hash")
-    def test_package_versions_not_found(self, m_package_versions_for_hash):
-        # if gitbuilder returns a status that's not a 200, None is returned
-        m_package_versions_for_hash.return_value = None
-        result = suite.get_package_versions(
-            "sha1",
-            "rhel",
-            "basic",
-            package_versions=self.pv
-        )
-        assert result == self.pv
-
-    @patch("teuthology.suite.package_version_for_hash")
-    def test_no_package_versions_kwarg(self, m_package_versions_for_hash):
-        m_package_versions_for_hash.return_value = "1.0"
-        result = suite.get_package_versions(
-            "sha1",
-            "ubuntu",
-            "basic",
-        )
-        expected = deepcopy(self.pv)
-        assert result == expected
-
-    def test_distro_has_packages(self):
-        result = suite.has_packages_for_distro(
-            "sha1",
-            "ubuntu",
-            "basic",
-            package_versions=self.pv,
-        )
-        assert result
-
-    def test_distro_does_not_have_packages(self):
-        result = suite.has_packages_for_distro(
-            "sha1",
-            "rhel",
-            "basic",
-            package_versions=self.pv,
-        )
-        assert not result
-
-    @patch("teuthology.suite.get_package_versions")
-    def test_has_packages_no_package_versions(self, m_get_package_versions):
-        m_get_package_versions.return_value = self.pv
-        result = suite.has_packages_for_distro(
-            "sha1",
-            "rhel",
-            "basic",
-        )
-        assert not result
-
-
-class TestDistroDefaults(object):
-
-    def test_distro_defaults_saya(self):
-        expected = ('armv7l', 'saucy',
-                    OS(name='ubuntu', version='13.10', codename='saucy'))
-        assert suite.get_distro_defaults('ubuntu', 'saya') == expected
-
-    def test_distro_defaults_plana(self):
-        expected = ('x86_64', 'trusty',
-                    OS(name='ubuntu', version='14.04', codename='trusty'))
-        assert suite.get_distro_defaults('ubuntu', 'plana') == expected
-
-    def test_distro_defaults_debian(self):
-        expected = ('x86_64', 'wheezy',
-                    OS(name='debian', version='7', codename='wheezy'))
-        assert suite.get_distro_defaults('debian', 'magna') == expected
-
-    def test_distro_defaults_centos(self):
-        expected = ('x86_64', 'centos7',
-                    OS(name='centos', version='7', codename='core'))
-        assert suite.get_distro_defaults('centos', 'magna') == expected
-
-    def test_distro_defaults_fedora(self):
-        expected = ('x86_64', 'fedora20',
-                    OS(name='fedora', version='20', codename='heisenbug'))
-        assert suite.get_distro_defaults('fedora', 'magna') == expected
-
-    def test_distro_defaults_default(self):
-        expected = ('x86_64', 'centos7',
-                    OS(name='centos', version='7', codename='core'))
-        assert suite.get_distro_defaults('rhel', 'magna') == expected
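-
-    # Together these pin down get_distro_defaults(distro, machine_type) as an
-    # (arch, codename, OS) triple: the ARM 'saya' type maps to armv7l, and
-    # unrecognized distros apparently fall back to centos7 on x86_64.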
-
-
-class TestBuildMatrix(object):
-
-    patchpoints = [
-        'os.path.exists',
-        'os.listdir',
-        'os.path.isfile',
-        'os.path.isdir',
-        '__builtin__.open',
-    ]
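-
-    # The fake_fs dicts fed to start_patchers() model a directory tree for
-    # the patched os calls: a nested dict is a directory, a None value is a
-    # file.  Two sentinel entries steer build_matrix, as the tests exercise:
-    #   '%': None   -> convolve: cross product of the sibling facets
-    #   '+': None   -> concatenate: fold every sibling fragment into one job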
-
-    def setup(self):
-        self.mocks = dict()
-        self.patchers = dict()
-        for ppoint in self.__class__.patchpoints:
-            self.mocks[ppoint] = MagicMock()
-            self.patchers[ppoint] = patch(ppoint, self.mocks[ppoint])
-
-    def start_patchers(self, fake_fs):
-        fake_fns = make_fake_fstools(fake_fs)
-        # relies on fake_fns being in same order as patchpoints
-        for ppoint, fn in zip(self.__class__.patchpoints, fake_fns):
-            self.mocks[ppoint].side_effect = fn
-        for patcher in self.patchers.values():
-            patcher.start()
-
-    def stop_patchers(self):
-        for patcher in self.patchers.values():
-            patcher.stop()
-
-    def teardown(self):
-        self.stop_patchers()
-
-    def fragment_occurrences(self, jobs, fragment):
-        # What fraction of jobs contain fragment?
-        count = 0
-        for (description, fragment_list) in jobs:
-            for item in fragment_list:
-                if item.endswith(fragment):
-                    count += 1
-        return count / float(len(jobs))
-
-    def test_concatenate_1x2x3(self):
-        fake_fs = {
-            'd0_0': {
-                '+': None,
-                'd1_0': {
-                    'd1_0_0.yaml': None,
-                },
-                'd1_1': {
-                    'd1_1_0.yaml': None,
-                    'd1_1_1.yaml': None,
-                },
-                'd1_2': {
-                    'd1_2_0.yaml': None,
-                    'd1_2_1.yaml': None,
-                    'd1_2_2.yaml': None,
-                },
-            },
-        }
-        self.start_patchers(fake_fs)
-        result = suite.build_matrix('d0_0')
-        assert len(result) == 1
-
-    def test_convolve_2x2(self):
-        fake_fs = {
-            'd0_0': {
-                '%': None,
-                'd1_0': {
-                    'd1_0_0.yaml': None,
-                    'd1_0_1.yaml': None,
-                },
-                'd1_1': {
-                    'd1_1_0.yaml': None,
-                    'd1_1_1.yaml': None,
-                },
-            },
-        }
-        self.start_patchers(fake_fs)
-        result = suite.build_matrix('d0_0')
-        assert len(result) == 4
-        assert self.fragment_occurrences(result, 'd1_1_1.yaml') == 0.5
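-        # '%' convolves the two two-fragment facets: 2 * 2 = 4 jobs, with
-        # each individual fragment appearing in exactly half of them.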
-
-    def test_convolve_2x2x2(self):
-        fake_fs = {
-            'd0_0': {
-                '%': None,
-                'd1_0': {
-                    'd1_0_0.yaml': None,
-                    'd1_0_1.yaml': None,
-                },
-                'd1_1': {
-                    'd1_1_0.yaml': None,
-                    'd1_1_1.yaml': None,
-                },
-                'd1_2': {
-                    'd1_2_0.yaml': None,
-                    'd1_2_1.yaml': None,
-                },
-            },
-        }
-        self.start_patchers(fake_fs)
-        result = suite.build_matrix('d0_0')
-        assert len(result) == 8
-        assert self.fragment_occurrences(result, 'd1_2_0.yaml') == 0.5
-
-    def test_convolve_1x2x4(self):
-        fake_fs = {
-            'd0_0': {
-                '%': None,
-                'd1_0': {
-                    'd1_0_0.yaml': None,
-                },
-                'd1_1': {
-                    'd1_1_0.yaml': None,
-                    'd1_1_1.yaml': None,
-                },
-                'd1_2': {
-                    'd1_2_0.yaml': None,
-                    'd1_2_1.yaml': None,
-                    'd1_2_2.yaml': None,
-                    'd1_2_3.yaml': None,
-                },
-            },
-        }
-        self.start_patchers(fake_fs)
-        result = suite.build_matrix('d0_0')
-        assert len(result) == 8
-        assert self.fragment_occurrences(result, 'd1_2_2.yaml') == 0.25
-
-    def test_convolve_with_concat(self):
-        fake_fs = {
-            'd0_0': {
-                '%': None,
-                'd1_0': {
-                    'd1_0_0.yaml': None,
-                },
-                'd1_1': {
-                    'd1_1_0.yaml': None,
-                    'd1_1_1.yaml': None,
-                },
-                'd1_2': {
-                    '+': None,
-                    'd1_2_0.yaml': None,
-                    'd1_2_1.yaml': None,
-                    'd1_2_2.yaml': None,
-                    'd1_2_3.yaml': None,
-                },
-            },
-        }
-        self.start_patchers(fake_fs)
-        result = suite.build_matrix('d0_0')
-        assert len(result) == 2
-        for i in result:
-            assert 'd0_0/d1_2/d1_2_0.yaml' in i[1]
-            assert 'd0_0/d1_2/d1_2_1.yaml' in i[1]
-            assert 'd0_0/d1_2/d1_2_2.yaml' in i[1]
-            assert 'd0_0/d1_2/d1_2_3.yaml' in i[1]
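-        # The '+' subtree acts as a single facet, so the convolution yields
-        # 1 * 2 * 1 = 2 jobs, each carrying all four d1_2 fragments.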
-
-    def test_emulate_teuthology_noceph(self):
-        fake_fs = {
-            'teuthology': {
-                'no-ceph': {
-                    '%': None,
-                    'clusters': {
-                        'single.yaml': None,
-                    },
-                    'distros': {
-                        'baremetal.yaml': None,
-                        'rhel7.0.yaml': None,
-                        'ubuntu12.04.yaml': None,
-                        'ubuntu14.04.yaml': None,
-                        'vps.yaml': None,
-                        'vps_centos6.5.yaml': None,
-                        'vps_debian7.yaml': None,
-                        'vps_rhel6.4.yaml': None,
-                        'vps_rhel6.5.yaml': None,
-                        'vps_rhel7.0.yaml': None,
-                        'vps_ubuntu14.04.yaml': None,
-                    },
-                    'tasks': {
-                        'teuthology.yaml': None,
-                    },
-                },
-            },
-        }
-        self.start_patchers(fake_fs)
-        result = suite.build_matrix('teuthology/no-ceph')
-        assert len(result) == 11
-        assert self.fragment_occurrences(result, 'vps.yaml') == 1 / 11.0
-
-    def test_empty_dirs(self):
-        fake_fs = {
-            'teuthology': {
-                'no-ceph': {
-                    '%': None,
-                    'clusters': {
-                        'single.yaml': None,
-                    },
-                    'distros': {
-                        'baremetal.yaml': None,
-                        'rhel7.0.yaml': None,
-                        'ubuntu12.04.yaml': None,
-                        'ubuntu14.04.yaml': None,
-                        'vps.yaml': None,
-                        'vps_centos6.5.yaml': None,
-                        'vps_debian7.yaml': None,
-                        'vps_rhel6.4.yaml': None,
-                        'vps_rhel6.5.yaml': None,
-                        'vps_rhel7.0.yaml': None,
-                        'vps_ubuntu14.04.yaml': None,
-                    },
-                    'tasks': {
-                        'teuthology.yaml': None,
-                    },
-                },
-            },
-        }
-        self.start_patchers(fake_fs)
-        result = suite.build_matrix('teuthology/no-ceph')
-        self.stop_patchers()
-
-        fake_fs2 = {
-            'teuthology': {
-                'no-ceph': {
-                    '%': None,
-                    'clusters': {
-                        'single.yaml': None,
-                    },
-                    'distros': {
-                        'empty': {},
-                        'baremetal.yaml': None,
-                        'rhel7.0.yaml': None,
-                        'ubuntu12.04.yaml': None,
-                        'ubuntu14.04.yaml': None,
-                        'vps.yaml': None,
-                        'vps_centos6.5.yaml': None,
-                        'vps_debian7.yaml': None,
-                        'vps_rhel6.4.yaml': None,
-                        'vps_rhel6.5.yaml': None,
-                        'vps_rhel7.0.yaml': None,
-                        'vps_ubuntu14.04.yaml': None,
-                    },
-                    'tasks': {
-                        'teuthology.yaml': None,
-                    },
-                    'empty': {},
-                },
-            },
-        }
-        self.start_patchers(fake_fs2)
-        result2 = suite.build_matrix('teuthology/no-ceph')
-        assert len(result) == 11
-        assert len(result2) == len(result)
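-        # Empty directories contribute no facets; the matrix is unchanged.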
-
-    def test_disable_extension(self):
-        fake_fs = {
-            'teuthology': {
-                'no-ceph': {
-                    '%': None,
-                    'clusters': {
-                        'single.yaml': None,
-                    },
-                    'distros': {
-                        'baremetal.yaml': None,
-                        'rhel7.0.yaml': None,
-                        'ubuntu12.04.yaml': None,
-                        'ubuntu14.04.yaml': None,
-                        'vps.yaml': None,
-                        'vps_centos6.5.yaml': None,
-                        'vps_debian7.yaml': None,
-                        'vps_rhel6.4.yaml': None,
-                        'vps_rhel6.5.yaml': None,
-                        'vps_rhel7.0.yaml': None,
-                        'vps_ubuntu14.04.yaml': None,
-                    },
-                    'tasks': {
-                        'teuthology.yaml': None,
-                    },
-                },
-            },
-        }
-        self.start_patchers(fake_fs)
-        result = suite.build_matrix('teuthology/no-ceph')
-        self.stop_patchers()
-
-        fake_fs2 = {
-            'teuthology': {
-                'no-ceph': {
-                    '%': None,
-                    'clusters': {
-                        'single.yaml': None,
-                    },
-                    'distros': {
-                        'baremetal.yaml': None,
-                        'rhel7.0.yaml': None,
-                        'ubuntu12.04.yaml': None,
-                        'ubuntu14.04.yaml': None,
-                        'vps.yaml': None,
-                        'vps_centos6.5.yaml': None,
-                        'vps_debian7.yaml': None,
-                        'vps_rhel6.4.yaml': None,
-                        'vps_rhel6.5.yaml': None,
-                        'vps_rhel7.0.yaml': None,
-                        'vps_ubuntu14.04.yaml': None,
-                        'forcefilevps_ubuntu14.04.yaml.disable': None,
-                        'forcefilevps_ubuntu14.04.yaml.anotherextension': None,
-                    },
-                    'tasks': {
-                        'teuthology.yaml': None,
-                        'forcefilevps_ubuntu14.04notyaml': None,
-                    },
-                    'forcefilevps_ubuntu14.04notyaml': None,
-                    'tasks.disable': {
-                        'teuthology2.yaml': None,
-                        'forcefilevps_ubuntu14.04notyaml': None,
-                    },
-                },
-            },
-        }
-        self.start_patchers(fake_fs2)
-        result2 = suite.build_matrix('teuthology/no-ceph')
-        assert len(result) == 11
-        assert len(result2) == len(result)
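-        # The '.disable' suffix prunes entries, and files not ending in
-        # '.yaml' are ignored, so the extra fixtures change nothing.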
-
-    def test_sort_order(self):
-        # This test ensures that 'ceph' comes before 'ceph-thrash' when yaml
-        # fragments are sorted.
-        fake_fs = {
-            'thrash': {
-                '%': None,
-                'ceph-thrash': {'default.yaml': None},
-                'ceph': {'base.yaml': None},
-                'clusters': {'mds-1active-1standby.yaml': None},
-                'debug': {'mds_client.yaml': None},
-                'fs': {'btrfs.yaml': None},
-                'msgr-failures': {'none.yaml': None},
-                'overrides': {'whitelist_wrongly_marked_down.yaml': None},
-                'tasks': {'cfuse_workunit_suites_fsstress.yaml': None},
-            },
-        }
-        self.start_patchers(fake_fs)
-        result = suite.build_matrix('thrash')
-        assert len(result) == 1
-        assert self.fragment_occurrences(result, 'base.yaml') == 1
-        fragments = result[0][1]
-        assert fragments[0] == 'thrash/ceph/base.yaml'
-        assert fragments[1] == 'thrash/ceph-thrash/default.yaml'
-
-
-class TestSubset(object):
-    patchpoints = [
-        'os.path.exists',
-        'os.listdir',
-        'os.path.isfile',
-        'os.path.isdir',
-        '__builtin__.open',
-    ]
-
-    def setup(self):
-        self.mocks = dict()
-        self.patchers = dict()
-        for ppoint in self.__class__.patchpoints:
-            self.mocks[ppoint] = MagicMock()
-            self.patchers[ppoint] = patch(ppoint, self.mocks[ppoint])
-
-    def start_patchers(self, fake_fs):
-        fake_fns = make_fake_fstools(fake_fs)
-        # relies on fake_fns being in same order as patchpoints
-        for ppoint, fn in zip(self.__class__.patchpoints, fake_fns):
-            self.mocks[ppoint].side_effect = fn
-        for patcher in self.patchers.values():
-            patcher.start()
-
-    def stop_patchers(self):
-        for patcher in self.patchers.values():
-            patcher.stop()
-
-    # test_random() manages start/stop patchers on its own; no teardown
-
-    MAX_FACETS = 10
-    MAX_FANOUT = 3
-    MAX_DEPTH = 3
-    MAX_SUBSET = 10
-
-    @staticmethod
-    def generate_fake_fs(max_facets, max_fanout, max_depth):
-        def yamlify(name):
-            return name + ".yaml"
-        def name_generator():
-            x = 0
-            while True:
-                yield str(x)
-                x += 1
-        def generate_tree(
-                max_facets, max_fanout, max_depth, namegen, top=True):
-            if max_depth == 0:
-                return None
-            if max_facets == 0:
-                return None
-            items = random.choice(range(max_fanout))
-            if items == 0 and top:
-                items = 1
-            if items == 0:
-                return None
-            sub_max_facets = max_facets / items
-            tree = {}
-            for i in range(items):
-                subtree = generate_tree(
-                    sub_max_facets, max_fanout,
-                    max_depth - 1, namegen, top=False)
-                if subtree is not None:
-                    tree[namegen.next()] = subtree
-                else:
-                    tree[yamlify(namegen.next())] = None
-            random.choice([
-                lambda: tree.update({'%': None}),
-                lambda: None])()
-            return tree
-        return {
-            'root':  generate_tree(
-                max_facets, max_fanout, max_depth, name_generator())
-        }
-
-    @staticmethod
-    def generate_subset(maxsub):
-        den = random.choice(range(maxsub - 1)) + 1
-        return (random.choice(range(den)), den)
-
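-    # The (numerator, denominator) pair is handed to _get_matrix below,
-    # presumably selecting the numerator-th 1/denominator slice of the
-    # matrix, mirroring the scheduler's --subset option.
-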
-    @staticmethod
-    def generate_description_list(tree, subset):
-        mat, first, matlimit = suite._get_matrix(
-            'root', subset=subset)
-        return [i[0] for i in suite.generate_combinations(
-            'root', mat, first, matlimit)], mat, first, matlimit
-
-    @staticmethod
-    def verify_facets(tree, description_list, subset, mat, first, matlimit):
-        def flatten(tree):
-            for k, v in tree.iteritems():
-                if v is None and '.yaml' in k:
-                    yield k
-                elif v is not None and '.disable' not in k:
-                    for x in flatten(v):
-                        yield x
-        def pptree(tree, tabs=0):
-            ret = ""
-            for k, v in tree.iteritems():
-                if v is None:
-                    ret += ('\t'*tabs) + k.ljust(10) + "\n"
-                else:
-                    ret += ('\t'*tabs) + (k + ':').ljust(10) + "\n"
-                    ret += pptree(v, tabs+1)
-            return ret
-        for facet in flatten(tree):
-            found = False
-            for i in description_list:
-                if facet in i:
-                    found = True
-                    break
-            if not found:
-                print "tree\n{tree}\ngenerated list\n{desc}\n\nfrom matrix\n\n{matrix}\nsubset {subset} without facet {fac}".format(
-                    tree=pptree(tree),
-                    desc='\n'.join(description_list),
-                    subset=subset,
-                    matrix=str(mat),
-                    fac=facet)
-                all_desc = suite.generate_combinations(
-                    'root',
-                    mat,
-                    0,
-                    mat.size())
-                for i, desc in zip(xrange(mat.size()), all_desc):
-                    if i == first:
-                        print '=========='
-                    print i, desc
-                    if i + 1 == matlimit:
-                        print '=========='
-            assert found
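-
-    # The invariant under test: every .yaml facet of the random tree must
-    # surface in at least one description of the chosen subset.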
-
-    def test_random(self):
-        for i in xrange(10000):
-            tree = self.generate_fake_fs(
-                self.MAX_FACETS,
-                self.MAX_FANOUT,
-                self.MAX_DEPTH)
-            subset = self.generate_subset(self.MAX_SUBSET)
-            self.start_patchers(tree)
-            dlist, mat, first, matlimit = self.generate_description_list(tree, subset)
-            self.verify_facets(tree, dlist, subset, mat, first, matlimit)
-            self.stop_patchers()
-
-
-@patch('subprocess.check_output')
-def test_git_branch_exists(m_check_output):
-    m_check_output.return_value = ''
-    assert not suite.git_branch_exists('ceph', 'nobranchnowaycanthappen')
-    m_check_output.return_value = 'HHH branch'
-    assert suite.git_branch_exists('ceph', 'master')
-
-
-@patch.object(suite.ResultsReporter, 'get_jobs')
-def test_wait_success(m_get_jobs, caplog):
-    results = [
-        [{'status': 'queued', 'job_id': '2'}],
-        [],
-    ]
-    final = [
-        {'status': 'pass', 'job_id': '1',
-         'description': 'DESC1', 'log_href': 'http://URL1'},
-        {'status': 'fail', 'job_id': '2',
-         'description': 'DESC2', 'log_href': 'http://URL2'},
-        {'status': 'pass', 'job_id': '3',
-         'description': 'DESC3', 'log_href': 'http://URL3'},
-    ]
-    def get_jobs(name, **kwargs):
-        if kwargs['fields'] == ['job_id', 'status']:
-            return in_progress.pop(0)
-        else:
-            return final
-    m_get_jobs.side_effect = get_jobs
-    suite.Run.WAIT_PAUSE = 1
-
-    in_progress = deepcopy(results)
-    assert 0 == suite.wait('name', 1, 'http://UPLOAD_URL')
-    m_get_jobs.assert_any_call('name', fields=['job_id', 'status'])
-    assert 0 == len(in_progress)
-    assert 'fail http://UPLOAD_URL/name/2' in caplog.text()
-
-    in_progress = deepcopy(results)
-    assert 0 == suite.wait('name', 1, None)
-    m_get_jobs.assert_any_call('name', fields=['job_id', 'status'])
-    assert 0 == len(in_progress)
-    assert 'fail http://URL2' in caplog.text()
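-    # suite.wait polls get_jobs(fields=['job_id', 'status']) until nothing
-    # is in progress, then reports the final results: failure links point at
-    # the upload URL when one is given, otherwise at each job's log_href.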
-
-
-@patch.object(suite.ResultsReporter, 'get_jobs')
-def test_wait_fails(m_get_jobs):
-    results = []
-    results.append([{'status': 'queued', 'job_id': '2'}])
-    results.append([{'status': 'queued', 'job_id': '2'}])
-    results.append([{'status': 'queued', 'job_id': '2'}])
-    def get_jobs(name, **kwargs):
-        return results.pop(0)
-    m_get_jobs.side_effect = get_jobs
-    suite.Run.WAIT_PAUSE = 1
-    suite.Run.WAIT_MAX_JOB_TIME = 1
-    # pytest.raises is assertion enough here; anything asserted inside the
-    # block after wait() raises would never execute.
-    with pytest.raises(suite.WaitException):
-        suite.wait('name', 1, None)
-
-
-class TestSuiteMain(object):
-    def test_main(self):
-        suite_name = 'SUITE'
-        throttle = '3'
-        machine_type = 'burnupi'
-
-        def prepare_and_schedule(obj):
-            assert obj.base_config.suite == suite_name
-            assert obj.args.throttle == throttle
-
-        def fake_str(*args, **kwargs):
-            return 'fake'
-
-        def fake_bool(*args, **kwargs):
-            return True
-
-        with patch.multiple(
-                suite,
-                fetch_repos=DEFAULT,
-                package_version_for_hash=fake_str,
-                git_branch_exists=fake_bool,
-                git_ls_remote=fake_str,
-                ):
-            with patch.multiple(
-                suite.Run,
-                prepare_and_schedule=prepare_and_schedule,
-            ):
-                main([
-                    '--suite', suite_name,
-                    '--throttle', throttle,
-                    '--machine-type', machine_type,
-                ])
-
-    @patch('time.sleep')
-    def test_schedule_suite(self, m_sleep):
-        suite_name = 'noop'
-        throttle = '3'
-        machine_type = 'burnupi'
-
-        with patch.multiple(
-                suite,
-                fetch_repos=DEFAULT,
-                teuthology_schedule=DEFAULT,
-                get_arch=lambda x: 'x86_64',
-                git_ls_remote=lambda *args: '12345',
-                package_version_for_hash=DEFAULT,
-                ) as m:
-            m['package_version_for_hash'].return_value = 'fake-9.5'
-            config.suite_verify_ceph_hash = True
-            main(['--suite', suite_name,
-                  '--suite-dir', 'teuthology/test',
-                  '--throttle', throttle,
-                  '--machine-type', machine_type])
-            m_sleep.assert_called_with(int(throttle))
-
-    @patch('time.sleep')
-    def test_schedule_suite_noverify(self, m_sleep):
-        suite_name = 'noop'
-        throttle = '3'
-        machine_type = 'burnupi'
-        with patch.multiple(
-                suite,
-                fetch_repos=DEFAULT,
-                teuthology_schedule=DEFAULT,
-                get_arch=lambda x: 'x86_64',
-                get_gitbuilder_hash=DEFAULT,
-                git_ls_remote=lambda *args: '1234',
-                package_version_for_hash=lambda *args: 'fake-9.5',
-                ) as m:
-            config.suite_verify_ceph_hash = False
-            main(['--suite', suite_name,
-                  '--suite-dir', 'teuthology/test',
-                  '--throttle', throttle,
-                  '--machine-type', machine_type])
-            m_sleep.assert_called_with(int(throttle))
-            m['get_gitbuilder_hash'].assert_not_called()