Presently the code merges the base_config only when the worker starts
running. There's no need to construct it this way, and it prevents
sharing the "defaults" with the fragment merging infrastructure. It
also prevents overriding defaults like:
    kernel:
      branch: wip-pdonnell-i66704
      client:
        branch: wip-pdonnell-i66704
        flavor: default
        kdb: 1
        sha1: 745cacd8f31e50d7f3b6039bbd8c9a8dfc07bf03
      flavor: default
      kdb: 1
      sha1: 745cacd8f31e50d7f3b6039bbd8c9a8dfc07bf03
A YAML fragment can set kernel.client but it cannot delete the defaults for
kernel.(branch|flavor|kdb|sha1) because there's no way to remove YAML elements
via a deep merge.
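As a minimal sketch of the behavior described above (the dicts are
hypothetical, but deep_merge is called the same two-argument way the
scheduler calls it below):

    from teuthology.misc import deep_merge

    # hypothetical defaults installed for the kernel task
    defaults = {'kernel': {'branch': 'wip-pdonnell-i66704', 'flavor': 'default',
                           'kdb': 1,
                           'sha1': '745cacd8f31e50d7f3b6039bbd8c9a8dfc07bf03'}}
    # hypothetical fragment that only wants kernel.client configured
    fragment = {'kernel': {'client': {'branch': 'wip-pdonnell-i66704',
                                      'flavor': 'default', 'kdb': 1,
                                      'sha1': '745cacd8f31e50d7f3b6039bbd8c9a8dfc07bf03'}}}
    deep_merge(defaults, fragment)
    # 'defaults' now looks like the YAML above: kernel.client was added, but
    # the top-level kernel.(branch|flavor|kdb|sha1) defaults cannot be removed
    # by the merge; a deep merge can only add or override keys.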
Signed-off-by: Patrick Donnelly <pdonnell@redhat.com>
from types import MappingProxyType
import yaml
+from teuthology.config import JobConfig
from teuthology.suite.build_matrix import combine_path
from teuthology.suite.util import strip_fragment_path
from teuthology.misc import deep_merge
the entire job (config) from the list.
"""
seed = kwargs.setdefault('seed', 1)
+ base_config = kwargs.setdefault('base_config', JobConfig())
if not isinstance(seed, int):
log.debug("no valid seed input: using 1")
seed = 1
if suite_name is not None:
desc = combine_path(suite_name, desc)
- yaml_complete_obj = {}
+ yaml_complete_obj = copy.deepcopy(base_config.to_dict())
deep_merge(yaml_complete_obj, dict(TEUTHOLOGY_TEMPLATE))
for path in paths:
if path not in yaml_cache:
from humanfriendly import format_timespan
-from tempfile import NamedTemporaryFile
from teuthology import repo_utils
from teuthology.config import config, JobConfig
filter_out=self.args.filter_out,
filter_all=self.args.filter_all,
filter_fragments=self.args.filter_fragments,
+ base_config=self.base_config,
seed=self.args.seed,
suite_name=suite_name))
- # create, but do not write, the temp file here, so it can be
- # added to the args in collect_jobs, but not filled until
- # any backtracking is done
- base_yaml_path = NamedTemporaryFile(
- prefix='schedule_suite_', delete=False
- ).name
- self.base_yaml_paths.insert(0, base_yaml_path)
-
# compute job limit in respect of --sleep-before-teardown
job_limit = self.args.limit or 0
sleep_before_teardown = int(self.args.sleep_before_teardown or 0)
dry_run=self.args.dry_run,
)
- with open(base_yaml_path, 'w+b') as base_yaml:
- base_yaml.write(str(self.base_config).encode())
-
if jobs_to_schedule:
self.write_rerun_memo()
self.schedule_jobs(jobs_missing_packages, jobs_to_schedule, name)
- os.remove(base_yaml_path)
-
count = len(jobs_to_schedule)
missing_count = len(jobs_missing_packages)
total_count = count
+import logging
import os
import pytest
import requests
from teuthology.suite import run
from teuthology.util.time import TIMESTAMP_FMT
+log = logging.getLogger(__name__)
class TestRun(object):
klass = run.Run
[call('ceph_sha1', 'default', 'ubuntu', '14.04', 'machine_type')],
)
y = {
- 'teuthology': {
- 'fragments_dropped': [],
- 'meta': {},
- 'postmerge': []
- },
'field1': 'val1',
'field2': 'val2'
}
+ teuthology_keys = [
+ 'branch',
+ 'machine_type',
+ 'name',
+ 'os_type',
+ 'os_version',
+ 'overrides',
+ 'priority',
+ 'repo',
+ 'seed',
+ 'sha1',
+ 'sleep_before_teardown',
+ 'suite',
+ 'suite_branch',
+ 'suite_relpath',
+ 'suite_repo',
+ 'suite_sha1',
+ 'tasks',
+ 'teuthology_branch',
+ 'teuthology_sha1',
+ 'timestamp',
+ 'user',
+ 'teuthology',
+ ]
+ for t in teuthology_keys:
+ y[t] = ANY
expected_job = dict(
yaml=y,
sha1='ceph_sha1',
'--description',
os.path.join(self.args.suite, build_matrix_desc),
'--',
- ANY, # base config
'-'
],
- stdin=yaml.dump(y),
+ stdin=ANY,
desc=os.path.join(self.args.suite, build_matrix_desc),
)
m_schedule_jobs.assert_has_calls(
[call([], [expected_job], runobj.name)],
)
+ args = m_schedule_jobs.call_args.args
+ log.debug("args =\n%s", args)
+ jobargs = args[1][0]
+ stdin_yaml = yaml.safe_load(jobargs['stdin'])
+ for k in y:
+ assert y[k] == stdin_yaml[k]
+ for k in teuthology_keys:
+ assert k in stdin_yaml
m_write_rerun_memo.assert_called_once_with()
@patch('teuthology.suite.util.find_git_parents')