"""
-usage: teuthology-results [-h] [-v] [--dry-run] [--email EMAIL] [--timeout TIMEOUT] --archive-dir DIR --name NAME
+usage: teuthology-results [-h] [-v] [--dry-run] [--email EMAIL] [--timeout TIMEOUT] [--subset SUBSET] [--seed SEED] --archive-dir DIR --name NAME
Email teuthology suite results
[default: 0]
--archive-dir DIR path under which results for the suite are stored
--name NAME name of the suite
+ --subset SUBSET subset passed to teuthology-suite
+ --seed SEED random seed used in teuthology-suite
"""
import docopt
import teuthology.results
--timeout <timeout> How many seconds to wait for jobs to
finish before emailing results. Only
applies to the last job in a suite.
+ --seed <seed> The random seed for rerunning the suite.
+ Only applies to the last job in a suite.
+ --subset <subset> The subset option passed to teuthology-suite.
+ Only applies to the last job in a suite.
--dry-run Instead of scheduling, just output the
job config.
[default: fail,dead]
--seed SEED A random number, mostly useful when used along
with the --rerun argument. This number can be found
- in the output of teuthology-suite command.
+ in the output of the teuthology-suite command.
+ Use -1 for a random seed [default: -1].
""".format(
default_machine_type=config.default_machine_type,
response.raise_for_status()
return response.json()
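+ # Pull the message out of a results.log line of the (assumed) form
+ # '<timestamp> <level>:<logger>:<message>'; returns the text after
+ # 'prefix', or None if the message does not start with it.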
+ def _parse_log_line(self, line, prefix):
+ msg = line.split(' ', 1)[1].split(':', 2)[-1]
+ if not msg.startswith(prefix):
+ return None
+ return msg[len(prefix):].strip()
+
+ def get_rerun_conf(self, run_name):
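+ """Recover the subset and seed recorded in a run's results.log."""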
+ log_path = os.path.join(self.archive_base, run_name, 'results.log')
+ # parse the log file generated by teuthology.results.results()
+ subset = None
+ seed = None
+ with open(log_path) as results_log:
+ for line in results_log:
+ if ':' not in line:
+ # stop if this does not look like a log line
+ break
+ if subset is None:
+ subset = self._parse_log_line(line, 'subset:')
+ if seed is None:
+ seed = self._parse_log_line(line, 'seed:')
+ if subset is not None and seed is not None:
+ break
+ if subset is not None:
+ subset = tuple(int(i) for i in subset.split('/'))
+ seed = int(seed) if seed is not None else -1
+ return subset, seed
+
def delete_job(self, run_name, job_id):
"""
Delete a job from the results server.
try:
results(args['--archive-dir'], args['--name'], args['--email'],
- int(args['--timeout']), args['--dry-run'])
+ int(args['--timeout']), args['--dry-run'],
+ args['--subset'], args['--seed'])
except Exception:
log.exception('error generating results')
raise
-def results(archive_dir, name, email, timeout, dry_run):
+def results(archive_dir, name, email, timeout, dry_run, subset, seed):
starttime = time.time()
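+ # record subset/seed in results.log so that a later
+ # teuthology-suite --rerun can read them back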
+ if subset:
+ log.info('subset: %s', subset)
+ if seed:
+ log.info('seed: %s', seed)
if timeout:
log.info('Waiting up to %d seconds for tests to finish...', timeout)
def main(args):
if not args['--last-in-suite']:
- if args['--email']:
- raise ValueError(
- '--email is only applicable to the last job in a suite')
- if args['--timeout']:
- raise ValueError(
- '--timeout is only applicable to the last job in a suite')
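+ # these options are only honored by the --last-in-suite job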
+ last_job_args = ['email', 'timeout', 'subset', 'seed']
+ for arg in last_job_args:
+ opt = '--{arg}'.format(arg=arg)
+ msg_fmt = '{opt} is only applicable to the last job in a suite'
+ if args[opt]:
+ raise ValueError(msg_fmt.format(opt=opt))
name = args['--name']
if not name or name.isdigit():
# settings in the yaml override what's passed on the command line. This is
# primarily to accommodate jobs with multiple machine types.
job_config.update(conf_dict)
- if args['--timeout'] is not None:
- job_config['results_timeout'] = args['--timeout']
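+ # copy selected command line options into the job config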
+ for arg, conf in {'--timeout': 'results_timeout',
+ '--seed': 'seed',
+ '--subset': 'subset'}.items():
+ val = args.get(arg, None)
+ if val is not None:
+ job_config[conf] = val
+
return job_config
value = normalize_suite_name(value)
if key == 'suite_relpath' and value is None:
value = ''
- elif key in ('limit', 'priority', 'num', 'newest'):
+ elif key in ('limit', 'priority', 'num', 'newest', 'seed'):
value = int(value)
elif key == 'subset' and value is not None:
# take input string '2/3' and turn into (2, 3)
return
conf.filter_in.extend(rerun_filters['descriptions'])
conf.suite = normalize_suite_name(rerun_filters['suite'])
- if conf.seed is None:
+ conf.subset, conf.seed = get_rerun_conf(conf)
+ if conf.seed < 0:
conf.seed = random.randint(0, 9999)
log.info('Using random seed=%s', conf.seed)
return filters
+def get_rerun_conf(conf):
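+ """Return the (subset, seed) to use for a rerun, preferring the values recorded by the original run."""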
+ reporter = ResultsReporter()
+ subset, seed = reporter.get_rerun_conf(conf.rerun)
+ if seed < 0:
+ return conf.subset, conf.seed
+ if conf.seed < 0:
+ log.info('Using stored seed=%s', seed)
+ elif conf.seed != seed:
+ log.error('--seed %s does not match the stored seed %s',
+ conf.seed, seed)
+ if conf.subset is None:
+ log.info('Using stored subset=%s', subset)
+ elif conf.subset != subset:
+ log.error('--subset %s does not match the stored subset %s',
+ conf.subset, subset)
+ return subset, seed
+
+
class WaitException(Exception):
pass
num_jobs = self.schedule_suite()
- if self.base_config.email and num_jobs:
+ if num_jobs:
arg = copy.deepcopy(self.base_args)
arg.append('--last-in-suite')
- arg.extend(['--email', self.base_config.email])
+ if self.base_config.email:
+ arg.extend(['--email', self.base_config.email])
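+ # pass subset/seed to the last-in-suite job so they can be
+ # recorded in results.log and reused by --rerun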
+ if self.args.subset:
+ subset = '/'.join(str(i) for i in self.args.subset)
+ arg.extend(['--subset', subset])
+ arg.extend(['--seed', str(self.args.seed)])
if self.args.timeout:
arg.extend(['--timeout', self.args.timeout])
util.teuthology_schedule(
args=arg,
dry_run=self.args.dry_run,
verbose=self.args.verbose,
- log_prefix="Results email: ",
+ log_prefix="Results: ",
)
results_url = get_results_url(self.base_config.name)
if results_url:
if job_config.get('last_in_suite'):
if teuth_config.results_server:
report.try_delete_jobs(job_config['name'], job_config['job_id'])
- log.info('Generating results email for %s', job_config['name'])
+ log.info('Generating results for %s', job_config['name'])
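+ # build the teuthology-results invocation; seed and subset are
+ # forwarded so they can be recorded for a later --rerun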
args = [
os.path.join(teuth_bin_path, 'teuthology-results'),
'--timeout',
str(job_config.get('results_timeout',
teuth_config.results_timeout)),
- '--email',
- job_config['email'],
'--archive-dir',
os.path.join(archive_dir, safe_archive),
'--name',
job_config['name'],
]
+ if job_config.get('seed') is not None:
+ args.extend(['--seed', str(job_config['seed'])])
+ if job_config.get('email'):
+ args.extend(['--email', job_config['email']])
+ if job_config.get('subset'):
+ args.extend(['--subset', job_config['subset']])
# Execute teuthology-results, passing 'preexec_fn=os.setpgrp' to
# make sure that it will continue to run if this worker process
# dies (e.g. because of a restart)