From d2ff5c5eb609eba02e6c966015bb7e1d9b349be4 Mon Sep 17 00:00:00 2001
From: Shraddha Agrawal
Date: Wed, 19 Aug 2020 16:24:18 +0530
Subject: [PATCH] qa/tasks/cephadm.py: add ceph logs directory in job's
 info.yaml

This commit adds the paths of the ceph log directories to the job's
info.yaml file. The motivation is that, in case of a job timeout, the
logs can still be transferred to the teuthology host before the test
machines are nuked, using the ceph log directory paths recorded in the
job's info.yaml file.

Signed-off-by: Shraddha Agrawal
---
 qa/tasks/cephadm.py | 31 ++++++++++++++++++++++++++-----
 1 file changed, 26 insertions(+), 5 deletions(-)

diff --git a/qa/tasks/cephadm.py b/qa/tasks/cephadm.py
index 4dac03941bfcb..692c84f223929 100644
--- a/qa/tasks/cephadm.py
+++ b/qa/tasks/cephadm.py
@@ -10,6 +10,7 @@ import os
 import json
 import re
 import uuid
+import yaml
 from io import BytesIO, StringIO
 
 import toml
@@ -158,6 +159,16 @@ def ceph_log(ctx, config):
     cluster_name = config['cluster']
     fsid = ctx.ceph[cluster_name].fsid
 
+    # Add logs directory to job's info log file
+    with open(os.path.join(ctx.archive, 'info.yaml'), 'r+') as info_file:
+        info_yaml = yaml.safe_load(info_file)
+        info_file.seek(0)
+        if 'archive' not in info_yaml:
+            info_yaml['archive'] = {'log': '/var/log/ceph'}
+        else:
+            info_yaml['archive']['log'] = '/var/log/ceph'
+        yaml.safe_dump(info_yaml, info_file, default_flow_style=False)
+
     try:
         yield
 
@@ -265,6 +276,16 @@ def ceph_crash(ctx, config):
     cluster_name = config['cluster']
     fsid = ctx.ceph[cluster_name].fsid
 
+    # Add crash directory to job's info log file
+    with open(os.path.join(ctx.archive, 'info.yaml'), 'r+') as info_file:
+        info_yaml = yaml.safe_load(info_file)
+        info_file.seek(0)
+        if 'archive' not in info_yaml:
+            info_yaml['archive'] = {'crash': '/var/lib/ceph/%s/crash' % fsid}
+        else:
+            info_yaml['archive']['crash'] = '/var/lib/ceph/%s/crash' % fsid
+        yaml.safe_dump(info_yaml, info_file, default_flow_style=False)
+
     try:
         yield
 
@@ -307,7 +328,7 @@ def ceph_bootstrap(ctx, config, registry):
     first_mon = ctx.ceph[cluster_name].first_mon
     first_mon_role = ctx.ceph[cluster_name].first_mon_role
     mons = ctx.ceph[cluster_name].mons
-    
+
     ctx.cluster.run(args=[
         'sudo', 'mkdir', '-p', '/etc/ceph',
         ]);
@@ -469,7 +490,7 @@ def ceph_bootstrap(ctx, config, registry):
                 ctx.daemons.get_daemon(type_, id_, cluster).stop()
             except Exception:
                 log.exception(f'Failed to stop "{role}"')
-                raise 
+                raise
 
         # clean up /etc/ceph
         ctx.cluster.run(args=[
@@ -1128,7 +1149,7 @@ def task(ctx, config):
     if cluster_name not in ctx.ceph:
         ctx.ceph[cluster_name] = argparse.Namespace()
         ctx.ceph[cluster_name].bootstrapped = False
-    
+
     # image
     teuth_defaults = teuth_config.get('defaults', {})
     cephadm_defaults = teuth_defaults.get('cephadm', {})
@@ -1248,9 +1269,9 @@ def registries_add_mirror_to_docker_io(conf, mirror):
 
 def add_mirror_to_cluster(ctx, mirror):
     log.info('Adding local image mirror %s' % mirror)
-    
+
     registries_conf = '/etc/containers/registries.conf'
-    
+
     for remote in ctx.cluster.remotes.keys():
         try:
             config = teuthology.get_file(
-- 
2.47.3
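
Note: the two info.yaml hunks above repeat the same load/merge/dump pattern.
Below is a minimal standalone sketch of that pattern, for illustration only;
the update_archive_path helper name and the example call sites are
hypothetical and not part of this patch, and the sketch adds an
info_file.truncate() call as a precaution in case the rewritten YAML is
shorter than what was previously in the file.

import os
import yaml

def update_archive_path(archive_dir, key, path):
    # Hypothetical helper illustrating the pattern used in both hunks:
    # load the job's info.yaml, merge one entry under 'archive', and
    # rewrite the file in place.
    info_path = os.path.join(archive_dir, 'info.yaml')
    with open(info_path, 'r+') as info_file:
        info_yaml = yaml.safe_load(info_file) or {}
        # Merge without clobbering any other keys under 'archive'.
        info_yaml.setdefault('archive', {})[key] = path
        info_file.seek(0)
        yaml.safe_dump(info_yaml, info_file, default_flow_style=False)
        # Drop stale trailing bytes if the new document is shorter.
        info_file.truncate()

# Example usage mirroring the two call sites in the patch:
#   update_archive_path(ctx.archive, 'log', '/var/log/ceph')
#   update_archive_path(ctx.archive, 'crash', '/var/lib/ceph/%s/crash' % fsid)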