From: Shraddha Agrawal
Date: Mon, 20 Jul 2020 21:03:12 +0000 (+0530)
Subject: qa/tasks/ceph.py: add ceph logs directory in job's info.yaml
X-Git-Tag: v16.1.0~1306^2
X-Git-Url: http://git-server-git.apps.pok.os.sepia.ceph.com/?a=commitdiff_plain;h=e991f04b59b96271042f749ab1b7b9c56701c4f9;p=ceph.git

qa/tasks/ceph.py: add ceph logs directory in job's info.yaml

This commit adds the file paths of the ceph log directories to the
job's info.yaml file. The motivation is that, in case of a job
timeout, the logs can still be transferred to the teuthology host
before the test machines are nuked, using the ceph log directory
paths recorded in the job's info.yaml file.

Signed-off-by: Shraddha Agrawal
---

diff --git a/qa/tasks/ceph.py b/qa/tasks/ceph.py
index 71fc2eafae19..a247b52359b0 100644
--- a/qa/tasks/ceph.py
+++ b/qa/tasks/ceph.py
@@ -17,6 +17,7 @@ import time
 import gevent
 import re
 import socket
+import yaml
 
 from paramiko import SSHException
 from tasks.ceph_manager import CephManager, write_conf
@@ -75,8 +76,19 @@ def generate_caps(type_):
 @contextlib.contextmanager
 def ceph_crash(ctx, config):
     """
-    Gather crash dumps from /var/lib/crash
+    Gather crash dumps from /var/lib/ceph/crash
     """
+
+    # Add logs directory to job's info log file
+    with open(os.path.join(ctx.archive, 'info.yaml'), 'r+') as info_file:
+        info_yaml = yaml.safe_load(info_file)
+        info_file.seek(0)
+        if 'archive' not in info_yaml:
+            info_yaml['archive'] = {'crash': '/var/lib/ceph/crash'}
+        else:
+            info_yaml['archive']['crash'] = '/var/lib/ceph/crash'
+        yaml.safe_dump(info_yaml, info_file, default_flow_style=False)
+
     try:
         yield
 
@@ -146,6 +158,16 @@ def ceph_log(ctx, config):
         )
     )
 
+    # Add logs directory to job's info log file
+    with open(os.path.join(ctx.archive, 'info.yaml'), 'r+') as info_file:
+        info_yaml = yaml.safe_load(info_file)
+        info_file.seek(0)
+        if 'archive' not in info_yaml:
+            info_yaml['archive'] = {'log': '/var/log/ceph'}
+        else:
+            info_yaml['archive']['log'] = '/var/log/ceph'
+        yaml.safe_dump(info_yaml, info_file, default_flow_style=False)
+
     class Rotater(object):
         stop_event = gevent.event.Event()
 
@@ -1529,7 +1551,7 @@ def restart(ctx, config):
             ctx.managers[cluster].mark_down_osd(id_)
         ctx.daemons.get_daemon(type_, id_, cluster).restart()
        clusters.add(cluster)
-    
+
     if config.get('wait-for-healthy', True):
         for cluster in clusters:
             healthy(ctx=ctx, config=dict(cluster=cluster))
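
For illustration only, a minimal sketch of how the recorded paths could be consumed, assuming the info.yaml layout written by the two hunks above; the read_archive_paths helper is hypothetical and not part of this commit.

# Hypothetical helper, not part of the patch above. After both
# ceph_crash() and ceph_log() have run, the job's info.yaml contains
# roughly:
#
#   archive:
#     crash: /var/lib/ceph/crash
#     log: /var/log/ceph
#
import os
import yaml

def read_archive_paths(archive_dir):
    # archive_dir corresponds to ctx.archive in the patch; a consumer such
    # as a log-collection step on the teuthology host would pass the job's
    # archive directory here.
    with open(os.path.join(archive_dir, 'info.yaml')) as info_file:
        info_yaml = yaml.safe_load(info_file) or {}
    # e.g. {'crash': '/var/lib/ceph/crash', 'log': '/var/log/ceph'}
    return info_yaml.get('archive', {})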