import json
import re
import uuid
+import yaml
import six
import toml
cluster_name = config['cluster']
fsid = ctx.ceph[cluster_name].fsid
+# Add logs directory to job's info log file
+with open(os.path.join(ctx.archive, 'info.yaml'), 'r+') as info_file:
+    info_yaml = yaml.safe_load(info_file)
+    info_file.seek(0)
+    if 'archive' not in info_yaml:
+        info_yaml['archive'] = {'log': '/var/log/ceph'}
+    else:
+        info_yaml['archive']['log'] = '/var/log/ceph'
+    yaml.safe_dump(info_yaml, info_file, default_flow_style=False)
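+    # Note: safe_dump() rewrites the file from offset 0 without truncating,
+    # so this assumes the updated YAML is never shorter than what was read
+    # (adding an archive entry only grows the document).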
+
try:
    yield
cluster_name = config['cluster']
fsid = ctx.ceph[cluster_name].fsid
+# Add crash directory to job's info log file
+with open(os.path.join(ctx.archive, 'info.yaml'), 'r+') as info_file:
+    info_yaml = yaml.safe_load(info_file)
+    info_file.seek(0)
+    if 'archive' not in info_yaml:
+        info_yaml['archive'] = {'crash': '/var/lib/ceph/%s/crash' % fsid}
+    else:
+        info_yaml['archive']['crash'] = '/var/lib/ceph/%s/crash' % fsid
+    yaml.safe_dump(info_yaml, info_file, default_flow_style=False)
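+    # The crash path is keyed by the cluster fsid, matching the
+    # /var/lib/ceph/<fsid>/crash layout cephadm uses on the remote hosts.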
+
try:
    yield
first_mon = ctx.ceph[cluster_name].first_mon
first_mon_role = ctx.ceph[cluster_name].first_mon_role
mons = ctx.ceph[cluster_name].mons
-
+
ctx.cluster.run(args=[
    'sudo', 'mkdir', '-p', '/etc/ceph',
])
    ctx.daemons.get_daemon(type_, id_, cluster).stop()
except Exception:
    log.exception('Failed to stop "{role}"'.format(role=role))
-    raise
+    raise
# clean up /etc/ceph
ctx.cluster.run(args=[
if cluster_name not in ctx.ceph:
    ctx.ceph[cluster_name] = argparse.Namespace()
    ctx.ceph[cluster_name].bootstrapped = False
-
+
# image
teuth_defaults = teuth_config.get('defaults', {})
cephadm_defaults = teuth_defaults.get('cephadm', {})
def add_mirror_to_cluster(ctx, mirror):
    log.info('Adding local image mirror %s' % mirror)
-
+
    registries_conf = '/etc/containers/registries.conf'
-
+
    for remote in ctx.cluster.remotes.keys():
        try:
            config = teuthology.get_file(