    yield
+@contextlib.contextmanager
+def module_setup(ctx, config):
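+    """
+    Enable mgr modules before the mgr daemons are started.
+
+    Modules are read from the task's 'mgr-modules' list. A sketch of the
+    expected YAML (key names taken from this function):
+
+        tasks:
+        - ceph:
+            mgr-modules:
+              - balancer
+    """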
+    cluster_name = config['cluster']
+    first_mon = teuthology.get_first_mon(ctx, config, cluster_name)
+    (mon_remote,) = ctx.cluster.only(first_mon).remotes.keys()
+
+    modules = config.get('mgr-modules', [])
+    for m in modules:
+        m = str(m)
+        cmd = [
+            'sudo',
+            'ceph',
+            '--cluster',
+            cluster_name,
+            'mgr',
+            'module',
+            'enable',
+            m,
+        ]
+        log.info("enabling module %s", m)
+        mon_remote.run(args=cmd)
+    yield
+
+
+@contextlib.contextmanager
+def conf_setup(ctx, config):
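+    """
+    Apply 'ceph config set' options listed under the task's 'cluster-conf'
+    mapping, keyed by config section; the commands are issued in parallel
+    and awaited before yielding. A sketch of the expected YAML (key names
+    taken from this function):
+
+        tasks:
+        - ceph:
+            cluster-conf:
+              global:
+                osd_pool_default_size: 1
+    """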
+    cluster_name = config['cluster']
+    first_mon = teuthology.get_first_mon(ctx, config, cluster_name)
+    (mon_remote,) = ctx.cluster.only(first_mon).remotes.keys()
+
+    configs = config.get('cluster-conf', {})
+    procs = []
+    for section, confs in configs.items():
+        section = str(section)
+        for k, v in confs.items():
+            k = str(k).replace(' ', '_')  # pre-pacific compatibility
+            v = str(v)
+            cmd = [
+                'sudo',
+                'ceph',
+                '--cluster',
+                cluster_name,
+                'config',
+                'set',
+                section,
+                k,
+                v,
+            ]
+            log.info("setting config [%s] %s = %s", section, k, v)
+            procs.append(mon_remote.run(args=cmd, wait=False))
+    log.debug("set %d configs", len(procs))
+    for p in procs:
+        log.debug("waiting for %s", p)
+        p.wait()
+    yield
+
+
+@contextlib.contextmanager
+def conf_epoch(ctx, config):
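+    """
+    Record the cluster's current config epoch (see
+    CephManager.save_conf_epoch), stashing it in ctx.conf_epoch for later
+    tasks.
+    """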
+    cm = ctx.managers[config['cluster']]
+    cm.save_conf_epoch()
+    yield
+
@contextlib.contextmanager
def check_enable_crimson(ctx, config):
    # enable crimson-osds if crimson
            mon_bind_addrvec=config.get('mon_bind_addrvec', True),
        )),
        lambda: run_daemon(ctx=ctx, config=config, type_='mon'),
+        lambda: module_setup(ctx=ctx, config=config),
        lambda: run_daemon(ctx=ctx, config=config, type_='mgr'),
+        lambda: conf_setup(ctx=ctx, config=config),
        lambda: crush_setup(ctx=ctx, config=config),
        lambda: check_enable_crimson(ctx=ctx, config=config),
        lambda: run_daemon(ctx=ctx, config=config, type_='osd'),
        lambda: run_daemon(ctx=ctx, config=config, type_='mds'),
        lambda: cephfs_setup(ctx=ctx, config=config),
        lambda: watchdog_setup(ctx=ctx, config=config),
+        lambda: conf_epoch(ctx=ctx, config=config),
    ]
    with contextutil.nested(*subtasks):
        timeout = kwargs.pop('timeout', 120)
        return ['sudo'] + self.pre + ['timeout', f'{timeout}', 'ceph',
                '--cluster', self.cluster]
+    def save_conf_epoch(self):
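+        """
+        Record the version of the most recent entry in the cluster's
+        config log ('ceph config log 1') in ctx.conf_epoch.
+        """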
+        p = self.ceph("config log 1 --format=json")
+        j = json.loads(p.stdout.getvalue())
+        self.ctx.conf_epoch = j[0]["version"]
+        log.info("config epoch is %d", self.ctx.conf_epoch)
    def ceph(self, cmd, **kwargs):
        """
    yield
+@contextlib.contextmanager
+def module_setup(ctx, config):
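+    """
+    Enable the mgr modules listed under the task's 'mgr-modules' key,
+    running 'ceph mgr module enable' through the cephadm shell on the
+    bootstrap remote.
+    """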
+    cluster_name = config['cluster']
+    remote = ctx.ceph[cluster_name].bootstrap_remote
+
+    modules = config.get('mgr-modules', [])
+    for m in modules:
+        m = str(m)
+        cmd = [
+            'ceph',
+            'mgr',
+            'module',
+            'enable',
+            m,
+        ]
+        log.info("enabling module %s", m)
+        _shell(ctx, cluster_name, remote, args=cmd)
+    yield
+
+
+@contextlib.contextmanager
+def conf_setup(ctx, config):
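+    """
+    Apply 'ceph config set' options from the task's 'cluster-conf' mapping
+    through the cephadm shell; the commands are issued in parallel and
+    awaited before yielding.
+    """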
+    cluster_name = config['cluster']
+    remote = ctx.ceph[cluster_name].bootstrap_remote
+
+    configs = config.get('cluster-conf', {})
+    procs = []
+    for section, confs in configs.items():
+        section = str(section)
+        for k, v in confs.items():
+            k = str(k).replace(' ', '_')  # pre-pacific compatibility
+            v = str(v)
+            cmd = [
+                'ceph',
+                'config',
+                'set',
+                section,
+                k,
+                v,
+            ]
+            log.info("setting config [%s] %s = %s", section, k, v)
+            procs.append(_shell(ctx, cluster_name, remote, args=cmd, wait=False))
+    log.debug("set %d configs", len(procs))
+    for p in procs:
+        log.debug("waiting for %s", p)
+        p.wait()
+    yield
+
+
+@contextlib.contextmanager
+def conf_epoch(ctx, config):
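+    """
+    Record the cluster's current config epoch (see
+    CephManager.save_conf_epoch), stashing it in ctx.conf_epoch for later
+    tasks.
+    """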
+    cm = ctx.managers[config['cluster']]
+    cm.save_conf_epoch()
+    yield
+
@contextlib.contextmanager
def create_rbd_pool(ctx, config):
    if config.get('create_rbd_pool', False):
            lambda: crush_setup(ctx=ctx, config=config),
            lambda: ceph_mons(ctx=ctx, config=config),
            lambda: distribute_config_and_admin_keyring(ctx=ctx, config=config),
+            lambda: module_setup(ctx=ctx, config=config),
            lambda: ceph_mgrs(ctx=ctx, config=config),
+            lambda: conf_setup(ctx=ctx, config=config),
            lambda: ceph_osds(ctx=ctx, config=config),
            lambda: ceph_mdss(ctx=ctx, config=config),
            lambda: cephfs_setup(ctx=ctx, config=config),
            lambda: ceph_monitoring('grafana', ctx=ctx, config=config),
            lambda: ceph_clients(ctx=ctx, config=config),
            lambda: create_rbd_pool(ctx=ctx, config=config),
+            lambda: conf_epoch(ctx=ctx, config=config),
    ):
        try:
            if config.get('wait-for-healthy', True):
        self.testdir = None
        self.RADOS_CMD = [RADOS_CMD]
+        self.save_conf_epoch()
+
    def get_ceph_cmd(self, **kwargs):
        return [CEPH_CMD]