ctx.daemons = DaemonGroup(use_ceph_daemon=True)
if not hasattr(ctx, 'ceph'):
ctx.ceph = {}
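+ # CephManager instances, keyed by cluster name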
+ ctx.managers = {}
if 'cluster' not in config:
config['cluster'] = 'ceph'
cluster_name = config['cluster']
lambda: ceph_mdss(ctx=ctx, config=config),
lambda: distribute_config_and_admin_keyring(ctx=ctx, config=config),
):
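+ # expose a CephManager for this cluster; ceph_daemon=True makes it drive 'ceph' commands through the ceph-daemon shell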
+ ctx.managers[cluster_name] = CephManager(
+ ctx.ceph[cluster_name].bootstrap_remote,
+ ctx=ctx,
+ logger=log.getChild('ceph_manager.' + cluster_name),
+ cluster=cluster_name,
+ ceph_daemon=True,
+ )
+
try:
log.info('Setup complete, yielding')
yield
log = logging.getLogger(__name__)
+# this is for ceph-daemon clusters
+def shell(ctx, cluster_name, remote, args, **kwargs):
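+ """
+ Run a command in a 'ceph-daemon shell' container on the given remote,
+ using the cluster's conf, keyring, and fsid from the test directory.
+ """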
+ testdir = teuthology.get_testdir(ctx)
+ return remote.run(
+ args=[
+ 'sudo',
+ '{}/ceph-daemon'.format(testdir),
+ '--image', ctx.image,
+ 'shell',
+ '-c', '{}/{}.conf'.format(testdir, cluster_name),
+ '-k', '{}/{}.keyring'.format(testdir, cluster_name),
+ '--fsid', ctx.ceph[cluster_name].fsid,
+ '--',
+ ] + args,
+ **kwargs
+ )
def write_conf(ctx, conf_path=DEFAULT_CONF_PATH, cluster='ceph'):
conf_fp = StringIO()
"""
def __init__(self, controller, ctx=None, config=None, logger=None,
- cluster='ceph'):
+ cluster='ceph', ceph_daemon=False):
self.lock = threading.RLock()
self.ctx = ctx
self.config = config
self.controller = controller
self.next_pool_id = 0
self.cluster = cluster
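+ # when True, route ceph commands through the ceph-daemon shell instead of the locally installed binaries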
+ self.ceph_daemon = ceph_daemon
if (logger):
self.log = lambda x: logger.info(x)
else:
"""
Run a 'ceph' command against the cluster and return its stdout.
"""
- testdir = teuthology.get_testdir(self.ctx)
- ceph_args = [
- 'sudo',
- 'adjust-ulimits',
- 'ceph-coverage',
- '{tdir}/archive/coverage'.format(tdir=testdir),
- 'timeout',
- '120',
- 'ceph',
- '--cluster',
- self.cluster,
- ]
- ceph_args.extend(args)
- proc = self.controller.run(
- args=ceph_args,
- stdout=StringIO(),
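+ # ceph-daemon clusters run the command in the shell container; otherwise use the installed ceph CLI with the coverage/ulimit/timeout wrappers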
+ if self.ceph_daemon:
+ proc = shell(self.ctx, self.cluster, self.controller,
+ args=['ceph'] + list(args),
+ stdout=StringIO())
+ else:
+ testdir = teuthology.get_testdir(self.ctx)
+ ceph_args = [
+ 'sudo',
+ 'adjust-ulimits',
+ 'ceph-coverage',
+ '{tdir}/archive/coverage'.format(tdir=testdir),
+ 'timeout',
+ '120',
+ 'ceph',
+ '--cluster',
+ self.cluster,
+ ]
+ ceph_args.extend(args)
+ proc = self.controller.run(
+ args=ceph_args,
+ stdout=StringIO(),
)
return proc.stdout.getvalue()
"""
Run a 'ceph' command against the cluster and return its exit status.
"""
- testdir = teuthology.get_testdir(self.ctx)
- ceph_args = [
- 'sudo',
- 'adjust-ulimits',
- 'ceph-coverage',
- '{tdir}/archive/coverage'.format(tdir=testdir),
- 'timeout',
- '120',
- 'ceph',
- '--cluster',
- self.cluster,
- ]
- ceph_args.extend(args)
- kwargs['args'] = ceph_args
- kwargs['check_status'] = False
- proc = self.controller.run(**kwargs)
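+ # same split as raw_cluster_cmd, but do not check status; the caller gets the exit code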
+ if self.ceph_daemon:
+ proc = shell(self.ctx, self.cluster, self.controller,
+ args=['ceph'] + list(args),
+ check_status=False)
+ else:
+ testdir = teuthology.get_testdir(self.ctx)
+ ceph_args = [
+ 'sudo',
+ 'adjust-ulimits',
+ 'ceph-coverage',
+ '{tdir}/archive/coverage'.format(tdir=testdir),
+ 'timeout',
+ '120',
+ 'ceph',
+ '--cluster',
+ self.cluster,
+ ]
+ ceph_args.extend(args)
+ kwargs['args'] = ceph_args
+ kwargs['check_status'] = False
+ proc = self.controller.run(**kwargs)
return proc.exitstatus
def run_ceph_w(self, watch_channel=None):