journal = ctx.disk_config.remote_to_roles_to_journals[remote][osd]
mount_options = ctx.disk_config.remote_to_roles_to_dev_mount_options[remote][osd]
fstype = ctx.disk_config.remote_to_roles_to_dev_fstype[remote][osd]
- mnt = os.path.join('{tdir}/data'.format(tdir=testdir), 'osd.{id}.data'.format(id=osd))
+ mnt = os.path.join('/var/lib/ceph/osd', 'ceph-{id}'.format(id=osd))
log.info('Mounting osd.{o}: dev: {n}, mountpoint: {p}, type: {t}, options: {v}'.format(
o=osd, n=remote.name, p=mnt, t=fstype, v=mount_options))
),
)
- log.info('Setting up osd nodes...')
- for remote, roles_for_host in osds.remotes.iteritems():
- for id_ in teuthology.roles_of_type(roles_for_host, 'osd'):
- remote.run(
- args=[
- '{tdir}/enable-coredump'.format(tdir=testdir),
- 'ceph-coverage',
- coverage_dir,
- 'ceph-authtool',
- '--create-keyring',
- '--gen-key',
- '--name=osd.{id}'.format(id=id_),
- '{tdir}/data/osd.{id}.keyring'.format(tdir=testdir, id=id_),
- ],
- )
-
log.info('Setting up mds nodes...')
mdss = ctx.cluster.only(teuthology.is_type('mds'))
for remote, roles_for_host in mdss.remotes.iteritems():
for id_ in teuthology.roles_of_type(roles_for_host, 'mds'):
remote.run(
args=[
+ 'sudo',
+ 'mkdir',
+ '-p',
+ '/var/lib/ceph/mds/ceph-{id}'.format(id=id_),
+ run.Raw('&&'),
'{tdir}/enable-coredump'.format(tdir=testdir),
'ceph-coverage',
coverage_dir,
+ 'sudo',
'ceph-authtool',
'--create-keyring',
'--gen-key',
'--name=mds.{id}'.format(id=id_),
- '{tdir}/data/mds.{id}.keyring'.format(tdir=testdir, id=id_),
+ '/var/lib/ceph/mds/ceph-{id}/keyring'.format(id=id_),
],
)
],
)
- log.info('Reading keys from all nodes...')
- keys_fp = StringIO()
- keys = []
- for remote, roles_for_host in ctx.cluster.remotes.iteritems():
- for type_ in ['osd', 'mds']:
- for id_ in teuthology.roles_of_type(roles_for_host, type_):
- data = teuthology.get_file(
- remote=remote,
- path='{tdir}/data/{type}.{id}.keyring'.format(
- tdir=testdir,
- type=type_,
- id=id_,
- ),
- )
- keys.append((type_, id_, data))
- keys_fp.write(data)
- for remote, roles_for_host in ctx.cluster.remotes.iteritems():
- for type_ in ['client']:
- for id_ in teuthology.roles_of_type(roles_for_host, type_):
- data = teuthology.get_file(
- remote=remote,
- path='/etc/ceph/ceph.client.{id}.keyring'.format(id=id_)
- )
- keys.append((type_, id_, data))
- keys_fp.write(data)
-
- log.info('Adding keys to all mons...')
- writes = mons.run(
- args=[
- 'sudo', 'tee', '-a',
- keyring_path,
- ],
- stdin=run.PIPE,
- wait=False,
- stdout=StringIO(),
- )
- keys_fp.seek(0)
- teuthology.feed_many_stdins_and_close(keys_fp, writes)
- run.wait(writes)
- for type_, id_, data in keys:
- run.wait(
- mons.run(
- args=[
- 'sudo',
- '{tdir}/enable-coredump'.format(tdir=testdir),
- 'ceph-coverage',
- coverage_dir,
- 'ceph-authtool',
- keyring_path,
- '--name={type}.{id}'.format(
- type=type_,
- id=id_,
- ),
- ] + list(teuthology.generate_caps(type_)),
- wait=False,
- ),
- )
-
- log.info('Running mkfs on mon nodes...')
- for remote, roles_for_host in mons.remotes.iteritems():
- for id_ in teuthology.roles_of_type(roles_for_host, 'mon'):
- remote.run(
- args=[
- '{tdir}/enable-coredump'.format(tdir=testdir),
- 'ceph-coverage',
- coverage_dir,
- 'ceph-mon',
- '--mkfs',
- '-i', id_,
- '--monmap={tdir}/monmap'.format(tdir=testdir),
- '--osdmap={tdir}/osdmap'.format(tdir=testdir),
- '--keyring={kpath}'.format(kpath=keyring_path),
- ],
- )
-
log.info('Running mkfs on osd nodes...')
for remote, roles_for_host in osds.remotes.iteritems():
roles_to_devs = remote_to_roles_to_devs[remote]
for id_ in teuthology.roles_of_type(roles_for_host, 'osd'):
- log.info(str(roles_to_journals))
- log.info(id_)
remote.run(
args=[
+ 'sudo',
'mkdir',
- os.path.join('{tdir}/data'.format(tdir=testdir), 'osd.{id}.data'.format(id=id_)),
- ],
- )
+ '-p',
+ '/var/lib/ceph/osd/ceph-{id}'.format(id=id_),
+                    ],
+                )
+ log.info(str(roles_to_journals))
+ log.info(id_)
if roles_to_devs.get(id_):
dev = roles_to_devs[id_]
fs = config.get('fs')
'-t', fs,
'-o', ','.join(mount_options),
dev,
- os.path.join('{tdir}/data'.format(tdir=testdir), 'osd.{id}.data'.format(id=id_)),
+ os.path.join('/var/lib/ceph/osd', 'ceph-{id}'.format(id=id_)),
]
)
if not remote in ctx.disk_config.remote_to_roles_to_dev_mount_options:
if not remote in ctx.disk_config.remote_to_roles_to_dev_fstype:
ctx.disk_config.remote_to_roles_to_dev_fstype[remote] = {}
ctx.disk_config.remote_to_roles_to_dev_fstype[remote][id_] = fs
- remote.run(
- args=[
- 'sudo', 'chown', '-R', 'ubuntu.ubuntu',
- os.path.join('{tdir}/data'.format(tdir=testdir), 'osd.{id}.data'.format(id=id_))
- ]
- )
- remote.run(
- args=[
- 'sudo', 'chmod', '-R', '755',
- os.path.join('{tdir}/data'.format(tdir=testdir), 'osd.{id}.data'.format(id=id_))
- ]
- )
devs_to_clean[remote].append(
os.path.join(
- '{tdir}/data'.format(tdir=testdir), 'osd.{id}.data'.format(id=id_)
+                    '/var/lib/ceph/osd', 'ceph-{id}'.format(id=id_),
)
)
'{tdir}/enable-coredump'.format(tdir=testdir),
'ceph-coverage',
coverage_dir,
+ 'sudo',
'ceph-osd',
'--mkfs',
+ '--mkkey',
'-i', id_,
'--monmap', '{tdir}/monmap'.format(tdir=testdir),
],
)
+
+
+ log.info('Reading keys from all nodes...')
+ keys_fp = StringIO()
+ keys = []
+ for remote, roles_for_host in ctx.cluster.remotes.iteritems():
+        for type_ in ['mds', 'osd']:
+ for id_ in teuthology.roles_of_type(roles_for_host, type_):
+ data = teuthology.get_file(
+ remote=remote,
+ path='/var/lib/ceph/{type}/ceph-{id}/keyring'.format(
+ type=type_,
+ id=id_,
+ ),
+ sudo=True,
+ )
+ keys.append((type_, id_, data))
+ keys_fp.write(data)
+ for remote, roles_for_host in ctx.cluster.remotes.iteritems():
+ for type_ in ['client']:
+ for id_ in teuthology.roles_of_type(roles_for_host, type_):
+ data = teuthology.get_file(
+ remote=remote,
+ path='/etc/ceph/ceph.client.{id}.keyring'.format(id=id_)
+ )
+ keys.append((type_, id_, data))
+ keys_fp.write(data)
+
+ log.info('Adding keys to all mons...')
+ writes = mons.run(
+ args=[
+ 'sudo', 'tee', '-a',
+ keyring_path,
+ ],
+ stdin=run.PIPE,
+ wait=False,
+ stdout=StringIO(),
+ )
+ keys_fp.seek(0)
+ teuthology.feed_many_stdins_and_close(keys_fp, writes)
+ run.wait(writes)
+ for type_, id_, data in keys:
+ run.wait(
+ mons.run(
+ args=[
+ 'sudo',
+ '{tdir}/enable-coredump'.format(tdir=testdir),
+ 'ceph-coverage',
+ coverage_dir,
+ 'ceph-authtool',
+ keyring_path,
+ '--name={type}.{id}'.format(
+ type=type_,
+ id=id_,
+ ),
+ ] + list(teuthology.generate_caps(type_)),
+ wait=False,
+ ),
+ )
+
+ log.info('Running mkfs on mon nodes...')
+ for remote, roles_for_host in mons.remotes.iteritems():
+ for id_ in teuthology.roles_of_type(roles_for_host, 'mon'):
+ remote.run(
+ args=[
+ '{tdir}/enable-coredump'.format(tdir=testdir),
+ 'ceph-coverage',
+ coverage_dir,
+ 'sudo',
+ 'ceph-mon',
+ '--mkfs',
+ '-i', id_,
+ '--monmap={tdir}/monmap'.format(tdir=testdir),
+ '--osdmap={tdir}/osdmap'.format(tdir=testdir),
+ '--keyring={kpath}'.format(kpath=keyring_path),
+ ],
+ )
+
+
run.wait(
mons.run(
args=[
for remote, roles in mons.remotes.iteritems():
for role in roles:
if role.startswith('mon.'):
- teuthology.pull_directory_tarball(remote,
- '%s/data/%s' % (testdir, role),
- path + '/' + role + '.tgz')
+ teuthology.pull_directory_tarball(
+ remote,
+ '/var/lib/ceph/mon',
+ path + '/' + role + '.tgz')
log.info('Cleaning ceph cluster...')
run.wait(
'{tdir}/enable-coredump'.format(tdir=testdir),
'ceph-coverage',
coverage_dir,
+ 'sudo',
'{tdir}/daemon-helper'.format(tdir=testdir),
daemon_signal,
]