From: Sage Weil
Date: Sun, 17 Feb 2013 06:32:16 +0000 (-0800)
Subject: ceph: use default data, keyring locations
X-Git-Tag: v0.94.10~27^2^2~364^2~1022^2~17
X-Git-Url: http://git-server-git.apps.pok.os.sepia.ceph.com/?a=commitdiff_plain;h=d1d36241b7a5e3a5f0b32b39288ed54c9cef2ca0;p=ceph.git

ceph: use default data, keyring locations

This required reordering the cluster setup so that we do the
ceph-osd --mkfs --mkkey prior to gathering keys and initializing
the monitors.

Also, run daemons as root.

Signed-off-by: Sage Weil
---

diff --git a/teuthology/ceph.conf.template b/teuthology/ceph.conf.template
index e9f0185968d7..d4689b592824 100644
--- a/teuthology/ceph.conf.template
+++ b/teuthology/ceph.conf.template
@@ -12,14 +12,10 @@
         osd crush chooseleaf type = 0
 
 [mon]
-        mon data = {testdir}/data/mon.$id
         mon cluster log file = {testdir}/archive/log/cluster.mon.$id.log
 
 [osd]
-        osd data = {testdir}/data/osd.$id.data
-        osd journal = {testdir}/data/osd.$id.journal
         osd journal size = 100
-        keyring = {testdir}/data/osd.$id.keyring
         osd class dir = /usr/lib/rados-classes
         osd scrub load threshold = 5.0
@@ -29,7 +25,6 @@
         osd recovery max chunk = 1048576
 
 [mds]
-        keyring = {testdir}/data/mds.$id.keyring
         lockdep = 1
         mds debug scatterstat = true
         mds verify scatter = true
diff --git a/teuthology/misc.py b/teuthology/misc.py
index 1671d3673603..06bf521f4101 100644
--- a/teuthology/misc.py
+++ b/teuthology/misc.py
@@ -274,12 +274,16 @@ def get_file(remote, path, sudo=False):
     """
     Read a file from remote host into memory.
     """
-    proc = remote.run(
-        args=[
+    args = []
+    if sudo:
+        args.append('sudo')
+    args.extend([
             'cat',
             '--',
             path,
-            ],
+        ])
+    proc = remote.run(
+        args=args,
         stdout=StringIO(),
         )
     data = proc.stdout.getvalue()
@@ -339,6 +343,7 @@ def pull_directory_tarball(remote, remotedir, localfile):
     out = open(localfile, 'w')
     proc = remote.run(
         args=[
+            'sudo',
             'tar',
             'cz',
             '-f', '-',
diff --git a/teuthology/nuke.py b/teuthology/nuke.py
index e7c04ca9c8f7..934cdedfefb5 100644
--- a/teuthology/nuke.py
+++ b/teuthology/nuke.py
@@ -156,7 +156,7 @@ def remove_osd_mounts(ctx, log):
     ctx.cluster.run(
         args=[
             'grep',
-            '{tdir}/data/'.format(tdir=get_testdir(ctx)),
+            '/var/lib/ceph/osd/',
             '/etc/mtab',
             run.Raw('|'),
             'awk', '{print $2}', run.Raw('|'),
diff --git a/teuthology/task/ceph.py b/teuthology/task/ceph.py
index 9de4075c0159..3d75e51e9099 100644
--- a/teuthology/task/ceph.py
+++ b/teuthology/task/ceph.py
@@ -360,7 +360,7 @@ def mount_osd_data(ctx, remote, osd):
     journal = ctx.disk_config.remote_to_roles_to_journals[remote][osd]
     mount_options = ctx.disk_config.remote_to_roles_to_dev_mount_options[remote][osd]
     fstype = ctx.disk_config.remote_to_roles_to_dev_fstype[remote][osd]
-    mnt = os.path.join('{tdir}/data'.format(tdir=testdir), 'osd.{id}.data'.format(id=osd))
+    mnt = os.path.join('/var/lib/ceph/osd', 'ceph-{id}'.format(id=osd))
     log.info('Mounting osd.{o}: dev: {n}, mountpoint: {p}, type: {t}, options: {v}'.format(
         o=osd, n=remote.name, p=mnt, t=fstype, v=mount_options))
@@ -595,36 +595,26 @@ def cluster(ctx, config):
             ),
         )
 
-    log.info('Setting up osd nodes...')
-    for remote, roles_for_host in osds.remotes.iteritems():
-        for id_ in teuthology.roles_of_type(roles_for_host, 'osd'):
-            remote.run(
-                args=[
-                    '{tdir}/enable-coredump'.format(tdir=testdir),
-                    'ceph-coverage',
-                    coverage_dir,
-                    'ceph-authtool',
-                    '--create-keyring',
-                    '--gen-key',
-                    '--name=osd.{id}'.format(id=id_),
-                    '{tdir}/data/osd.{id}.keyring'.format(tdir=testdir, id=id_),
-                    ],
-                )
-
     log.info('Setting up mds nodes...')
     mdss = ctx.cluster.only(teuthology.is_type('mds'))
     for remote, roles_for_host in mdss.remotes.iteritems():
         for id_ in teuthology.roles_of_type(roles_for_host, 'mds'):
             remote.run(
                 args=[
+                    'sudo',
+                    'mkdir',
+                    '-p',
+                    '/var/lib/ceph/mds/ceph-{id}'.format(id=id_),
+                    run.Raw('&&'),
                     '{tdir}/enable-coredump'.format(tdir=testdir),
                     'ceph-coverage',
                     coverage_dir,
+                    'sudo',
                     'ceph-authtool',
                     '--create-keyring',
                     '--gen-key',
                     '--name=mds.{id}'.format(id=id_),
-                    '{tdir}/data/mds.{id}.keyring'.format(tdir=testdir, id=id_),
+                    '/var/lib/ceph/mds/ceph-{id}/keyring'.format(id=id_),
                     ],
                 )
@@ -653,81 +643,6 @@ def cluster(ctx, config):
                     ],
                 )
-    log.info('Reading keys from all nodes...')
-    keys_fp = StringIO()
-    keys = []
-    for remote, roles_for_host in ctx.cluster.remotes.iteritems():
-        for type_ in ['osd', 'mds']:
-            for id_ in teuthology.roles_of_type(roles_for_host, type_):
-                data = teuthology.get_file(
-                    remote=remote,
-                    path='{tdir}/data/{type}.{id}.keyring'.format(
-                        tdir=testdir,
-                        type=type_,
-                        id=id_,
-                        ),
-                    )
-                keys.append((type_, id_, data))
-                keys_fp.write(data)
-    for remote, roles_for_host in ctx.cluster.remotes.iteritems():
-        for type_ in ['client']:
-            for id_ in teuthology.roles_of_type(roles_for_host, type_):
-                data = teuthology.get_file(
-                    remote=remote,
-                    path='/etc/ceph/ceph.client.{id}.keyring'.format(id=id_)
-                    )
-                keys.append((type_, id_, data))
-                keys_fp.write(data)
-
-    log.info('Adding keys to all mons...')
-    writes = mons.run(
-        args=[
-            'sudo', 'tee', '-a',
-            keyring_path,
-            ],
-        stdin=run.PIPE,
-        wait=False,
-        stdout=StringIO(),
-        )
-    keys_fp.seek(0)
-    teuthology.feed_many_stdins_and_close(keys_fp, writes)
-    run.wait(writes)
-    for type_, id_, data in keys:
-        run.wait(
-            mons.run(
-                args=[
-                    'sudo',
-                    '{tdir}/enable-coredump'.format(tdir=testdir),
-                    'ceph-coverage',
-                    coverage_dir,
-                    'ceph-authtool',
-                    keyring_path,
-                    '--name={type}.{id}'.format(
-                        type=type_,
-                        id=id_,
-                        ),
-                    ] + list(teuthology.generate_caps(type_)),
-                wait=False,
-                ),
-            )
-
-    log.info('Running mkfs on mon nodes...')
-    for remote, roles_for_host in mons.remotes.iteritems():
-        for id_ in teuthology.roles_of_type(roles_for_host, 'mon'):
-            remote.run(
-                args=[
-                    '{tdir}/enable-coredump'.format(tdir=testdir),
-                    'ceph-coverage',
-                    coverage_dir,
-                    'ceph-mon',
-                    '--mkfs',
-                    '-i', id_,
-                    '--monmap={tdir}/monmap'.format(tdir=testdir),
-                    '--osdmap={tdir}/osdmap'.format(tdir=testdir),
-                    '--keyring={kpath}'.format(kpath=keyring_path),
-                    ],
-                )
-
     log.info('Running mkfs on osd nodes...')
     for remote, roles_for_host in osds.remotes.iteritems():
         roles_to_devs = remote_to_roles_to_devs[remote]
@@ -740,14 +655,15 @@
         for id_ in teuthology.roles_of_type(roles_for_host, 'osd'):
-            log.info(str(roles_to_journals))
-            log.info(id_)
             remote.run(
                 args=[
+                    'sudo',
                     'mkdir',
-                    os.path.join('{tdir}/data'.format(tdir=testdir), 'osd.{id}.data'.format(id=id_)),
-                    ],
-                )
+                    '-p',
+                    '/var/lib/ceph/osd/ceph-{id}'.format(id=id_),
+                    ])
+            log.info(str(roles_to_journals))
+            log.info(id_)
             if roles_to_devs.get(id_):
                 dev = roles_to_devs[id_]
                 fs = config.get('fs')
@@ -796,7 +712,7 @@
                         '-t', fs, '-o', ','.join(mount_options),
                        dev,
-                        os.path.join('{tdir}/data'.format(tdir=testdir), 'osd.{id}.data'.format(id=id_)),
+                        os.path.join('/var/lib/ceph/osd', 'ceph-{id}'.format(id=id_)),
                         ]
                     )
                 if not remote in ctx.disk_config.remote_to_roles_to_dev_mount_options:
@@ -805,21 +721,9 @@
                     ctx.disk_config.remote_to_roles_to_dev_mount_options[remote] = {}
                 if not remote in ctx.disk_config.remote_to_roles_to_dev_fstype:
                     ctx.disk_config.remote_to_roles_to_dev_fstype[remote] = {}
                 ctx.disk_config.remote_to_roles_to_dev_fstype[remote][id_] = fs
-                remote.run(
-                    args=[
-                        'sudo', 'chown', '-R', 'ubuntu.ubuntu',
-                        os.path.join('{tdir}/data'.format(tdir=testdir), 'osd.{id}.data'.format(id=id_))
-                        ]
-                    )
-                remote.run(
-                    args=[
-                        'sudo', 'chmod', '-R', '755',
-                        os.path.join('{tdir}/data'.format(tdir=testdir), 'osd.{id}.data'.format(id=id_))
-                        ]
-                    )
                 devs_to_clean[remote].append(
                     os.path.join(
-                        '{tdir}/data'.format(tdir=testdir), 'osd.{id}.data'.format(id=id_)
+                        os.path.join('/var/lib/ceph/osd', 'ceph-{id}'.format(id=id_)),
                         )
                     )
@@ -830,12 +734,93 @@
                     '{tdir}/enable-coredump'.format(tdir=testdir),
                     'ceph-coverage',
                     coverage_dir,
+                    'sudo',
                     'ceph-osd',
                     '--mkfs',
+                    '--mkkey',
                     '-i', id_,
                     '--monmap', '{tdir}/monmap'.format(tdir=testdir),
                     ],
                 )
+
+
+    log.info('Reading keys from all nodes...')
+    keys_fp = StringIO()
+    keys = []
+    for remote, roles_for_host in ctx.cluster.remotes.iteritems():
+        for type_ in ['mds','osd']:
+            for id_ in teuthology.roles_of_type(roles_for_host, type_):
+                data = teuthology.get_file(
+                    remote=remote,
+                    path='/var/lib/ceph/{type}/ceph-{id}/keyring'.format(
+                        type=type_,
+                        id=id_,
+                        ),
+                    sudo=True,
+                    )
+                keys.append((type_, id_, data))
+                keys_fp.write(data)
+    for remote, roles_for_host in ctx.cluster.remotes.iteritems():
+        for type_ in ['client']:
+            for id_ in teuthology.roles_of_type(roles_for_host, type_):
+                data = teuthology.get_file(
+                    remote=remote,
+                    path='/etc/ceph/ceph.client.{id}.keyring'.format(id=id_)
+                    )
+                keys.append((type_, id_, data))
+                keys_fp.write(data)
+
+    log.info('Adding keys to all mons...')
+    writes = mons.run(
+        args=[
+            'sudo', 'tee', '-a',
+            keyring_path,
+            ],
+        stdin=run.PIPE,
+        wait=False,
+        stdout=StringIO(),
+        )
+    keys_fp.seek(0)
+    teuthology.feed_many_stdins_and_close(keys_fp, writes)
+    run.wait(writes)
+    for type_, id_, data in keys:
+        run.wait(
+            mons.run(
+                args=[
+                    'sudo',
+                    '{tdir}/enable-coredump'.format(tdir=testdir),
+                    'ceph-coverage',
+                    coverage_dir,
+                    'ceph-authtool',
+                    keyring_path,
+                    '--name={type}.{id}'.format(
+                        type=type_,
+                        id=id_,
+                        ),
+                    ] + list(teuthology.generate_caps(type_)),
+                wait=False,
+                ),
+            )
+
+    log.info('Running mkfs on mon nodes...')
+    for remote, roles_for_host in mons.remotes.iteritems():
+        for id_ in teuthology.roles_of_type(roles_for_host, 'mon'):
+            remote.run(
+                args=[
+                    '{tdir}/enable-coredump'.format(tdir=testdir),
+                    'ceph-coverage',
+                    coverage_dir,
+                    'sudo',
+                    'ceph-mon',
+                    '--mkfs',
+                    '-i', id_,
+                    '--monmap={tdir}/monmap'.format(tdir=testdir),
+                    '--osdmap={tdir}/osdmap'.format(tdir=testdir),
+                    '--keyring={kpath}'.format(kpath=keyring_path),
+                    ],
+                )
+
+
     run.wait(
         mons.run(
             args=[
@@ -918,9 +903,10 @@
     for remote, roles in mons.remotes.iteritems():
         for role in roles:
             if role.startswith('mon.'):
-                teuthology.pull_directory_tarball(remote,
-                    '%s/data/%s' % (testdir, role),
-                    path + '/' + role + '.tgz')
+                teuthology.pull_directory_tarball(
+                    remote,
+                    '/var/lib/ceph/mon',
+                    path + '/' + role + '.tgz')
 
     log.info('Cleaning ceph cluster...')
     run.wait(
@@ -964,6 +950,7 @@
             '{tdir}/enable-coredump'.format(tdir=testdir),
             'ceph-coverage',
             coverage_dir,
+            'sudo',
             '{tdir}/daemon-helper'.format(tdir=testdir),
             daemon_signal,
             ]
diff --git a/teuthology/task/scrub_test.py b/teuthology/task/scrub_test.py
index 515c2f43c228..cfcee5ac0c24 100644
--- a/teuthology/task/scrub_test.py
+++ b/teuthology/task/scrub_test.py
@@ -59,11 +59,12 @@ def task(ctx, config):
     log.info('messing with PG %s on osd %d' % (victim, osd))
     (osd_remote,) = ctx.cluster.only('osd.%d' % osd).remotes.iterkeys()
-    data_path = os.path.join('{tdir}/data'.format(tdir=teuthology.get_testdir(ctx)),
-                             'osd.{id}.data'.format(id=osd),
-                             'current',
-                             '{pg}_head'.format(pg=victim)
-                             )
+    data_path = os.path.join(
+        '/var/lib/ceph/osd',
+        'ceph-{id}'.format(id=osd),
+        'current',
+        '{pg}_head'.format(pg=victim)
+        )
 
     # fuzz time
     ls_fp = StringIO()
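
For reference, the reordered OSD bootstrap this patch produces on each host boils down to: create the default data directory, let ceph-osd --mkfs --mkkey populate it (object store plus keyring), and only then read the root-owned key back so it can be handed to the monitors before their own mkfs. A minimal local sketch of that sequence, assuming ceph is installed and sudo works; bootstrap_osd and the monmap path are illustrative names, not part of this patch:

    import subprocess

    def bootstrap_osd(osd_id, monmap='/tmp/monmap'):
        # Default data dir, as now used by the teuthology ceph task.
        data_dir = '/var/lib/ceph/osd/ceph-{id}'.format(id=osd_id)
        subprocess.check_call(['sudo', 'mkdir', '-p', data_dir])
        # --mkkey generates the daemon keyring alongside the object store,
        # which is why mkfs now has to run before any key gathering.
        subprocess.check_call([
            'sudo', 'ceph-osd', '--mkfs', '--mkkey',
            '-i', str(osd_id),
            '--monmap', monmap,
        ])
        # The keyring is root-owned, so read it back via sudo
        # (the same reason misc.get_file() grew a sudo flag).
        return subprocess.check_output(['sudo', 'cat', data_dir + '/keyring'])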