git.apps.os.sepia.ceph.com Git - teuthology.git/commitdiff
ceph: use default data, keyring locations
author Sage Weil <sage@inktank.com>
Sun, 17 Feb 2013 06:32:16 +0000 (22:32 -0800)
committer Sage Weil <sage@inktank.com>
Mon, 18 Feb 2013 21:39:05 +0000 (13:39 -0800)
This required reordering the cluster setup so that we run ceph-osd
--mkfs --mkkey before gathering keys and initializing the monitors.

Also, run daemons as root.

Signed-off-by: Sage Weil <sage@inktank.com>
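
The resulting bootstrap order is: create the default data directories, run
ceph-osd --mkfs --mkkey (which now generates each OSD's key as a side effect
of mkfs), gather the generated keyrings from their default /var/lib/ceph
locations, feed them to the monitors, and only then run ceph-mon --mkfs. A
minimal Python sketch of that ordering follows; run_on() is a hypothetical
stand-in for teuthology's remote.run(), used here only to show the sequence.

# Sketch of the reordered bootstrap; not teuthology code.
# run_on() is a hypothetical helper that records what would run on a host.
def run_on(host, args):
    print(host, ' '.join(args))

def bootstrap(osd_ids, mon_ids):
    # 1. ceph-osd --mkfs --mkkey populates the data dir *and* mints the
    #    OSD key under the default /var/lib/ceph/osd/ceph-<id>/ location.
    for i in osd_ids:
        run_on('osd.%d' % i, ['sudo', 'mkdir', '-p',
                              '/var/lib/ceph/osd/ceph-%d' % i])
        run_on('osd.%d' % i, ['sudo', 'ceph-osd', '--mkfs', '--mkkey',
                              '-i', str(i)])

    # 2. Keys can only be gathered after mkfs, since --mkkey created them;
    #    they live in root-owned default locations, hence sudo.
    for i in osd_ids:
        run_on('osd.%d' % i, ['sudo', 'cat',
                              '/var/lib/ceph/osd/ceph-%d/keyring' % i])

    # 3. Monitors are initialized last, once every key has been added to
    #    the mon keyring.
    for m in mon_ids:
        run_on('mon.%s' % m, ['sudo', 'ceph-mon', '--mkfs', '-i', m])

bootstrap([0, 1], ['a'])
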
teuthology/ceph.conf.template
teuthology/misc.py
teuthology/nuke.py
teuthology/task/ceph.py
teuthology/task/scrub_test.py

index e9f0185968d769f002b55b34dd6092ed0510d61d..d4689b592824e5187f705791bf01452020d1b6a0 100644 (file)
        osd crush chooseleaf type = 0
 
 [mon]
-        mon data = {testdir}/data/mon.$id
        mon cluster log file = {testdir}/archive/log/cluster.mon.$id.log
 
 [osd]
-        osd data = {testdir}/data/osd.$id.data
-        osd journal = {testdir}/data/osd.$id.journal
         osd journal size = 100
-        keyring = {testdir}/data/osd.$id.keyring
         osd class dir = /usr/lib/rados-classes
 
         osd scrub load threshold = 5.0
@@ -29,7 +25,6 @@
        osd recovery max chunk = 1048576
 
 [mds]
-        keyring = {testdir}/data/mds.$id.keyring
         lockdep = 1
         mds debug scatterstat = true
         mds verify scatter = true
index 1671d3673603650baa276cff163be5bd7bedb6ef..06bf521f41017494721d1583a8c03b6bed92b9ad 100644 (file)
@@ -274,12 +274,16 @@ def get_file(remote, path, sudo=False):
     """
     Read a file from remote host into memory.
     """
-    proc = remote.run(
-        args=[
+    args = []
+    if sudo:
+        args.append('sudo')
+    args.extend([
             'cat',
             '--',
             path,
-            ],
+            ])
+    proc = remote.run(
+        args=args,
         stdout=StringIO(),
         )
     data = proc.stdout.getvalue()
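
With the keyrings now in root-owned default locations, callers pass
sudo=True to read them, as the key-gathering loop later in this commit does.
A minimal usage sketch (the path shown is illustrative):

# Hypothetical call site: read a root-owned keyring via the new sudo flag.
data = teuthology.get_file(
    remote=remote,
    path='/var/lib/ceph/osd/ceph-0/keyring',
    sudo=True,
    )
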
@@ -339,6 +343,7 @@ def pull_directory_tarball(remote, remotedir, localfile):
     out = open(localfile, 'w')
     proc = remote.run(
         args=[
+            'sudo',
             'tar',
             'cz',
             '-f', '-',
index e7c04ca9c8f79b02b9625d176510815a8d7c6490..934cdedfefb5e1ca0c13e3d8801ed21aab399df7 100644 (file)
@@ -156,7 +156,7 @@ def remove_osd_mounts(ctx, log):
     ctx.cluster.run(
         args=[
             'grep',
-            '{tdir}/data/'.format(tdir=get_testdir(ctx)),
+            '/var/lib/ceph/osd/',
             '/etc/mtab',
             run.Raw('|'),
             'awk', '{print $2}', run.Raw('|'),
index 9de4075c01594d9a94c3d987286c4a174b3221c8..3d75e51e90997a655e484b2e8d600c87e85c33f5 100644 (file)
@@ -360,7 +360,7 @@ def mount_osd_data(ctx, remote, osd):
         journal = ctx.disk_config.remote_to_roles_to_journals[remote][osd]
         mount_options = ctx.disk_config.remote_to_roles_to_dev_mount_options[remote][osd]
         fstype = ctx.disk_config.remote_to_roles_to_dev_fstype[remote][osd]
-        mnt = os.path.join('{tdir}/data'.format(tdir=testdir), 'osd.{id}.data'.format(id=osd))
+        mnt = os.path.join('/var/lib/ceph/osd', 'ceph-{id}'.format(id=osd))
 
         log.info('Mounting osd.{o}: dev: {n}, mountpoint: {p}, type: {t}, options: {v}'.format(
                  o=osd, n=remote.name, p=mnt, t=fstype, v=mount_options))
@@ -595,36 +595,26 @@ def cluster(ctx, config):
             ),
         )
 
-    log.info('Setting up osd nodes...')
-    for remote, roles_for_host in osds.remotes.iteritems():
-        for id_ in teuthology.roles_of_type(roles_for_host, 'osd'):
-            remote.run(
-                args=[
-                    '{tdir}/enable-coredump'.format(tdir=testdir),
-                    'ceph-coverage',
-                    coverage_dir,
-                    'ceph-authtool',
-                    '--create-keyring',
-                    '--gen-key',
-                    '--name=osd.{id}'.format(id=id_),
-                    '{tdir}/data/osd.{id}.keyring'.format(tdir=testdir, id=id_),
-                    ],
-                )
-
     log.info('Setting up mds nodes...')
     mdss = ctx.cluster.only(teuthology.is_type('mds'))
     for remote, roles_for_host in mdss.remotes.iteritems():
         for id_ in teuthology.roles_of_type(roles_for_host, 'mds'):
             remote.run(
                 args=[
+                    'sudo',
+                    'mkdir',
+                    '-p',
+                    '/var/lib/ceph/mds/ceph-{id}'.format(id=id_),
+                    run.Raw('&&'),
                     '{tdir}/enable-coredump'.format(tdir=testdir),
                     'ceph-coverage',
                     coverage_dir,
+                    'sudo',
                     'ceph-authtool',
                     '--create-keyring',
                     '--gen-key',
                     '--name=mds.{id}'.format(id=id_),
-                    '{tdir}/data/mds.{id}.keyring'.format(tdir=testdir, id=id_),
+                    '/var/lib/ceph/mds/ceph-{id}/keyring'.format(id=id_),
                     ],
                 )
 
@@ -653,81 +643,6 @@ def cluster(ctx, config):
                     ],
                 )
 
-    log.info('Reading keys from all nodes...')
-    keys_fp = StringIO()
-    keys = []
-    for remote, roles_for_host in ctx.cluster.remotes.iteritems():
-        for type_ in ['osd', 'mds']:
-            for id_ in teuthology.roles_of_type(roles_for_host, type_):
-                data = teuthology.get_file(
-                    remote=remote,
-                    path='{tdir}/data/{type}.{id}.keyring'.format(
-                        tdir=testdir,
-                        type=type_,
-                        id=id_,
-                        ),
-                    )
-                keys.append((type_, id_, data))
-                keys_fp.write(data)
-    for remote, roles_for_host in ctx.cluster.remotes.iteritems():
-        for type_ in ['client']:
-            for id_ in teuthology.roles_of_type(roles_for_host, type_):
-                data = teuthology.get_file(
-                    remote=remote,
-                    path='/etc/ceph/ceph.client.{id}.keyring'.format(id=id_)
-                    )
-                keys.append((type_, id_, data))
-                keys_fp.write(data)
-
-    log.info('Adding keys to all mons...')
-    writes = mons.run(
-        args=[
-            'sudo', 'tee', '-a',
-            keyring_path,
-            ],
-        stdin=run.PIPE,
-        wait=False,
-        stdout=StringIO(),
-        )
-    keys_fp.seek(0)
-    teuthology.feed_many_stdins_and_close(keys_fp, writes)
-    run.wait(writes)
-    for type_, id_, data in keys:
-        run.wait(
-            mons.run(
-                args=[
-                    'sudo',
-                    '{tdir}/enable-coredump'.format(tdir=testdir),
-                    'ceph-coverage',
-                    coverage_dir,
-                    'ceph-authtool',
-                    keyring_path,
-                    '--name={type}.{id}'.format(
-                        type=type_,
-                        id=id_,
-                        ),
-                    ] + list(teuthology.generate_caps(type_)),
-                wait=False,
-                ),
-            )
-
-    log.info('Running mkfs on mon nodes...')
-    for remote, roles_for_host in mons.remotes.iteritems():
-        for id_ in teuthology.roles_of_type(roles_for_host, 'mon'):
-            remote.run(
-                args=[
-                    '{tdir}/enable-coredump'.format(tdir=testdir),
-                    'ceph-coverage',
-                    coverage_dir,
-                    'ceph-mon',
-                    '--mkfs',
-                    '-i', id_,
-                    '--monmap={tdir}/monmap'.format(tdir=testdir),
-                    '--osdmap={tdir}/osdmap'.format(tdir=testdir),
-                    '--keyring={kpath}'.format(kpath=keyring_path),
-                    ],
-                )
-
     log.info('Running mkfs on osd nodes...')
     for remote, roles_for_host in osds.remotes.iteritems():
         roles_to_devs = remote_to_roles_to_devs[remote]
@@ -740,14 +655,15 @@ def cluster(ctx, config):
 
 
         for id_ in teuthology.roles_of_type(roles_for_host, 'osd'):
-            log.info(str(roles_to_journals))
-            log.info(id_)
             remote.run(
                 args=[
+                    'sudo',
                     'mkdir',
-                    os.path.join('{tdir}/data'.format(tdir=testdir), 'osd.{id}.data'.format(id=id_)),
-                    ],
-                )
+                    '-p',
+                    '/var/lib/ceph/osd/ceph-{id}'.format(id=id_),
+                    ])
+            log.info(str(roles_to_journals))
+            log.info(id_)
             if roles_to_devs.get(id_):
                 dev = roles_to_devs[id_]
                 fs = config.get('fs')
@@ -796,7 +712,7 @@ def cluster(ctx, config):
                         '-t', fs,
                         '-o', ','.join(mount_options),
                         dev,
-                        os.path.join('{tdir}/data'.format(tdir=testdir), 'osd.{id}.data'.format(id=id_)),
+                        os.path.join('/var/lib/ceph/osd', 'ceph-{id}'.format(id=id_)),
                         ]
                     )
                 if not remote in ctx.disk_config.remote_to_roles_to_dev_mount_options:
@@ -805,21 +721,9 @@ def cluster(ctx, config):
                 if not remote in ctx.disk_config.remote_to_roles_to_dev_fstype:
                     ctx.disk_config.remote_to_roles_to_dev_fstype[remote] = {}
                 ctx.disk_config.remote_to_roles_to_dev_fstype[remote][id_] = fs
-                remote.run(
-                    args=[
-                        'sudo', 'chown', '-R', 'ubuntu.ubuntu',
-                        os.path.join('{tdir}/data'.format(tdir=testdir), 'osd.{id}.data'.format(id=id_))
-                        ]
-                    )
-                remote.run(
-                    args=[
-                        'sudo', 'chmod', '-R', '755',
-                        os.path.join('{tdir}/data'.format(tdir=testdir), 'osd.{id}.data'.format(id=id_))
-                        ]
-                    )
                 devs_to_clean[remote].append(
                     os.path.join(
-                        '{tdir}/data'.format(tdir=testdir), 'osd.{id}.data'.format(id=id_)
+                        os.path.join('/var/lib/ceph/osd', 'ceph-{id}'.format(id=id_)),
                         )
                     )
 
@@ -830,12 +734,93 @@ def cluster(ctx, config):
                     '{tdir}/enable-coredump'.format(tdir=testdir),
                     'ceph-coverage',
                     coverage_dir,
+                    'sudo',
                     'ceph-osd',
                     '--mkfs',
+                    '--mkkey',
                     '-i', id_,
                     '--monmap', '{tdir}/monmap'.format(tdir=testdir),
                     ],
                 )
+
+
+    log.info('Reading keys from all nodes...')
+    keys_fp = StringIO()
+    keys = []
+    for remote, roles_for_host in ctx.cluster.remotes.iteritems():
+        for type_ in ['mds','osd']:
+            for id_ in teuthology.roles_of_type(roles_for_host, type_):
+                data = teuthology.get_file(
+                    remote=remote,
+                    path='/var/lib/ceph/{type}/ceph-{id}/keyring'.format(
+                        type=type_,
+                        id=id_,
+                        ),
+                    sudo=True,
+                    )
+                keys.append((type_, id_, data))
+                keys_fp.write(data)
+    for remote, roles_for_host in ctx.cluster.remotes.iteritems():
+        for type_ in ['client']:
+            for id_ in teuthology.roles_of_type(roles_for_host, type_):
+                data = teuthology.get_file(
+                    remote=remote,
+                    path='/etc/ceph/ceph.client.{id}.keyring'.format(id=id_)
+                    )
+                keys.append((type_, id_, data))
+                keys_fp.write(data)
+
+    log.info('Adding keys to all mons...')
+    writes = mons.run(
+        args=[
+            'sudo', 'tee', '-a',
+            keyring_path,
+            ],
+        stdin=run.PIPE,
+        wait=False,
+        stdout=StringIO(),
+        )
+    keys_fp.seek(0)
+    teuthology.feed_many_stdins_and_close(keys_fp, writes)
+    run.wait(writes)
+    for type_, id_, data in keys:
+        run.wait(
+            mons.run(
+                args=[
+                    'sudo',
+                    '{tdir}/enable-coredump'.format(tdir=testdir),
+                    'ceph-coverage',
+                    coverage_dir,
+                    'ceph-authtool',
+                    keyring_path,
+                    '--name={type}.{id}'.format(
+                        type=type_,
+                        id=id_,
+                        ),
+                    ] + list(teuthology.generate_caps(type_)),
+                wait=False,
+                ),
+            )
+
+    log.info('Running mkfs on mon nodes...')
+    for remote, roles_for_host in mons.remotes.iteritems():
+        for id_ in teuthology.roles_of_type(roles_for_host, 'mon'):
+            remote.run(
+                args=[
+                    '{tdir}/enable-coredump'.format(tdir=testdir),
+                    'ceph-coverage',
+                    coverage_dir,
+                    'sudo',
+                    'ceph-mon',
+                    '--mkfs',
+                    '-i', id_,
+                    '--monmap={tdir}/monmap'.format(tdir=testdir),
+                    '--osdmap={tdir}/osdmap'.format(tdir=testdir),
+                    '--keyring={kpath}'.format(kpath=keyring_path),
+                    ],
+                )
+
+
     run.wait(
         mons.run(
             args=[
@@ -918,9 +903,10 @@ def cluster(ctx, config):
             for remote, roles in mons.remotes.iteritems():
                 for role in roles:
                     if role.startswith('mon.'):
-                        teuthology.pull_directory_tarball(remote,
-                                       '%s/data/%s' % (testdir, role),
-                                       path + '/' + role + '.tgz')
+                        teuthology.pull_directory_tarball(
+                            remote,
+                            '/var/lib/ceph/mon',
+                            path + '/' + role + '.tgz')
 
         log.info('Cleaning ceph cluster...')
         run.wait(
@@ -964,6 +950,7 @@ def run_daemon(ctx, config, type_):
                 '{tdir}/enable-coredump'.format(tdir=testdir),
                 'ceph-coverage',
                 coverage_dir,
+                'sudo',
                 '{tdir}/daemon-helper'.format(tdir=testdir),
                 daemon_signal,
                 ]
index 515c2f43c228932cd8728f50080503fc600a4478..cfcee5ac0c244a2511821164bf7f8249e310b905 100644 (file)
@@ -59,11 +59,12 @@ def task(ctx, config):
     log.info('messing with PG %s on osd %d' % (victim, osd))
 
     (osd_remote,) = ctx.cluster.only('osd.%d' % osd).remotes.iterkeys()
-    data_path = os.path.join('{tdir}/data'.format(tdir=teuthology.get_testdir(ctx)),
-                             'osd.{id}.data'.format(id=osd),
-                             'current',
-                             '{pg}_head'.format(pg=victim)
-                            )
+    data_path = os.path.join(
+        '/var/lib/ceph/osd',
+        'ceph-{id}'.format(id=osd),
+        'current',
+        '{pg}_head'.format(pg=victim)
+        )
 
     # fuzz time
     ls_fp = StringIO()