git.apps.os.sepia.ceph.com Git - ceph.git/commitdiff
ceph.py: add btrfs option
author    Samuel Just <samuel.just@dreamhost.com>
Mon, 3 Oct 2011 21:03:36 +0000 (14:03 -0700)
committer Samuel Just <samuel.just@dreamhost.com>
Mon, 3 Oct 2011 21:26:04 +0000 (14:26 -0700)
Signed-off-by: Samuel Just <samuel.just@dreamhost.com>
teuthology/misc.py
teuthology/task/ceph.py

index 488c0a3ffcd44d8043aa75d6731892e018425029..601af35ef22127dd5d5f83015e9efd12be3fe408 100644 (file)
@@ -234,6 +234,31 @@ def get_file(remote, path):
     data = proc.stdout.getvalue()
     return data
 
+def get_scratch_devices(remote):
+    """
+    Read the scratch disk list from remote host
+    """
+    devs = []
+    try:
+        file_data = get_file(remote, "/scratch_devs")
+        devs = file_data.split()
+    except:
+        devs = ['/dev/sdb']
+
+    retval = []
+    for dev in devs:
+        try:
+            remote.run(
+                args=[
+                    'stat',
+                    dev
+                    ]
+                )
+            retval.append(dev)
+        except:
+            pass
+    return retval
+
 def wait_until_healthy(remote):
     """Wait until a Ceph cluster is healthy."""
     while True:
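
The new get_scratch_devices() helper above reads /scratch_devs as a whitespace-separated list of block device paths, falls back to ['/dev/sdb'] if the file cannot be read, and keeps only the entries that stat can see on the host. A minimal usage sketch, assuming a teuthology Remote object named remote; the device names are illustrative, not taken from the commit:

    # Illustrative only: device names below are assumptions.
    devs = get_scratch_devices(remote)
    # If /scratch_devs contains "/dev/sdb /dev/sdc" and both pass stat, then
    # devs == ['/dev/sdb', '/dev/sdc'].  If the file is absent, the fallback
    # list ['/dev/sdb'] is filtered through the same stat check, so devs may
    # legitimately come back empty on a host with no scratch disk.
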
index 61841b4732531d560b37b255ba048a06888c7f17..ce359fec39e673763c68a239889611178fa8c408 100644 (file)
@@ -258,6 +258,10 @@ def binaries(ctx, config):
                 ),
             )
 
+
+def assign_devs(roles, devs):
+    return dict(zip(roles, devs))
+
 @contextlib.contextmanager
 def valgrind_post(ctx, config):
     try:
@@ -542,7 +546,19 @@ def cluster(ctx, config):
                 )
 
     log.info('Running mkfs on osd nodes...')
+    devs_to_clean = {}
     for remote, roles_for_host in osds.remotes.iteritems():
+        roles_to_devs = {}
+        if config.get('btrfs'):
+            log.info('btrfs option selected, checking for scratch devs')
+            devs = teuthology.get_scratch_devices(remote)
+            log.info('found devs: %s' % (str(devs),))
+            roles_to_devs = assign_devs(
+                teuthology.roles_of_type(roles_for_host, 'osd'), devs
+                )
+            log.info('dev map: %s' % (str(roles_to_devs),))
+            devs_to_clean[remote] = []
+
         for id_ in teuthology.roles_of_type(roles_for_host, 'osd'):
             remote.run(
                 args=[
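
assign_devs() is just dict(zip(roles, devs)), so devices are paired with a host's OSD roles positionally and zip() truncates to the shorter list; any OSD role left without a device keeps the plain directory backend (that is what the roles_to_devs.get(id_) check in the next hunk handles). A small sketch, assuming roles_of_type yields the bare ids ('0', '1', ...):

    assign_devs(['0', '1', '2'], ['/dev/sdb', '/dev/sdc'])
    # -> {'0': '/dev/sdb', '1': '/dev/sdc'}
    # osd.2 gets no device, so it skips the mkfs.btrfs/mount steps below.
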
@@ -550,6 +566,50 @@ def cluster(ctx, config):
                     os.path.join('/tmp/cephtest/data', 'osd.{id}.data'.format(id=id_)),
                     ],
                 )
+            if roles_to_devs.get(id_):
+                dev = roles_to_devs[id_]
+                log.info('mkfs.btrfs on %s on %s' % (dev, remote))
+                remote.run(
+                    args=[
+                        'sudo',
+                        'apt-get', 'install', '-y', 'btrfs-tools'
+                        ]
+                    )
+                remote.run(
+                    args=[
+                        'sudo',
+                        'mkfs.btrfs',
+                        dev
+                        ]
+                    )
+                log.info('mount %s on %s' % (dev, remote))
+                remote.run(
+                    args=[
+                        'sudo',
+                        'mount',
+                        dev,
+                        os.path.join('/tmp/cephtest/data', 'osd.{id}.data'.format(id=id_)),
+                        ]
+                    )
+                remote.run(
+                    args=[
+                        'sudo', 'chown', '-R', 'ubuntu.ubuntu',
+                        os.path.join('/tmp/cephtest/data', 'osd.{id}.data'.format(id=id_))
+                        ]
+                    )
+                remote.run(
+                    args=[
+                        'sudo', 'chmod', '-R', '755',
+                        os.path.join('/tmp/cephtest/data', 'osd.{id}.data'.format(id=id_))
+                        ]
+                    )
+                devs_to_clean[remote].append(
+                    os.path.join(
+                        '/tmp/cephtest/data', 'osd.{id}.data'.format(id=id_)
+                        )
+                    )
+
+        for id_ in teuthology.roles_of_type(roles_for_host, 'osd'):
             remote.run(
                 args=[
                     '/tmp/cephtest/enable-coredump',
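
For each OSD id that was assigned a device, the block above effectively runs the following on the remote (device and id are illustrative), then records the mounted data directory in devs_to_clean so it can be force-unmounted at teardown:

    # sudo apt-get install -y btrfs-tools
    # sudo mkfs.btrfs /dev/sdb
    # sudo mount /dev/sdb /tmp/cephtest/data/osd.0.data
    # sudo chown -R ubuntu.ubuntu /tmp/cephtest/data/osd.0.data
    # sudo chmod -R 755 /tmp/cephtest/data/osd.0.data
    devs_to_clean[remote].append('/tmp/cephtest/data/osd.0.data')
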
@@ -605,6 +665,18 @@ def cluster(ctx, config):
             log.warning('Found errors (ERR|WRN|SEC) in cluster log')
             ctx.summary['success'] = False
 
+        for remote, dirs in devs_to_clean.iteritems():
+            for dir_ in dirs:
+                log.info('Unmounting %s on %s' % (dir_, remote))
+                remote.run(
+                    args=[
+                        "sudo",
+                        "umount",
+                        "-f",
+                        dir_
+                        ]
+                    )
+
         log.info('Cleaning ceph cluster...')
         run.wait(
             ctx.cluster.run(
@@ -851,6 +923,13 @@ def task(ctx, config):
         - ceph:
             coverage: true
 
+    To use btrfs on the osds, use::
+        tasks:
+        - ceph:
+            btrfs: true
+    Note that this will cause the task to check the /scratch_devs file on each node
+    for available devices.  If no such file is found, /dev/sdb will be used.
+
     To run some daemons under valgrind, include their names
     and the tool to use in a valgrind section::
         tasks:
@@ -942,7 +1021,8 @@ def task(ctx, config):
                 )),
         lambda: valgrind_post(ctx=ctx, config=config),
         lambda: cluster(ctx=ctx, config=dict(
-                conf=config.get('conf', {})
+                conf=config.get('conf', {}),
+                btrfs=config.get('btrfs', False)
                 )),
         lambda: mon(ctx=ctx, config=config),
         lambda: osd(ctx=ctx, config=config),
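
The last hunk threads the new flag into cluster(): the task reads btrfs from its config and forwards it with a default of False, so existing job YAML keeps its current behaviour. A minimal sketch of the resulting config dict (the values are illustrative):

    config = {'btrfs': True}                  # parsed from the task's yaml stanza
    cluster_config = dict(
        conf=config.get('conf', {}),
        btrfs=config.get('btrfs', False),     # same default as in the hunk above
        )
    # cluster_config == {'conf': {}, 'btrfs': True}
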