tasks/ceph_manager: make mount_osd_data() cluster-aware
author Josh Durgin <jdurgin@redhat.com>
Thu, 24 Mar 2016 00:15:40 +0000 (17:15 -0700)
committer Josh Durgin <jdurgin@redhat.com>
Fri, 20 May 2016 18:08:53 +0000 (11:08 -0700)
Use a cluster-specific mount point, and address osds by full role,
rather than just id, in the ctx.disk_config structures.

Signed-off-by: Josh Durgin <jdurgin@redhat.com>
(cherry picked from commit 713e717fda482cbe6396ee8af65ac7d81132852a)
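
The core of the change is how an OSD's device is looked up in ctx.disk_config: entries are now keyed by the cluster-qualified role ("<cluster>.osd.<id>"), with a fallback to the bare "osd.<id>" form for the default 'ceph' cluster. Below is a minimal, self-contained sketch of that resolution logic; the dict and device paths are illustrative stand-ins, not taken from a real run.

def resolve_osd_role(roles_to_dev, cluster, osd):
    """Return the key under which this osd appears, or None if absent."""
    role = "{0}.osd.{1}".format(cluster, osd)
    # Jobs on the default cluster may still register the bare "osd.<id>" role.
    alt_role = role if cluster != 'ceph' else "osd.{0}".format(osd)
    if alt_role in roles_to_dev:
        role = alt_role
    return role if role in roles_to_dev else None

# Hypothetical mapping, standing in for
# ctx.disk_config.remote_to_roles_to_dev[remote]:
roles_to_dev = {'osd.0': '/dev/vdb', 'backup.osd.0': '/dev/vdc'}
assert resolve_osd_role(roles_to_dev, 'ceph', 0) == 'osd.0'
assert resolve_osd_role(roles_to_dev, 'backup', 0) == 'backup.osd.0'
assert resolve_osd_role(roles_to_dev, 'backup', 1) is None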

tasks/ceph_manager.py

index 0342941b8ffd35c4bf52b2ec496f5b5e14ec41ee..5fc37843b6c2ee3d496350a76b1814c336b6e895 100644 (file)
@@ -49,26 +49,33 @@ def write_conf(ctx, conf_path=DEFAULT_CONF_PATH, cluster='ceph'):
     run.wait(writes)
 
 
-def mount_osd_data(ctx, remote, osd):
+def mount_osd_data(ctx, remote, cluster, osd):
     """
     Mount a remote OSD
 
     :param ctx: Context
     :param remote: Remote site
-    :param ods: Osd name
+    :param cluster: name of ceph cluster
+    :param osd: Osd name
     """
     log.debug('Mounting data for osd.{o} on {r}'.format(o=osd, r=remote))
-    if (remote in ctx.disk_config.remote_to_roles_to_dev and
-            osd in ctx.disk_config.remote_to_roles_to_dev[remote]):
-        dev = ctx.disk_config.remote_to_roles_to_dev[remote][osd]
+    role = "{0}.osd.{1}".format(cluster, osd)
+    alt_role = role if cluster != 'ceph' else "osd.{0}".format(osd)
+    if remote in ctx.disk_config.remote_to_roles_to_dev:
+        if alt_role in ctx.disk_config.remote_to_roles_to_dev[remote]:
+            role = alt_role
+        if role not in ctx.disk_config.remote_to_roles_to_dev[remote]:
+            return
+        dev = ctx.disk_config.remote_to_roles_to_dev[remote][role]
         mount_options = ctx.disk_config.\
-            remote_to_roles_to_dev_mount_options[remote][osd]
-        fstype = ctx.disk_config.remote_to_roles_to_dev_fstype[remote][osd]
-        mnt = os.path.join('/var/lib/ceph/osd', 'ceph-{id}'.format(id=osd))
+            remote_to_roles_to_dev_mount_options[remote][role]
+        fstype = ctx.disk_config.remote_to_roles_to_dev_fstype[remote][role]
+        mnt = os.path.join('/var/lib/ceph/osd', '{0}-{1}'.format(cluster, osd))
 
-        log.info('Mounting osd.{o}: dev: {n}, '
+        log.info('Mounting osd.{o}: dev: {n}, cluster: {c}, '
                  'mountpoint: {p}, type: {t}, options: {v}'.format(
-                     o=osd, n=remote.name, p=mnt, t=fstype, v=mount_options))
+                     o=osd, n=remote.name, p=mnt, t=fstype, v=mount_options,
+                     c=cluster))
 
         remote.run(
             args=[
@@ -1816,7 +1823,7 @@ class CephManager:
                 raise Exception('Failed to revive osd.{o} via ipmi'.
                                 format(o=osd))
             teuthology.reconnect(self.ctx, 60, [remote])
-            mount_osd_data(self.ctx, remote, str(osd))
+            mount_osd_data(self.ctx, remote, self.cluster, str(osd))
             self.make_admin_daemon_dir(remote)
             self.ctx.daemons.get_daemon('osd', osd, self.cluster).reset()
         self.ctx.daemons.get_daemon('osd', osd, self.cluster).restart()
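
The other half of the change is the mount point: previously hard-coded to 'ceph-<id>', it now embeds the cluster name, so OSDs from two clusters colocated on one host get distinct directories. A short sketch of the resulting convention, assuming a hypothetical second cluster named 'backup':

import os

def osd_mount_point(cluster, osd):
    # Matches the '{0}-{1}'.format(cluster, osd) naming in the patch.
    return os.path.join('/var/lib/ceph/osd', '{0}-{1}'.format(cluster, osd))

assert osd_mount_point('ceph', 3) == '/var/lib/ceph/osd/ceph-3'      # same as the old hard-coded path
assert osd_mount_point('backup', 3) == '/var/lib/ceph/osd/backup-3'  # new, cluster-aware path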