mgr/rook: have it create bluestore OSDs in data_directories
author    Jeff Layton <jlayton@redhat.com>
          Thu, 21 Feb 2019 22:18:25 +0000 (17:18 -0500)
committer Jeff Layton <jlayton@redhat.com>
          Mon, 25 Feb 2019 18:18:38 +0000 (13:18 -0500)
When the data_directories field is populated, use it to fill in the
spec.storage.nodes.<host>.directories field in the CephCluster object.
That will cue the rook operator to create an OSD using an area under
that directory for storage.
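
For illustration (this example is not part of the original commit message;
the host name and path are made up), the node entry appended to
spec.storage.nodes for a directory-only drive group would look roughly like:

    {
        "name": "node1",
        "storeConfig": {"storeType": "bluestore"},
        "directories": [{"path": "/var/lib/rook/osd-dir0"}]
    }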

Signed-off-by: Jeff Layton <jlayton@redhat.com>
src/pybind/mgr/orchestrator.py
src/pybind/mgr/rook/module.py
src/pybind/mgr/rook/rook_cluster.py

diff --git a/src/pybind/mgr/orchestrator.py b/src/pybind/mgr/orchestrator.py
index 3959d6a0c9e8fd110f1805830ae20f5d6641a9b5..598181b501e796e096d77445eb2dde0dd9920bae 100644
@@ -565,10 +565,10 @@ class DriveGroupSpec(object):
     Describe a drive group in the same form that ceph-volume
     understands.
     """
-    def __init__(self, host_pattern, data_devices, db_devices=None, wal_devices=None, journal_devices=None,
+    def __init__(self, host_pattern, data_devices=None, db_devices=None, wal_devices=None, journal_devices=None,
                  data_directories=None, osds_per_device=None, objectstore='bluestore', encrypted=False,
                  db_slots=None, wal_slots=None):
-        # type: (str, DeviceSelection, Optional[DeviceSelection], Optional[DeviceSelection], Optional[DeviceSelection], Optional[List[str]], int, str, bool, int, int) -> ()
+        # type: (str, Optional[DeviceSelection], Optional[DeviceSelection], Optional[DeviceSelection], Optional[DeviceSelection], Optional[List[str]], int, str, bool, int, int) -> ()
 
         # concept of applying a drive group to a (set) of hosts is tightly
         # linked to the drive group itself
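
With data_devices now optional, a drive group can describe directory-backed
OSDs only. A minimal sketch, assuming no further validation in __init__; the
host pattern and path below are illustrative placeholders:

    # Hypothetical usage of DriveGroupSpec with directories only;
    # 'node1' and the directory path are placeholders.
    dg = DriveGroupSpec(
        host_pattern='node1',
        data_directories=['/var/lib/rook/osd-dir0'],
        objectstore='bluestore',
    )
    # dg.data_devices is None here, so callers must now handle that case.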
diff --git a/src/pybind/mgr/rook/module.py b/src/pybind/mgr/rook/module.py
index 0aac98a23d79b558b4d2a66b7fce2932cd820240..d5ca13fc19d4f55e008602783e4a05f11a11aa3c 100644
@@ -404,6 +404,12 @@ class RookOrchestrator(MgrModule, orchestrator.Orchestrator):
         # type: (orchestrator.DriveGroupSpec, List[str]) -> RookWriteCompletion
 
         assert len(drive_group.hosts(all_hosts)) == 1
+        targets = []
+        if drive_group.data_devices:
+            targets += drive_group.data_devices.paths
+        if drive_group.data_directories:
+            targets += drive_group.data_directories
+
         if not self.rook_cluster.node_exists(drive_group.hosts(all_hosts)[0]):
             raise RuntimeError("Node '{0}' is not in the Kubernetes "
                                "cluster".format(drive_group.hosts(all_hosts)))
@@ -438,7 +444,7 @@ class RookOrchestrator(MgrModule, orchestrator.Orchestrator):
                     continue
 
                 metadata = self.get_metadata('osd', "%s" % osd_id)
-                if metadata and metadata['devices'] in drive_group.data_devices.paths:
+                if metadata and metadata['devices'] in targets:
                     found.append(osd_id)
                 else:
                     self.log.info("ignoring osd {0} {1}".format(
@@ -449,6 +455,5 @@ class RookOrchestrator(MgrModule, orchestrator.Orchestrator):
 
         return RookWriteCompletion(execute, is_complete,
                                    "Creating OSD on {0}:{1}".format(
-                                       drive_group.hosts(all_hosts)[0],
-                                       drive_group.data_devices.paths
+                                       drive_group.hosts(all_hosts)[0], targets
                                    ))
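
The hunks above fold device paths and directories into a single targets list,
which then drives both the OSD-metadata match and the completion message. A
standalone sketch of the same aggregation (field names as in orchestrator.py,
example values illustrative):

    # Self-contained version of the aggregation performed above.
    def osd_targets(drive_group):
        targets = []
        if drive_group.data_devices:
            targets += drive_group.data_devices.paths   # e.g. ['sdb', 'sdc']
        if drive_group.data_directories:
            targets += drive_group.data_directories     # e.g. ['/var/lib/rook/osd-dir0']
        return targets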
diff --git a/src/pybind/mgr/rook/rook_cluster.py b/src/pybind/mgr/rook/rook_cluster.py
index f42b95a0b5d006a899ec816538e9d8542b06d61b..4fda9b1874204debb50ca279a70ca46db69b775d 100644
@@ -357,7 +357,8 @@ class RookCluster(object):
         Rook currently (0.8) can only do single-drive OSDs, so we
         treat all drive groups as just a list of individual OSDs.
         """
-        block_devices = drive_group.data_devices.paths
+        block_devices = drive_group.data_devices.paths if drive_group.data_devices else None
+        directories = drive_group.data_directories
 
         assert drive_group.objectstore in ("bluestore", "filestore")
 
@@ -386,15 +387,15 @@ class RookCluster(object):
         current_nodes = current_cluster['spec']['storage'].get('nodes', [])
 
         if drive_group.hosts(all_hosts)[0] not in [n['name'] for n in current_nodes]:
-            patch.append({
-                "op": "add", "path": "/spec/storage/nodes/-", "value": {
-                    "name": drive_group.hosts(all_hosts)[0],
-                    "devices": [{'name': d} for d in block_devices],
-                    "storeConfig": {
-                        "storeType": drive_group.objectstore
-                    }
-                }
-            })
+            pd = { "name": drive_group.hosts(all_hosts)[0],
+                   "storeConfig": { "storeType": drive_group.objectstore }}
+
+            if block_devices:
+                pd["devices"] = [{'name': d} for d in block_devices]
+            if directories:
+                pd["directories"] = [{'path': p} for p in directories]
+
+            patch.append({ "op": "add", "path": "/spec/storage/nodes/-", "value": pd })
         else:
             # Extend existing node
             node_idx = None
@@ -409,7 +410,6 @@ class RookCluster(object):
             assert current_node is not None
 
             new_devices = list(set(block_devices) - set([d['name'] for d in current_node['devices']]))
-
             for n in new_devices:
                 patch.append({
                     "op": "add",
@@ -417,6 +417,14 @@ class RookCluster(object):
                     "value": {'name': n}
                 })
 
+            new_dirs = list(set(directories) - set(current_node['directories']))
+            for p in new_dirs:
+                patch.append({
+                    "op": "add",
+                    "path": "/spec/storage/nodes/{0}/directories/-".format(node_idx),
+                    "value": {'path': p}
+                })
+
         if len(patch) == 0:
             return "No change"