From fc761fb302122894b4fc170bfbf45295feec7b8d Mon Sep 17 00:00:00 2001
From: Jeff Layton
Date: Thu, 21 Feb 2019 17:18:25 -0500
Subject: [PATCH] mgr/rook: have it create bluestore OSDs in data_directories

When the data_directories field is populated, use it to populate the
spec.storage.nodes..directories field in the CephCluster object. That
cues the rook operator to create an OSD using an area under each of
those directories for storage.

Signed-off-by: Jeff Layton
---
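For reviewers: a rough sketch of the intended behavior. The drive
group, node name, and path below are hypothetical, and the snippet
assumes the mgr python path (where orchestrator.py lives); it
illustrates the shape of the JSON patch this change generates and is
not part of the change itself.

    # Hypothetical directory-only drive group; no block devices named.
    from orchestrator import DriveGroupSpec

    dg = DriveGroupSpec(host_pattern='mynode',
                        data_directories=['/var/lib/rook/osd0'],
                        objectstore='bluestore')

    # For a node not yet present under spec.storage.nodes, add_osds()
    # should now append a patch op shaped like this ("devices" is
    # omitted because the drive group named none):
    expected = [{
        "op": "add",
        "path": "/spec/storage/nodes/-",
        "value": {
            "name": "mynode",
            "storeConfig": {"storeType": "bluestore"},
            "directories": [{"path": "/var/lib/rook/osd0"}],
        },
    }]
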
""" - block_devices = drive_group.data_devices.paths + block_devices = drive_group.data_devices.paths if drive_group.data_devices else None + directories = drive_group.data_directories assert drive_group.objectstore in ("bluestore", "filestore") @@ -386,15 +387,15 @@ class RookCluster(object): current_nodes = current_cluster['spec']['storage'].get('nodes', []) if drive_group.hosts(all_hosts)[0] not in [n['name'] for n in current_nodes]: - patch.append({ - "op": "add", "path": "/spec/storage/nodes/-", "value": { - "name": drive_group.hosts(all_hosts)[0], - "devices": [{'name': d} for d in block_devices], - "storeConfig": { - "storeType": drive_group.objectstore - } - } - }) + pd = { "name": drive_group.hosts(all_hosts)[0], + "storeConfig": { "storeType": drive_group.objectstore }} + + if block_devices: + pd["devices"] = [{'name': d} for d in block_devices] + if directories: + pd["directories"] = [{'path': p} for p in directories] + + patch.append({ "op": "add", "path": "/spec/storage/nodes/-", "value": pd }) else: # Extend existing node node_idx = None @@ -409,7 +410,6 @@ class RookCluster(object): assert current_node is not None new_devices = list(set(block_devices) - set([d['name'] for d in current_node['devices']])) - for n in new_devices: patch.append({ "op": "add", @@ -417,6 +417,14 @@ class RookCluster(object): "value": {'name': n} }) + new_dirs = list(set(directories) - set(current_node['directories'])) + for p in new_dirs: + patch.append({ + "op": "add", + "path": "/spec/storage/nodes/{0}/directories/-".format(node_idx), + "value": {'path': p} + }) + if len(patch) == 0: return "No change" -- 2.39.5