Describe a drive group in the same form that ceph-volume
understands.
"""
- def __init__(self, host_pattern, data_devices, db_devices=None, wal_devices=None, journal_devices=None,
+ def __init__(self, host_pattern, data_devices=None, db_devices=None, wal_devices=None, journal_devices=None,
data_directories=None, osds_per_device=None, objectstore='bluestore', encrypted=False,
db_slots=None, wal_slots=None):
- # type: (str, DeviceSelection, Optional[DeviceSelection], Optional[DeviceSelection], Optional[DeviceSelection], Optional[List[str]], int, str, bool, int, int) -> ()
+ # type: (str, Optional[DeviceSelection], Optional[DeviceSelection], Optional[DeviceSelection], Optional[DeviceSelection], Optional[List[str]], Optional[int], str, bool, Optional[int], Optional[int]) -> None
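# A minimal sketch (illustration only, not part of the change) of
# constructing such a spec, assuming this is DriveGroupSpec's constructor
# and that DeviceSelection carries device paths in a `paths` list as used
# further below:
#
#   DriveGroupSpec(host_pattern='node1',
#                  data_devices=DeviceSelection(paths=['/dev/sdb', '/dev/sdc']))
#   DriveGroupSpec(host_pattern='node2',
#                  data_directories=['/var/lib/osd1'])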
# The concept of applying a drive group to a (set of) hosts is tightly
# linked to the drive group itself.
# type: (orchestrator.DriveGroupSpec, List[str]) -> RookWriteCompletion
assert len(drive_group.hosts(all_hosts)) == 1
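# The assert relies on hosts() resolving the spec's host_pattern against
# the orchestrator's known hosts (assumed here to be a pattern match,
# e.g. fnmatch) and on this code path handling exactly one matching host:
#   drive_group.hosts(['node1', 'node2'])  # == ['node1'] for host_pattern='node1'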
+ targets = []
+ if drive_group.data_devices:
+ targets += drive_group.data_devices.paths
+ if drive_group.data_directories:
+ targets += drive_group.data_directories
+
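# For example (hypothetical values): a spec with data_devices paths
# ['/dev/sdb'] and data_directories ['/var/lib/osd1'] yields
# targets == ['/dev/sdb', '/var/lib/osd1'].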
if not self.rook_cluster.node_exists(drive_group.hosts(all_hosts)[0]):
-     raise RuntimeError("Node '{0}' is not in the Kubernetes "
-                        "cluster".format(drive_group.hosts(all_hosts)))
+     raise RuntimeError("Node '{0}' is not in the Kubernetes "
+                        "cluster".format(drive_group.hosts(all_hosts)[0]))
continue
metadata = self.get_metadata('osd', "%s" % osd_id)
- if metadata and metadata['devices'] in drive_group.data_devices.paths:
+ if metadata and metadata['devices'] in targets:
found.append(osd_id)
else:
self.log.info("ignoring osd {0} {1}".format(
return RookWriteCompletion(execute, is_complete,
"Creating OSD on {0}:{1}".format(
- drive_group.hosts(all_hosts)[0],
- drive_group.data_devices.paths
+ drive_group.hosts(all_hosts)[0], targets
))
Rook currently (0.8) can only do single-drive OSDs, so we
treat all drive groups as just a list of individual OSDs.
"""
- block_devices = drive_group.data_devices.paths
+ block_devices = drive_group.data_devices.paths if drive_group.data_devices else None
+ directories = drive_group.data_directories
assert drive_group.objectstore in ("bluestore", "filestore")
current_nodes = current_cluster['spec']['storage'].get('nodes', [])
if drive_group.hosts(all_hosts)[0] not in [n['name'] for n in current_nodes]:
- patch.append({
- "op": "add", "path": "/spec/storage/nodes/-", "value": {
- "name": drive_group.hosts(all_hosts)[0],
- "devices": [{'name': d} for d in block_devices],
- "storeConfig": {
- "storeType": drive_group.objectstore
- }
- }
- })
+ pd = { "name": drive_group.hosts(all_hosts)[0],
+ "storeConfig": { "storeType": drive_group.objectstore }}
+
+ if block_devices:
+ pd["devices"] = [{'name': d} for d in block_devices]
+ if directories:
+ pd["directories"] = [{'path': p} for p in directories]
+
+ patch.append({ "op": "add", "path": "/spec/storage/nodes/-", "value": pd })
else:
# Extend existing node
node_idx = None
assert current_node is not None
- new_devices = list(set(block_devices) - set([d['name'] for d in current_node['devices']]))
+ new_devices = list(set(block_devices or []) - set(d['name'] for d in current_node.get('devices', [])))
-
for n in new_devices:
    patch.append({
        "op": "add",
        "path": "/spec/storage/nodes/{0}/devices/-".format(node_idx),
        "value": {'name': n}
    })
+ new_dirs = list(set(directories or []) - set(d['path'] for d in current_node.get('directories', [])))
+ for p in new_dirs:
+ patch.append({
+ "op": "add",
+ "path": "/spec/storage/nodes/{0}/directories/-".format(node_idx),
+ "value": {'path': p}
+ })
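# Extending an existing node instead appends to that node's own arrays;
# with node_idx == 0 and hypothetical values the ops look like:
#   {"op": "add", "path": "/spec/storage/nodes/0/devices/-",
#    "value": {"name": "/dev/sdc"}}
#   {"op": "add", "path": "/spec/storage/nodes/0/directories/-",
#    "value": {"path": "/var/lib/osd2"}}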
+
if len(patch) == 0:
return "No change"