From 7b6c3337af12bcf6d10857ba59fd452054645430 Mon Sep 17 00:00:00 2001
From: Francesco Pantano
Date: Fri, 17 Feb 2023 15:02:12 +0100
Subject: [PATCH] Remove the filestore section from ceph-volume

Filestore is no longer supported in cephadm: the documentation [1]
lists this as a limitation, and the DriveGroup validation [2] raises
an exception if this method is used.
This patch removes the legacy code that was used to produce the
filestore-related ceph-volume commands.

[1] https://github.com/ceph/ceph/blob/main/doc/cephadm/adoption.rst#limitations
[2] https://github.com/ceph/ceph/blob/main/src/python-common/ceph/deployment/drive_group.py#L366-L369

Signed-off-by: Francesco Pantano
(cherry picked from commit 6e5bef1b1d69ec6e8be9b0fa2f4f47491df80687)
---
 .../ceph/deployment/translate.py | 103 +++++++-----------
 1 file changed, 41 insertions(+), 62 deletions(-)

diff --git a/src/python-common/ceph/deployment/translate.py b/src/python-common/ceph/deployment/translate.py
index 0f404af305b..86243b8aefd 100644
--- a/src/python-common/ceph/deployment/translate.py
+++ b/src/python-common/ceph/deployment/translate.py
@@ -95,7 +95,6 @@ class to_ceph_volume(object):
 
         db_devices = [x.path for x in self.selection.db_devices()]
         wal_devices = [x.path for x in self.selection.wal_devices()]
-        journal_devices = [x.path for x in self.selection.journal_devices()]
 
         if not self.selection.data_devices():
             return []
@@ -116,73 +115,53 @@ class to_ceph_volume(object):
                 raise ValueError('Number of data devices must match number of '
                                  'wal devices for raw mode osds')
 
-        # For this use case we don't apply any custom crush_device_classes
-        # Note that filestore is not supported anymore by the DriveGroupSpec
-        if self.spec.objectstore == 'filestore':
-            # for lvm batch we can just do all devices in one command
-            devs: List = sum(list(devices.values()), [])
-
-            cmd = "lvm batch --no-auto"
-
-            cmd += " {}".format(" ".join(devs))
-
-            if self.spec.journal_size:
-                cmd += " --journal-size {}".format(self.spec.journal_size)
-
-            if journal_devices:
-                cmd += " --journal-devices {}".format(
-                    ' '.join(journal_devices))
-
-            cmd += " --filestore"
-            cmds.append(cmd)
-        else:
-            for d in devices.keys():
-                data_devices: Optional[List[str]] = devices.get(d)
-                if not data_devices:
-                    continue
-
-                if self.spec.method == 'raw':
-                    assert self.spec.objectstore == 'bluestore'
-                    # ceph-volume raw prepare only support 1:1 ratio of data to db/wal devices
-                    # for raw prepare each data device needs its own prepare command
-                    dev_counter = 0
-                    # reversing the lists as we're assigning db_devices sequentially
-                    db_devices.reverse()
-                    wal_devices.reverse()
-
-                    while dev_counter < len(data_devices):
-                        cmd = "raw prepare --bluestore"
-                        cmd += " --data {}".format(data_devices[dev_counter])
-                        if db_devices:
-                            cmd += " --block.db {}".format(db_devices.pop())
-                        if wal_devices:
-                            cmd += " --block.wal {}".format(wal_devices.pop())
-                        if d in self._supported_device_classes:
-                            cmd += " --crush-device-class {}".format(d)
-
-                        cmds.append(cmd)
-                        dev_counter += 1
-
-                elif self.spec.objectstore == 'bluestore':
-                    # for lvm batch we can just do all devices in one command
-
-                    cmd = "lvm batch --no-auto {}".format(" ".join(data_devices))
-
-                    if db_devices:
-                        cmd += " --db-devices {}".format(" ".join(db_devices))
-
-                    if wal_devices:
-                        cmd += " --wal-devices {}".format(" ".join(wal_devices))
-
-                    if self.spec.block_wal_size:
-                        cmd += " --block-wal-size {}".format(self.spec.block_wal_size)
-
-                    if self.spec.block_db_size:
-                        cmd += " --block-db-size {}".format(self.spec.block_db_size)
-
-                    if d in self._supported_device_classes:
-                        cmd += " --crush-device-class {}".format(d)
-                    cmds.append(cmd)
+        for d in devices.keys():
+            data_devices: Optional[List[str]] = devices.get(d)
+            if not data_devices:
+                continue
+
+            if self.spec.method == 'raw':
+                assert self.spec.objectstore == 'bluestore'
+                # ceph-volume raw prepare only support 1:1 ratio of data to db/wal devices
+                # for raw prepare each data device needs its own prepare command
+                dev_counter = 0
+                # reversing the lists as we're assigning db_devices sequentially
+                db_devices.reverse()
+                wal_devices.reverse()
+
+                while dev_counter < len(data_devices):
+                    cmd = "raw prepare --bluestore"
+                    cmd += " --data {}".format(data_devices[dev_counter])
+                    if db_devices:
+                        cmd += " --block.db {}".format(db_devices.pop())
+                    if wal_devices:
+                        cmd += " --block.wal {}".format(wal_devices.pop())
+                    if d in self._supported_device_classes:
+                        cmd += " --crush-device-class {}".format(d)
+
+                    cmds.append(cmd)
+                    dev_counter += 1
+
+            elif self.spec.objectstore == 'bluestore':
+                # for lvm batch we can just do all devices in one command
+
+                cmd = "lvm batch --no-auto {}".format(" ".join(data_devices))
+
+                if db_devices:
+                    cmd += " --db-devices {}".format(" ".join(db_devices))
+
+                if wal_devices:
+                    cmd += " --wal-devices {}".format(" ".join(wal_devices))
+
+                if self.spec.block_wal_size:
+                    cmd += " --block-wal-size {}".format(self.spec.block_wal_size)
+
+                if self.spec.block_db_size:
+                    cmd += " --block-db-size {}".format(self.spec.block_db_size)
+
+                if d in self._supported_device_classes:
+                    cmd += " --crush-device-class {}".format(d)
+                cmds.append(cmd)
 
         for i in range(len(cmds)):
             if self.spec.encrypted:
-- 
2.39.5
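
For context, the two code paths kept by this patch only build plain command
strings. The following standalone Python sketch mirrors that string-building
logic; all device paths are invented for illustration, and the real code
reads them from a DriveGroupSpec/selection rather than from literals:

    # Standalone sketch of the two retained bluestore paths; device paths
    # are hypothetical and chosen only to show the resulting commands.
    data_devices = ["/dev/sdb", "/dev/sdc"]
    db_devices = ["/dev/nvme0n1", "/dev/nvme0n2"]

    # 'lvm batch' path: a single command covering every data device.
    batch_cmd = "lvm batch --no-auto {}".format(" ".join(data_devices))
    if db_devices:
        batch_cmd += " --db-devices {}".format(" ".join(db_devices))
    print(batch_cmd)
    # lvm batch --no-auto /dev/sdb /dev/sdc --db-devices /dev/nvme0n1 /dev/nvme0n2

    # 'raw' path: one "raw prepare" command per data device; db devices
    # are reversed and pop()ed so they pair with data devices 1:1.
    db_devices.reverse()
    for dev in data_devices:
        cmd = "raw prepare --bluestore --data {}".format(dev)
        if db_devices:
            cmd += " --block.db {}".format(db_devices.pop())
        print(cmd)
    # raw prepare --bluestore --data /dev/sdb --block.db /dev/nvme0n1
    # raw prepare --bluestore --data /dev/sdc --block.db /dev/nvme0n2

The reverse()/pop() pairing keeps the db device order aligned with the data
device order while consuming each db device exactly once, which is why the
patch keeps those calls in the raw path.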