From: Juan Miguel Olmo Martínez
Date: Thu, 11 Feb 2021 12:19:07 +0000 (+0100)
Subject: cephadm: Allow using paths in all <_devices> drivegroup sections
X-Git-Tag: v15.2.13~3^2~7^2
X-Git-Url: http://git.apps.os.sepia.ceph.com/?a=commitdiff_plain;h=8e9a30e77105da3633865e105090ef58a62b30f0;p=ceph.git

cephadm: Allow using paths in all <_devices> drivegroup sections

This allows specific device paths to be used for any OSD component.

Signed-off-by: Juan Miguel Olmo Martínez
(cherry picked from commit d02683efbfcd5aa1437ebb9e73f89db38f7e7aa6)
---

diff --git a/doc/cephadm/drivegroups.rst b/doc/cephadm/drivegroups.rst
index 845898843d21d..aded823dd5852 100644
--- a/doc/cephadm/drivegroups.rst
+++ b/doc/cephadm/drivegroups.rst
@@ -319,7 +319,7 @@ This can be described with two layouts.
     db_devices:
       model: MC-55-44-XZ
       limit: 2 (db_slots is actually to be favoured here, but it's not implemented yet)
-    --- 
+    ---
     service_type: osd
     service_id: osd_spec_ssd
     placement:
@@ -376,7 +376,7 @@ You can use the 'host_pattern' key in the layout to target certain nodes. Salt t
       rotational: 1
     db_devices:
       rotational: 0
-    --- 
+    ---
     service_type: osd
     service_id: osd_spec_six_to_ten
    placement:
@@ -428,5 +428,26 @@ The OSD spec for this case would look like the following (using the `model` filt
       model: NVME-QQQQ-987
 
 
 
-This can easily be done with other filters, like `size` or `vendor` as well.
+It is also possible to specify device paths directly for specific hosts, as in the following example:
+
+.. code-block:: yaml
+
+    service_type: osd
+    service_id: osd_using_paths
+    placement:
+      hosts:
+        - Node01
+        - Node02
+    data_devices:
+      paths:
+        - /dev/sdb
+    db_devices:
+      paths:
+        - /dev/sdc
+    wal_devices:
+      paths:
+        - /dev/sdd
+
+
+This can easily be done with other filters, like `size` or `vendor` as well.
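
Note: a spec file like the one above is applied with `ceph orch apply -i <file>`. For illustration only, here is a rough Python equivalent of the same spec, built with the python-common classes that the tests below exercise; the host names and device paths are placeholders:

    # Sketch: programmatic equivalent of the osd_using_paths YAML spec above.
    # Mirrors the objects used by the new test below; names are illustrative.
    from ceph.deployment.drive_group import DriveGroupSpec, DeviceSelection
    from ceph.deployment.service_spec import PlacementSpec

    spec = DriveGroupSpec(
        service_id='osd_using_paths',
        placement=PlacementSpec(hosts=['Node01', 'Node02']),
        data_devices=DeviceSelection(paths=['/dev/sdb']),
        db_devices=DeviceSelection(paths=['/dev/sdc']),
        wal_devices=DeviceSelection(paths=['/dev/sdd']),
    )
    spec.validate()  # with this commit, paths outside data_devices no longer raise
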
diff --git a/src/pybind/mgr/cephadm/tests/test_cephadm.py b/src/pybind/mgr/cephadm/tests/test_cephadm.py
index d610372981a39..ae17dd3201fe0 100644
--- a/src/pybind/mgr/cephadm/tests/test_cephadm.py
+++ b/src/pybind/mgr/cephadm/tests/test_cephadm.py
@@ -500,6 +500,46 @@ class TestCephadm(object):
         _run_cephadm.assert_called_with(
             'test', 'osd', 'ceph-volume', ['--', 'lvm', 'list', '--format', 'json'])
 
+    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm")
+    def test_apply_osd_save_non_collocated(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
+        _run_cephadm.return_value = ('{}', '', 0)
+        with with_host(cephadm_module, 'test'):
+
+            spec = DriveGroupSpec(
+                service_id='noncollocated',
+                placement=PlacementSpec(
+                    hosts=['test']
+                ),
+                data_devices=DeviceSelection(paths=['/dev/sdb']),
+                db_devices=DeviceSelection(paths=['/dev/sdc']),
+                wal_devices=DeviceSelection(paths=['/dev/sdd'])
+            )
+
+            c = cephadm_module.apply([spec])
+            assert wait(cephadm_module, c) == ['Scheduled osd.noncollocated update...']
+
+            inventory = Devices([
+                Device('/dev/sdb', available=True),
+                Device('/dev/sdc', available=True),
+                Device('/dev/sdd', available=True)
+            ])
+
+            cephadm_module.cache.update_host_devices_networks('test', inventory.devices, {})
+
+            _run_cephadm.return_value = (['{}'], '', 0)
+
+            assert CephadmServe(cephadm_module)._apply_all_services() is False
+
+            _run_cephadm.assert_any_call(
+                'test', 'osd', 'ceph-volume',
+                ['--config-json', '-', '--', 'lvm', 'batch',
+                    '--no-auto', '/dev/sdb', '--db-devices', '/dev/sdc',
+                    '--wal-devices', '/dev/sdd', '--yes', '--no-systemd'],
+                env_vars=['CEPH_VOLUME_OSDSPEC_AFFINITY=noncollocated'],
+                error_ok=True, stdin='{"config": "", "keyring": ""}')
+            _run_cephadm.assert_called_with(
+                'test', 'osd', 'ceph-volume', ['--', 'lvm', 'list', '--format', 'json'])
+
     @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
     @mock.patch("cephadm.module.SpecStore.save")
     def test_apply_osd_save_placement(self, _save_spec, cephadm_module):
@@ -521,6 +561,15 @@ class TestCephadm(object):
         out = wait(cephadm_module, c)
         assert out == "Created no osd(s) on host test; already created?"
 
+    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
+    def test_create_noncollocated_osd(self, cephadm_module):
+        with with_host(cephadm_module, 'test'):
+            dg = DriveGroupSpec(placement=PlacementSpec(host_pattern='test'),
+                                data_devices=DeviceSelection(paths=['']))
+            c = cephadm_module.create_osds(dg)
+            out = wait(cephadm_module, c)
+            assert out == "Created no osd(s) on host test; already created?"
+
     @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
     def test_prepare_drivegroup(self, cephadm_module):
         with with_host(cephadm_module, 'test'):
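
The ceph-volume invocation asserted in test_apply_osd_save_non_collocated is derived from the spec's three device selections. As a hypothetical helper (not cephadm code), this sketch rebuilds the `lvm batch` portion of that argument list, i.e. the part after '--config-json - --':

    # Hypothetical helper (not part of cephadm): rebuilds the `ceph-volume lvm
    # batch` arguments asserted above from the three device-path lists.
    from typing import List, Optional

    def batch_args(data, db=None, wal=None):
        # type: (List[str], Optional[List[str]], Optional[List[str]]) -> List[str]
        args = ['lvm', 'batch', '--no-auto'] + data
        if db:
            args += ['--db-devices'] + db
        if wal:
            args += ['--wal-devices'] + wal
        return args + ['--yes', '--no-systemd']

    assert batch_args(['/dev/sdb'], ['/dev/sdc'], ['/dev/sdd']) == [
        'lvm', 'batch', '--no-auto', '/dev/sdb',
        '--db-devices', '/dev/sdc', '--wal-devices', '/dev/sdd',
        '--yes', '--no-systemd']
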
diff --git a/src/python-common/ceph/deployment/drive_group.py b/src/python-common/ceph/deployment/drive_group.py
index f2636c7f838ab..052e4cc143b22 100644
--- a/src/python-common/ceph/deployment/drive_group.py
+++ b/src/python-common/ceph/deployment/drive_group.py
@@ -289,8 +289,6 @@ class DriveGroupSpec(ServiceSpec):
         for s in filter(None, specs):
             s.validate()
         for s in filter(None, [self.db_devices, self.wal_devices, self.journal_devices]):
-            if s.paths:
-                raise DriveGroupValidationError("`paths` is only allowed for data_devices")
             if s.all:
                 raise DriveGroupValidationError("`all` is only allowed for data_devices")
 
diff --git a/src/python-common/ceph/deployment/drive_selection/selector.py b/src/python-common/ceph/deployment/drive_selection/selector.py
index e7e2edf7e5994..a54df3268a823 100644
--- a/src/python-common/ceph/deployment/drive_selection/selector.py
+++ b/src/python-common/ceph/deployment/drive_selection/selector.py
@@ -23,18 +23,10 @@ class DriveSelection(object):
         self.spec = spec
         self.existing_daemons = existing_daemons or 0
 
-        if self.spec.data_devices.paths:  # type: ignore
-            # re: type: ignore there is *always* a path attribute assigned to DeviceSelection
-            # it's just None if actual drivegroups are used
-            self._data = self.spec.data_devices.paths  # type: ignore
-            self._db = []  # type: List
-            self._wal = []  # type: List
-            self._journal = []  # type: List
-        else:
-            self._data = self.assign_devices(self.spec.data_devices)
-            self._wal = self.assign_devices(self.spec.wal_devices)
-            self._db = self.assign_devices(self.spec.db_devices)
-            self._journal = self.assign_devices(self.spec.journal_devices)
+        self._data = self.assign_devices(self.spec.data_devices)
+        self._wal = self.assign_devices(self.spec.wal_devices)
+        self._db = self.assign_devices(self.spec.db_devices)
+        self._journal = self.assign_devices(self.spec.journal_devices)
 
     def data_devices(self):
         # type: () -> List[Device]
@@ -111,6 +103,10 @@ class DriveSelection(object):
             logger.debug('data_devices is None')
             return []
 
+        if device_filter.paths:
+            logger.debug('device filter is using explicit paths')
+            return device_filter.paths
+
         devices = list()  # type: List[Device]
         for disk in self.disks:
             logger.debug("Processing disk {}".format(disk.path))
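
The selector change above is the core of the feature: assign_devices() now short-circuits to the explicit paths of whichever section carries them, instead of honouring paths only on data_devices and discarding the db/wal/journal selections. A simplified stand-in (not the real DriveSelection class) demonstrating the new behaviour:

    # Simplified stand-in for DriveSelection.assign_devices() (illustrative only):
    # explicit paths win for any section; otherwise fall through to filtering.
    from typing import List, Optional

    class FakeFilter(object):
        def __init__(self, paths=None):
            # type: (Optional[List[str]]) -> None
            self.paths = paths

    def assign_devices(disks, device_filter):
        # type: (List[str], Optional[FakeFilter]) -> List[str]
        if device_filter is None:
            return []
        if device_filter.paths:
            # new in this commit: applies to db/wal/journal sections too
            return device_filter.paths
        return list(disks)  # attribute-based matching elided in this sketch

    assert assign_devices(['/dev/sda'], FakeFilter(paths=['/dev/sdc'])) == ['/dev/sdc']
    assert assign_devices(['/dev/sda'], FakeFilter()) == ['/dev/sda']
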