cephadm: Allow using paths in all <_devices> drivegroup sections 40838/head
author Juan Miguel Olmo Martínez <jolmomar@redhat.com>
Thu, 11 Feb 2021 12:19:07 +0000 (13:19 +0100)
committer Michael Fritch <mfritch@suse.com>
Tue, 13 Apr 2021 17:47:57 +0000 (11:47 -0600)
This allows specific device paths to be used in any OSD device section (data_devices, db_devices, wal_devices, journal_devices).

Signed-off-by: Juan Miguel Olmo Martínez <jolmomar@redhat.com>
(cherry picked from commit d02683efbfcd5aa1437ebb9e73f89db38f7e7aa6)
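
A minimal sketch (not part of the commit) of what the change enables, built from the classes this patch touches; the import paths assume the src/python-common layout shown in the file list below:

    # Before this patch, DriveGroupSpec.validate() rejected explicit `paths`
    # in anything but data_devices; now every <_devices> section accepts them.
    from ceph.deployment.drive_group import DriveGroupSpec, DeviceSelection
    from ceph.deployment.service_spec import PlacementSpec

    spec = DriveGroupSpec(
        service_id='osd_using_paths',
        placement=PlacementSpec(hosts=['Node01', 'Node02']),
        data_devices=DeviceSelection(paths=['/dev/sdb']),
        db_devices=DeviceSelection(paths=['/dev/sdc']),   # previously rejected
        wal_devices=DeviceSelection(paths=['/dev/sdd']),  # previously rejected
    )
    spec.validate()  # no longer raises DriveGroupValidationError for db/wal paths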

doc/cephadm/drivegroups.rst
src/pybind/mgr/cephadm/tests/test_cephadm.py
src/python-common/ceph/deployment/drive_group.py
src/python-common/ceph/deployment/drive_selection/selector.py

index 845898843d21d1d9d72e6a9a5e3332f4279d40e4..aded823dd5852947ea5956a41418e348239bfa44 100644 (file)
@@ -319,7 +319,7 @@ This can be described with two layouts.
     db_devices:
       model: MC-55-44-XZ
       limit: 2 (db_slots is actually to be favoured here, but it's not implemented yet)
-    ---  
+    ---
     service_type: osd
     service_id: osd_spec_ssd
     placement:
@@ -376,7 +376,7 @@ You can use the 'host_pattern' key in the layout to target certain nodes. Salt t
       rotational: 1
     db_devices:
       rotational: 0
-    ---    
+    ---
     service_type: osd
     service_id: osd_spec_six_to_ten
     placement:
@@ -428,5 +428,25 @@ The OSD spec for this case would look like the following (using the `model` filt
       model: NVME-QQQQ-987
 
 
-This can easily be done with other filters, like `size` or `vendor` as well.
+It is also possible to specify device paths directly for specific hosts, as in the following example:
+
+.. code-block:: yaml
 
+    service_type: osd
+    service_id: osd_using_paths
+    placement:
+      hosts:
+        - Node01
+        - Node02
+    data_devices:
+      paths:
+        - /dev/sdb
+    db_devices:
+      paths:
+        - /dev/sdc
+    wal_devices:
+      paths:
+        - /dev/sdd
+
+
+This can easily be done with other filters, like `size` or `vendor` as well.
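
For context, a spec file like the one added above is applied with
`ceph orch apply osd -i <spec_file>`; cephadm then turns it into the
`ceph-volume lvm batch` invocation exercised by the test change below.
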
index d610372981a3926a49acd2b01d505536a4da12f0..ae17dd3201fe04ac2adc6cf3919068430013a07a 100644 (file)
@@ -500,6 +500,46 @@ class TestCephadm(object):
             _run_cephadm.assert_called_with(
                 'test', 'osd', 'ceph-volume', ['--', 'lvm', 'list', '--format', 'json'])
 
+    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm")
+    def test_apply_osd_save_non_collocated(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
+        _run_cephadm.return_value = ('{}', '', 0)
+        with with_host(cephadm_module, 'test'):
+
+            spec = DriveGroupSpec(
+                service_id='noncollocated',
+                placement=PlacementSpec(
+                    hosts=['test']
+                ),
+                data_devices=DeviceSelection(paths=['/dev/sdb']),
+                db_devices=DeviceSelection(paths=['/dev/sdc']),
+                wal_devices=DeviceSelection(paths=['/dev/sdd'])
+            )
+
+            c = cephadm_module.apply([spec])
+            assert wait(cephadm_module, c) == ['Scheduled osd.noncollocated update...']
+
+            inventory = Devices([
+                Device('/dev/sdb', available=True),
+                Device('/dev/sdc', available=True),
+                Device('/dev/sdd', available=True)
+            ])
+
+            cephadm_module.cache.update_host_devices_networks('test', inventory.devices, {})
+
+            _run_cephadm.return_value = (['{}'], '', 0)
+
+            assert CephadmServe(cephadm_module)._apply_all_services() is False
+
+            _run_cephadm.assert_any_call(
+                'test', 'osd', 'ceph-volume',
+                ['--config-json', '-', '--', 'lvm', 'batch',
+                    '--no-auto', '/dev/sdb', '--db-devices', '/dev/sdc',
+                    '--wal-devices', '/dev/sdd', '--yes', '--no-systemd'],
+                env_vars=['CEPH_VOLUME_OSDSPEC_AFFINITY=noncollocated'],
+                error_ok=True, stdin='{"config": "", "keyring": ""}')
+            _run_cephadm.assert_called_with(
+                'test', 'osd', 'ceph-volume', ['--', 'lvm', 'list', '--format', 'json'])
+
     @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
     @mock.patch("cephadm.module.SpecStore.save")
     def test_apply_osd_save_placement(self, _save_spec, cephadm_module):
@@ -521,6 +561,15 @@ class TestCephadm(object):
             out = wait(cephadm_module, c)
             assert out == "Created no osd(s) on host test; already created?"
 
+    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
+    def test_create_noncollocated_osd(self, cephadm_module):
+        with with_host(cephadm_module, 'test'):
+            dg = DriveGroupSpec(placement=PlacementSpec(host_pattern='test'),
+                                data_devices=DeviceSelection(paths=['']))
+            c = cephadm_module.create_osds(dg)
+            out = wait(cephadm_module, c)
+            assert out == "Created no osd(s) on host test; already created?"
+
     @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
     def test_prepare_drivegroup(self, cephadm_module):
         with with_host(cephadm_module, 'test'):
index f2636c7f838ab97ed408e38c72409d1056e8c088..052e4cc143b220879123cbcfb9b0e68c48d1166c 100644 (file)
@@ -289,8 +289,6 @@ class DriveGroupSpec(ServiceSpec):
         for s in filter(None, specs):
             s.validate()
         for s in filter(None, [self.db_devices, self.wal_devices, self.journal_devices]):
-            if s.paths:
-                raise DriveGroupValidationError("`paths` is only allowed for data_devices")
             if s.all:
                 raise DriveGroupValidationError("`all` is only allowed for data_devices")
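
With the `paths` check removed, `all` remains the only data_devices-only
restriction. A hedged sketch of the resulting validate() behaviour (not part
of the patch; names as defined in this file):

    from ceph.deployment.drive_group import (DeviceSelection, DriveGroupSpec,
                                             DriveGroupValidationError)
    from ceph.deployment.service_spec import PlacementSpec

    spec = DriveGroupSpec(
        service_id='example',
        placement=PlacementSpec(host_pattern='*'),
        data_devices=DeviceSelection(all=True),
        db_devices=DeviceSelection(all=True),  # still rejected
    )
    try:
        spec.validate()
    except DriveGroupValidationError as e:
        print(e)  # `all` is only allowed for data_devices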
 
index e7e2edf7e59941e63f4c08c8e5573a89a3136098..a54df3268a823cbd0076a0390028427d2d0acdee 100644 (file)
@@ -23,18 +23,10 @@ class DriveSelection(object):
         self.spec = spec
         self.existing_daemons = existing_daemons or 0
 
-        if self.spec.data_devices.paths:  # type: ignore
-            # re: type: ignore there is *always* a path attribute assigned to DeviceSelection
-            # it's just None if actual drivegroups are used
-            self._data = self.spec.data_devices.paths  # type: ignore
-            self._db = []  # type: List
-            self._wal = []  # type: List
-            self._journal = []  # type: List
-        else:
-            self._data = self.assign_devices(self.spec.data_devices)
-            self._wal = self.assign_devices(self.spec.wal_devices)
-            self._db = self.assign_devices(self.spec.db_devices)
-            self._journal = self.assign_devices(self.spec.journal_devices)
+        self._data = self.assign_devices(self.spec.data_devices)
+        self._wal = self.assign_devices(self.spec.wal_devices)
+        self._db = self.assign_devices(self.spec.db_devices)
+        self._journal = self.assign_devices(self.spec.journal_devices)
 
     def data_devices(self):
         # type: () -> List[Device]
@@ -111,6 +103,10 @@ class DriveSelection(object):
             logger.debug('data_devices is None')
             return []
 
+        if device_filter.paths:
+            logger.debug('device filter is using explicit paths')
+            return device_filter.paths
+
         devices = list()  # type: List[Device]
         for disk in self.disks:
             logger.debug("Processing disk {}".format(disk.path))
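
With the short-circuit gone, explicit paths flow through assign_devices() per
section, so data/db/wal/journal can each carry their own path list. A rough
usage sketch (constructor and method names assumed from this diff; not part
of the patch):

    from ceph.deployment.drive_group import DeviceSelection, DriveGroupSpec
    from ceph.deployment.drive_selection.selector import DriveSelection
    from ceph.deployment.inventory import Device
    from ceph.deployment.service_spec import PlacementSpec

    spec = DriveGroupSpec(
        service_id='osd_using_paths',
        placement=PlacementSpec(host_pattern='*'),
        data_devices=DeviceSelection(paths=['/dev/sdb']),
        db_devices=DeviceSelection(paths=['/dev/sdc']),
    )
    disks = [Device('/dev/sdb'), Device('/dev/sdc')]
    selection = DriveSelection(spec, disks)
    # db_devices() is now populated from the explicit paths rather than being
    # forced to [] whenever data_devices itself used paths.
    assert [d.path for d in selection.db_devices()] == ['/dev/sdc']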