]> git.apps.os.sepia.ceph.com Git - ceph.git/commitdiff
cephadm: remove restriction for crush device classes 56087/head
authorSeena Fallah <seenafallah@gmail.com>
Sun, 11 Feb 2024 21:50:05 +0000 (22:50 +0100)
committerAdam King <adking@redhat.com>
Sun, 10 Mar 2024 19:19:53 +0000 (15:19 -0400)
A restriction was introduced in https://github.com/ceph/ceph/commit/6c6cb2f5130dbcf8e42cf03666173948411fc92b that prevents OSDs from being created with custom crush device classes.
The CRUSH device class is the key that lets CRUSH distinguish between multiple storage classes, so it must accept arbitrary custom names.

Fixes: https://tracker.ceph.com/issues/64382
Signed-off-by: Seena Fallah <seenafallah@gmail.com>
(cherry picked from commit 5999196f37bc5cb12de26d5f0aa077229e3ffc42)

Conflicts:
src/python-common/ceph/deployment/translate.py

src/python-common/ceph/deployment/translate.py
src/python-common/ceph/tests/test_drive_group.py

index 0f404af305b103e6ad14649027bc6b73a9698169..b8587e0c6522e0581cad249447b4c9be01ab2133 100644 (file)
@@ -13,9 +13,7 @@ logger = logging.getLogger(__name__)
 # TODO refactor this to a DriveSelection method
 class to_ceph_volume(object):
 
-    _supported_device_classes = [
-        "hdd", "ssd", "nvme"
-    ]
+    NO_CRUSH = '_NO_CRUSH'
 
     def __init__(self,
                  selection,  # type: DriveSelection
@@ -34,20 +32,6 @@ class to_ceph_volume(object):
 
         lvcount: Dict[str, List[str]] = dict()
 
-        """
-        Default entry for the global crush_device_class definition;
-        if there's no global definition at spec level, we do not want
-        to apply anything to the provided devices, hence we need to run
-        a ceph-volume command without that option, otherwise we init an
-        entry for the globally defined crush_device_class.
-        """
-        if self.spec.crush_device_class:
-            lvcount[self.spec.crush_device_class] = []
-
-        # entry where the drives that don't require a crush_device_class
-        # option are collected
-        lvcount["no_crush"] = []
-
         """
         for each device, check if it's just a path or it has a crush_device
         class definition, and append an entry to the right crush_device_
@@ -57,35 +41,16 @@ class to_ceph_volume(object):
             # iterate on List[Device], containing both path and
             # crush_device_class
             path = device.path
-            crush_device_class = device.crush_device_class
+            crush_device_class = (
+                device.crush_device_class
+                or self.spec.crush_device_class
+                or self.NO_CRUSH
+            )
 
             if path is None:
                 raise ValueError("Device path can't be empty")
 
-            """
-            if crush_device_class is specified for the current Device path
-            we should either init the array for this group or append the
-            drive path to the existing entry
-            """
-            if crush_device_class:
-                if crush_device_class in lvcount.keys():
-                    lvcount[crush_device_class].append(path)
-                else:
-                    lvcount[crush_device_class] = [path]
-                continue
-
-            """
-            if no crush_device_class is specified for the current path
-            but a global definition is present in the spec, so we group
-            the drives together
-            """
-            if crush_device_class is None and self.spec.crush_device_class:
-                lvcount[self.spec.crush_device_class].append(path)
-                continue
-            else:
-                # default use case
-                lvcount["no_crush"].append(path)
-                continue
+            lvcount.setdefault(crush_device_class, []).append(path)
 
         return lvcount
 
@@ -157,7 +122,7 @@ class to_ceph_volume(object):
                             cmd += " --block.db {}".format(db_devices.pop())
                         if wal_devices:
                             cmd += " --block.wal {}".format(wal_devices.pop())
-                        if d in self._supported_device_classes:
+                        if d != self.NO_CRUSH:
                             cmd += " --crush-device-class {}".format(d)
 
                         cmds.append(cmd)
@@ -180,7 +145,7 @@ class to_ceph_volume(object):
                     if self.spec.block_db_size:
                         cmd += " --block-db-size {}".format(self.spec.block_db_size)
 
-                    if d in self._supported_device_classes:
+                    if d != self.NO_CRUSH:
                         cmd += " --crush-device-class {}".format(d)
                     cmds.append(cmd)
 
@@ -201,17 +166,6 @@ class to_ceph_volume(object):
                 cmds[i] += " --yes"
                 cmds[i] += " --no-systemd"
 
-            # set the --crush-device-class option when:
-            # - crush_device_class is specified at spec level (global for all the osds)  # noqa E501
-            # - crush_device_class is allowed
-            # - there's no override at osd level
-            if (
-                    self.spec.crush_device_class and
-                    self.spec.crush_device_class in self._supported_device_classes and  # noqa E501
-                    "crush-device-class" not in cmds[i]
-               ):
-                cmds[i] += " --crush-device-class {}".format(self.spec.crush_device_class)  # noqa E501
-
             if self.preview:
                 cmds[i] += " --report"
                 cmds[i] += " --format json"
index 77e9b4083d495d44e0582878b092d97946c7c154..cd4a238af0d59426e4e5b68071d2c0b6abdcb383 100644 (file)
@@ -392,8 +392,12 @@ def test_ceph_volume_command_12(test_input2):
     drive = drive_selection.DriveSelection(spec, spec.data_devices.paths)
     cmds = translate.to_ceph_volume(drive, []).run()
 
-    assert (cmds[0] == 'lvm batch --no-auto /dev/sdb --crush-device-class ssd --yes --no-systemd')  # noqa E501
-    assert (cmds[1] == 'lvm batch --no-auto /dev/sda --crush-device-class hdd --yes --no-systemd')  # noqa E501
+    expected_cmds = [
+        'lvm batch --no-auto /dev/sdb --crush-device-class ssd --yes --no-systemd',
+        'lvm batch --no-auto /dev/sda --crush-device-class hdd --yes --no-systemd',
+    ]
+    assert len(cmds) == len(expected_cmds), f"Expected {expected_cmds} got {cmds}"
+    assert all(cmd in cmds for cmd in expected_cmds), f'Expected {expected_cmds} got {cmds}'
 
 
 @pytest.mark.parametrize("test_input3",
@@ -418,8 +422,12 @@ def test_ceph_volume_command_13(test_input3):
     drive = drive_selection.DriveSelection(spec, spec.data_devices.paths)
     cmds = translate.to_ceph_volume(drive, []).run()
 
-    assert (cmds[0] == 'lvm batch --no-auto /dev/sdb --yes --no-systemd')  # noqa E501
-    assert (cmds[1] == 'lvm batch --no-auto /dev/sda --crush-device-class hdd --yes --no-systemd')  # noqa E501
+    expected_cmds = [
+        'lvm batch --no-auto /dev/sdb --yes --no-systemd',
+        'lvm batch --no-auto /dev/sda --crush-device-class hdd --yes --no-systemd',
+    ]
+    assert len(cmds) == len(expected_cmds), f"Expected {expected_cmds} got {cmds}"
+    assert all(cmd in cmds for cmd in expected_cmds), f'Expected {expected_cmds} got {cmds}'
 
 
 @pytest.mark.parametrize("test_input4",