cephadm, ceph-volume: add tests for crimson OSD support 67290/head
author Shraddha Agrawal <shraddha.agrawal000@gmail.com>
Tue, 10 Feb 2026 13:02:10 +0000 (18:32 +0530)
committer Shraddha Agrawal <shraddha.agrawal000@gmail.com>
Tue, 10 Feb 2026 13:12:50 +0000 (18:42 +0530)
This commit adds tests for crimson OSD support in cephadm and ceph-volume.
The following tests are added:

1. cephadm: DriveGroupSpec validation checks for osd_type.
2. cephadm: entrypoint verification in the unit.run file.
3. cephadm to ceph-volume: command verification when osd_type is specified in the spec.
4. ceph-volume: binary selection verification for the mkfs command.

Fixes: https://tracker.ceph.com/issues/74851
Signed-off-by: Shraddha Agrawal <shraddha.agrawal000@gmail.com>
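
For context: the new osd_type field selects which OSD binary is launched. A minimal sketch of the mapping these tests pin down (names are taken from the test assertions below, not from the implementation; the fallback default is an assumption):

# Sketch only: the binary selection implied by the tests in this
# commit. 'classic' keeps the traditional ceph-osd binary, while
# 'crimson' selects the Seastar-based ceph-osd-crimson.
OSD_BINARIES = {
    'classic': 'ceph-osd',
    'crimson': 'ceph-osd-crimson',
}

def default_osd_binary(osd_type):
    # Falling back to 'ceph-osd' for unknown values is an assumption.
    return OSD_BINARIES.get(osd_type, 'ceph-osd')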
src/ceph-volume/ceph_volume/tests/objectstore/test_baseobjectstore.py
src/cephadm/tests/test_deploy.py
src/pybind/mgr/cephadm/tests/test_cephadm.py
src/python-common/ceph/tests/test_drive_group.py

index fba3bbc48f8457f33945f7c0b17b5ee564bc279b..d34e8cde06faad30094df645488ada90b4b31a71 100644 (file)
@@ -88,6 +88,16 @@ class TestBaseObjectStore:
         bo.osd_id = '123'
         assert bo.get_osd_path() == '/var/lib/ceph/osd/ceph-123/'
 
+    def test_get_default_entrypoint_cmd_classic(self):
+        bo = BaseObjectStore([])
+        bo.osd_type = 'classic'
+        assert bo.get_default_entrypoint_cmd() == 'ceph-osd'
+
+    def test_get_default_entrypoint_cmd_crimson(self):
+        bo = BaseObjectStore([])
+        bo.osd_type = 'crimson'
+        assert bo.get_default_entrypoint_cmd() == 'ceph-osd-crimson'
+
     @patch('ceph_volume.conf.cluster', 'ceph')
     def test_build_osd_mkfs_cmd_base(self):
         bo = BaseObjectStore([])
@@ -114,6 +124,32 @@ class TestBaseObjectStore:
                           '--setuser', 'ceph',
                           '--setgroup', 'ceph']
 
+    @patch('ceph_volume.conf.cluster', 'ceph')
+    def test_build_osd_mkfs_cmd_crimson(self):
+        bo = BaseObjectStore([])
+        bo.osd_path = '/var/lib/ceph/osd/ceph-123/'
+        bo.osd_fsid = 'abcd-1234'
+        bo.objectstore = 'my-fake-objectstore'
+        bo.osd_id = '123'
+        bo.monmap = '/etc/ceph/ceph.monmap'
+        bo.osd_type = 'crimson'
+        result = bo.build_osd_mkfs_cmd()
+
+        assert result == ['ceph-osd-crimson',
+                          '--cluster',
+                          'ceph',
+                          '--osd-objectstore',
+                          'my-fake-objectstore',
+                          '--mkfs', '-i', '123',
+                          '--monmap',
+                          '/etc/ceph/ceph.monmap',
+                          '--keyfile', '-',
+                          '--osd-data',
+                          '/var/lib/ceph/osd/ceph-123/',
+                          '--osd-uuid', 'abcd-1234',
+                          '--setuser', 'ceph',
+                          '--setgroup', 'ceph']
+
     def test_osd_mkfs_ok(self, monkeypatch, fake_call, objectstore):
         args = objectstore(dmcrypt=False)
         bo = BaseObjectStore(args)
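
The assertions above fully determine the generated mkfs command line, so the behaviour under test can be sketched directly. A reconstruction from the asserted output, not the actual ceph-volume code:

# Reconstructed from the assertions above; the real BaseObjectStore
# implementation may differ in detail ('ceph' stands in for the
# patched conf.cluster value).
def build_osd_mkfs_cmd(self):
    # get_default_entrypoint_cmd() returns 'ceph-osd' for classic
    # OSDs and 'ceph-osd-crimson' for crimson OSDs.
    return [
        self.get_default_entrypoint_cmd(),
        '--cluster', 'ceph',
        '--osd-objectstore', self.objectstore,
        '--mkfs', '-i', self.osd_id,
        '--monmap', self.monmap,
        '--keyfile', '-',
        '--osd-data', self.osd_path,
        '--osd-uuid', self.osd_fsid,
        '--setuser', 'ceph',
        '--setgroup', 'ceph',
    ]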
index 1736639ed552708eac3b7d180c6a64bb2ea3d79a..c388a721b838e31f347406089a5cf0dbe3f1f55b 100644 (file)
@@ -446,6 +446,31 @@ def test_deploy_ceph_osd_container(cephadm_fs, funkypatch):
     assert _make_run_dir.call_args[0][2] == 8765
 
 
+def test_deploy_ceph_osd_container_crimson(cephadm_fs, funkypatch):
+    mocks = _common_patches(funkypatch)
+    _make_run_dir = funkypatch.patch('cephadmlib.file_utils.make_run_dir')
+    fsid = 'b01dbeef-701d-9abe-0000-e1e5a47004a7'
+    with with_cephadm_ctx([]) as ctx:
+        ctx.container_engine = mock_podman()
+        ctx.fsid = fsid
+        ctx.name = 'osd.quux'
+        ctx.image = 'quay.io/ceph/ceph:latest'
+        ctx.reconfig = False
+        ctx.allow_ptrace = False
+        ctx.osd_fsid = '00000000-0000-0000-0000-000000000000'
+        ctx.config_blobs = {
+            'config': 'XXXXXXX',
+            'keyring': 'YYYYYY',
+            'osd_type': 'crimson',
+        }
+        _cephadm._common_deploy(ctx)
+
+    basedir = pathlib.Path(f'/var/lib/ceph/{fsid}/osd.quux')
+    with open(basedir / 'unit.run') as f:
+        runfile_lines = f.read().splitlines()
+    assert '--entrypoint /usr/bin/ceph-osd-crimson' in runfile_lines[-1]
+
+
 def test_deploy_ceph_exporter_container(cephadm_fs, funkypatch):
     mocks = _common_patches(funkypatch)
     _firewalld = mocks['Firewalld']
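
The unit.run check above implies cephadm consults osd_type from the deployment's config blobs when composing the container command line. A hypothetical sketch of that decision (osd_entrypoint is an illustrative name, not a real cephadm function):

# Hypothetical helper illustrating the entrypoint choice the test
# verifies; cephadm's actual code path is structured differently.
def osd_entrypoint(config_blobs):
    if config_blobs.get('osd_type') == 'crimson':
        return '/usr/bin/ceph-osd-crimson'
    return '/usr/bin/ceph-osd'

# The last line of unit.run then carries
# '--entrypoint ' + osd_entrypoint(ctx.config_blobs)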
index a9d40ec7f07d5020f23226bc3d6b141cf62a02ab..d69d53d3bd2987090e71cb1885c7c30afaca13d1 100644 (file)
@@ -1290,6 +1290,27 @@ class TestCephadm(object):
             assert all(any(cmd in exp_cmd for exp_cmd in exp_commands)
                        for cmd in out), f'Expected cmds from {out} in {exp_commands}'
 
+    @pytest.mark.parametrize(
+        "devices, preview, exp_commands",
+        [
+            # no preview and only one disk, crimson osd type
+            (['/dev/sda'], False, ["lvm batch --no-auto /dev/sda --objectstore bluestore --osd-type crimson --yes --no-systemd"]),
+            # no preview and multiple disks, crimson osd type
+            (['/dev/sda', '/dev/sdb'], False,
+             ["CEPH_VOLUME_OSDSPEC_AFFINITY=test.spec lvm batch --no-auto /dev/sda /dev/sdb --objectstore bluestore --osd-type crimson --yes --no-systemd"]),
+        ]
+    )
+    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
+    def test_driveselection_to_ceph_volume_crimson(self, cephadm_module, devices, preview, exp_commands):
+        with with_host(cephadm_module, 'test'):
+            dg = DriveGroupSpec(service_id='test.spec', placement=PlacementSpec(
+                host_pattern='test'), data_devices=DeviceSelection(paths=devices),
+                osd_type='crimson')
+            ds = DriveSelection(dg, Devices([Device(path) for path in devices]))
+            out = cephadm_module.osd_service.driveselection_to_ceph_volume(ds, [], preview)
+            assert all(any(cmd in exp_cmd for exp_cmd in exp_commands)
+                       for cmd in out), f'Expected cmds from {out} in {exp_commands}'
+
     @pytest.mark.parametrize(
         "devices, preview, exp_commands",
         [
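
Both expected commands show the spec-level osd_type forwarded to ceph-volume as an --osd-type flag on lvm batch. A simplified sketch of that plumbing (lvm_batch_cmd is illustrative; the real translation lives in ceph.deployment.translate):

# Simplified sketch of the flag plumbing the expected commands imply;
# not the actual translate code.
def lvm_batch_cmd(devices, objectstore='bluestore', osd_type=None):
    cmd = ['lvm', 'batch', '--no-auto', *devices,
           '--objectstore', objectstore]
    if osd_type is not None:
        cmd += ['--osd-type', osd_type]
    cmd += ['--yes', '--no-systemd']
    return ' '.join(cmd)

# lvm_batch_cmd(['/dev/sda'], osd_type='crimson') reproduces the
# single-disk command expected above.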
index afcc5b1fec1081294a5e6ca6884bea17252c4e29..a26bf2150709b1d234288459019ba48bbb8b65cf 100644 (file)
@@ -7,7 +7,7 @@ import yaml
 from ceph.deployment import drive_selection, translate
 from ceph.deployment.hostspec import HostSpec, SpecValidationError
 from ceph.deployment.inventory import Device
-from ceph.deployment.service_spec import PlacementSpec
+from ceph.deployment.service_spec import PlacementSpec, ServiceSpec
 from ceph.tests.utils import _mk_inventory, _mk_device
 from ceph.deployment.drive_group import DriveGroupSpec, DeviceSelection, \
     DriveGroupValidationError
@@ -449,6 +449,19 @@ def test_ceph_volume_command_14(test_input4):
         spec.validate()
 
 
+def test_ceph_volume_command_15():
+    spec = DriveGroupSpec(placement=PlacementSpec(host_pattern='*'),
+                          service_id='foobar',
+                          data_devices=DeviceSelection(all=True),
+                          osd_type='crimson',
+                          )
+    spec.validate()
+    inventory = _mk_inventory(_mk_device()*2)
+    sel = drive_selection.DriveSelection(spec, inventory)
+    cmds = translate.to_ceph_volume(sel, []).run()
+    assert all(cmd == 'lvm batch --no-auto /dev/sda /dev/sdb --objectstore bluestore --osd-type crimson --yes --no-systemd' for cmd in cmds), f'Expected crimson lvm batch command in {cmds}'
+
+
 def test_raw_ceph_volume_command_0():
     spec = DriveGroupSpec(placement=PlacementSpec(host_pattern='*'),
                           service_id='foobar',
@@ -597,3 +610,32 @@ def test_raw_ceph_volume_command_4(test_input7):
     assert cmds[0] == 'raw prepare --bluestore --data /dev/sda --block.db /dev/sdd --block.wal /dev/sdg --crush-device-class hdd'
     assert cmds[1] == 'raw prepare --bluestore --data /dev/sdb --block.db /dev/sdf --block.wal /dev/sdi --crush-device-class nvme'
     assert cmds[2] == 'raw prepare --bluestore --data /dev/sdc --block.db /dev/sde --block.wal /dev/sdh --crush-device-class ssd'
+
+
+def test_drive_group_osd_type_invalid():
+    spec = DriveGroupSpec(
+        placement=PlacementSpec(host_pattern='*'),
+        service_id='foobar',
+        data_devices=DeviceSelection(all=True),
+        osd_type='invalid',
+    )
+    with pytest.raises(DriveGroupValidationError, match='osd_type must be one of'):
+        spec.validate()
+
+
+def test_drive_group_osd_type_crimson_roundtrip():
+    spec = DriveGroupSpec(
+        placement=PlacementSpec(host_pattern='*'),
+        service_id='foobar',
+        data_devices=DeviceSelection(all=True),
+        osd_type='crimson',
+    )
+    spec.validate()
+
+    j = spec.to_json()
+    assert j['spec']['osd_type'] == 'crimson'
+
+    spec2 = ServiceSpec.from_json(j)
+    assert spec2.osd_type == 'crimson'
+    j2 = spec2.to_json()
+    assert j2['spec']['osd_type'] == 'crimson'
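
Taken together, these two tests pin down both the validation rule and the JSON round-trip for the new field. A sketch of the validation they imply (the allowed set is an assumption: only 'classic' and 'crimson' are exercised by this commit's tests):

# Assumed validation matching the error text asserted above; the real
# DriveGroupSpec check raises DriveGroupValidationError and its
# allowed set may differ.
ALLOWED_OSD_TYPES = ('classic', 'crimson')

def validate_osd_type(osd_type):
    if osd_type is not None and osd_type not in ALLOWED_OSD_TYPES:
        raise ValueError(f'osd_type must be one of {ALLOWED_OSD_TYPES}')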