]> git-server-git.apps.pok.os.sepia.ceph.com Git - ceph.git/commitdiff
python-common: fix raw OSD prepare to use osd-id instead of osd-ids 67842/head
author: Yonatan Zaken <yzaken@redhat.com>
Tue, 17 Mar 2026 10:15:39 +0000 (12:15 +0200)
committer: Yonatan Zaken <yzaken@redhat.com>
Tue, 17 Mar 2026 10:15:39 +0000 (12:15 +0200)
ceph-volume raw prepare does not support --osd-ids (plural), only
--osd-id (singular) per device, since in raw mode each data device
requires its own prepare command. When replacing a destroyed OSD using
"ceph orch daemon add osd <host>:<dev> raw", the wrong argument was
passed, causing ceph-volume to fail with "unrecognized arguments:
--osd-ids".

Fix the osd_id_claims handling in translate.py to use --osd-id
(singular, per device) for raw mode, and retain --osd-ids (plural)
for lvm batch mode.

Fixes: https://tracker.ceph.com/issues/69284
Signed-off-by: Yonatan Zaken <yzaken@redhat.com>
src/python-common/ceph/deployment/translate.py
src/python-common/ceph/tests/test_drive_group.py

index 7b28efbf63149dd2975c846d696c1c1004cbf0e7..ae8b6783798ac825d86ae7b44425c26a24e036ae 100644 (file)
@@ -155,7 +155,13 @@ class to_ceph_volume(object):
                 cmds[i] += " --data-allocate-fraction {}".format(self.spec.data_allocate_fraction)
 
             if self.osd_id_claims:
-                cmds[i] += " --osd-ids {}".format(" ".join(self.osd_id_claims))
+                if self.spec.method == 'raw':
+                    # raw prepare expects --osd-id (singular) for each device
+                    if i < len(self.osd_id_claims):
+                        cmds[i] += " --osd-id {}".format(self.osd_id_claims[i])
+                else:
+                    # lvm batch expects --osd-ids (plural) with all ids
+                    cmds[i] += " --osd-ids {}".format(" ".join(self.osd_id_claims))
 
             if self.spec.method != 'raw':
                 cmds[i] += " --yes"
index 528ec24880533f3ce5a98cb9132564e53601eaf1..1ccd476b9a39bdf7b9e23892a7988807df423316 100644 (file)
@@ -612,6 +612,40 @@ def test_raw_ceph_volume_command_4(test_input7):
     assert cmds[2] == 'raw prepare --bluestore --data /dev/sdc --block.db /dev/sde --block.wal /dev/sdh --crush-device-class ssd --osd-type classic'
 
 
+def test_raw_ceph_volume_command_5():
+    spec = DriveGroupSpec(placement=PlacementSpec(host_pattern='*'),
+                          service_id='foobar',
+                          data_devices=DeviceSelection(rotational=True),
+                          method='raw',
+                          osd_id_claims={'host1': ['0', '1']},
+                          )
+    spec.validate()
+    inventory = _mk_inventory(_mk_device(rotational=True) +
+                              _mk_device(rotational=True)
+                              )
+    sel = drive_selection.DriveSelection(spec, inventory)
+    cmds = translate.to_ceph_volume(sel, ['0', '1']).run()
+    assert cmds[0] == 'raw prepare --bluestore --data /dev/sda --osd-type classic --osd-id 0'
+    assert cmds[1] == 'raw prepare --bluestore --data /dev/sdb --osd-type classic --osd-id 1'
+
+
+def test_raw_ceph_volume_command_6():
+    spec = DriveGroupSpec(placement=PlacementSpec(host_pattern='*'),
+                          service_id='foobar',
+                          data_devices=DeviceSelection(rotational=True),
+                          method='raw',
+                          osd_id_claims={'host1': ['0']},
+                          )
+    spec.validate()
+    inventory = _mk_inventory(_mk_device(rotational=True) +
+                              _mk_device(rotational=True)
+                              )
+    sel = drive_selection.DriveSelection(spec, inventory)
+    cmds = translate.to_ceph_volume(sel, ['0']).run()
+    assert cmds[0] == 'raw prepare --bluestore --data /dev/sda --osd-type classic --osd-id 0'
+    assert cmds[1] == 'raw prepare --bluestore --data /dev/sdb --osd-type classic'
+
+
 def test_ceph_volume_command_seastore():
     spec = DriveGroupSpec(placement=PlacementSpec(host_pattern='*'),
                           service_id='foobar',