From: Yonatan Zaken Date: Tue, 17 Mar 2026 10:15:39 +0000 (+0200) Subject: python-common: fix raw OSD prepare to use osd-id instead of osd-ids X-Git-Url: http://git-server-git.apps.pok.os.sepia.ceph.com/?a=commitdiff_plain;h=00e2ce86079c5bda84c53560b921198fa1ff906f;p=ceph.git python-common: fix raw OSD prepare to use osd-id instead of osd-ids ceph-volume raw prepare does not support --osd-ids (plural), only --osd-id (singular) per device since for raw prepare each data device needs its own prepare command. When replacing a destroyed OSD using "ceph orch daemon add osd <host>:<device> raw", the wrong argument was passed causing ceph-volume to fail with "unrecognized arguments: --osd-ids". Fix the osd_id_claims handling in translate.py to use --osd-id (singular, per device) for raw mode, and retain --osd-ids (plural) for lvm batch mode. Fixes: https://tracker.ceph.com/issues/69284 Signed-off-by: Yonatan Zaken --- diff --git a/src/python-common/ceph/deployment/translate.py b/src/python-common/ceph/deployment/translate.py index 7b28efbf6314..ae8b6783798a 100644 --- a/src/python-common/ceph/deployment/translate.py +++ b/src/python-common/ceph/deployment/translate.py @@ -155,7 +155,13 @@ class to_ceph_volume(object): cmds[i] += " --data-allocate-fraction {}".format(self.spec.data_allocate_fraction) if self.osd_id_claims: - cmds[i] += " --osd-ids {}".format(" ".join(self.osd_id_claims)) + if self.spec.method == 'raw': + # raw prepare expects --osd-id (singular) for each device + if i < len(self.osd_id_claims): + cmds[i] += " --osd-id {}".format(self.osd_id_claims[i]) + else: + # lvm batch expects --osd-ids (plural) with all ids + cmds[i] += " --osd-ids {}".format(" ".join(self.osd_id_claims)) if self.spec.method != 'raw': cmds[i] += " --yes" diff --git a/src/python-common/ceph/tests/test_drive_group.py b/src/python-common/ceph/tests/test_drive_group.py index 528ec2488053..1ccd476b9a39 100644 --- a/src/python-common/ceph/tests/test_drive_group.py +++ 
b/src/python-common/ceph/tests/test_drive_group.py @@ -612,6 +612,40 @@ def test_raw_ceph_volume_command_4(test_input7): assert cmds[2] == 'raw prepare --bluestore --data /dev/sdc --block.db /dev/sde --block.wal /dev/sdh --crush-device-class ssd --osd-type classic' +def test_raw_ceph_volume_command_5(): + spec = DriveGroupSpec(placement=PlacementSpec(host_pattern='*'), + service_id='foobar', + data_devices=DeviceSelection(rotational=True), + method='raw', + osd_id_claims={'host1': ['0', '1']}, + ) + spec.validate() + inventory = _mk_inventory(_mk_device(rotational=True) + + _mk_device(rotational=True) + ) + sel = drive_selection.DriveSelection(spec, inventory) + cmds = translate.to_ceph_volume(sel, ['0', '1']).run() + assert cmds[0] == 'raw prepare --bluestore --data /dev/sda --osd-type classic --osd-id 0' + assert cmds[1] == 'raw prepare --bluestore --data /dev/sdb --osd-type classic --osd-id 1' + + +def test_raw_ceph_volume_command_6(): + spec = DriveGroupSpec(placement=PlacementSpec(host_pattern='*'), + service_id='foobar', + data_devices=DeviceSelection(rotational=True), + method='raw', + osd_id_claims={'host1': ['0']}, + ) + spec.validate() + inventory = _mk_inventory(_mk_device(rotational=True) + + _mk_device(rotational=True) + ) + sel = drive_selection.DriveSelection(spec, inventory) + cmds = translate.to_ceph_volume(sel, ['0']).run() + assert cmds[0] == 'raw prepare --bluestore --data /dev/sda --osd-type classic --osd-id 0' + assert cmds[1] == 'raw prepare --bluestore --data /dev/sdb --osd-type classic' + + def test_ceph_volume_command_seastore(): spec = DriveGroupSpec(placement=PlacementSpec(host_pattern='*'), service_id='foobar',