]> git-server-git.apps.pok.os.sepia.ceph.com Git - ceph.git/commitdiff
ceph-volume: raw activate should ignore lvm backed OSD devices 68657/head
authorGuillaume Abrioux <gabrioux@ibm.com>
Tue, 28 Apr 2026 15:10:59 +0000 (17:10 +0200)
committerGuillaume Abrioux <gabrioux@ibm.com>
Wed, 29 Apr 2026 07:05:23 +0000 (09:05 +0200)
The generic activate (`ceph-volume activate`) runs the
raw path before LVM. Raw.activate was walking lsblk / raw
list entries and could hit block devices that are actually
logical volumes created by `ceph-volume lvm prepare` or `lvm batch`
(with ceph lvm tags on the LV).
That made raw activation poke at LVM-backed OSDs instead of
leaving them to `lvm activate`.

With this commit, ceph-volume now builds the set of LV paths
that carry those tags once (`lvs` via ceph_volume_lvm_prepare_lv_paths)
and skips any candidate path that matches, so only real raw
OSDs go through the raw activate path.

Also, we now pass `with_tpm` through the luks_open() calls for the db
and wal devices, so encrypted metadata uses the same systemd-cryptsetup
path as the block LV when ceph.with_tpm is set.

Fixes: https://tracker.ceph.com/issues/76305
Signed-off-by: Guillaume Abrioux <gabrioux@ibm.com>
src/ceph-volume/ceph_volume/api/lvm.py
src/ceph-volume/ceph_volume/objectstore/lvm.py
src/ceph-volume/ceph_volume/objectstore/raw.py
src/ceph-volume/ceph_volume/tests/api/test_api.py

index 417742a401365f2443f7c2779132cf8238d34d64..c530265e9cda4d79447442e45295004803b1be48 100644 (file)
@@ -330,6 +330,42 @@ def is_ceph_device(lv: "Volume") -> bool:
 
     return True
 
+
+def volume_is_lvm_objectstore_lv(lv: "Volume") -> bool:
+    return lv.tags.get('ceph.type') in ('block', 'db', 'wal')
+
+
+def ceph_volume_lvm_prepare_lv_paths() -> set[str]:
+    paths: set[str] = set()
+    try:
+        for lv in get_lvs():
+            if not volume_is_lvm_objectstore_lv(lv) or not lv.lv_path:
+                continue
+            paths.add(lv.lv_path)
+            try:
+                paths.add(os.path.realpath(lv.lv_path))
+            except OSError:
+                pass
+    except Exception:
+        logger.debug('get_lvs failed while building ceph LVM prepare path set',
+                     exc_info=True)
+    return paths
+
+
+def is_ceph_volume_lvm_prepared(devpath: str,
+                                prepared_lv_paths: Optional[set[str]] = None) -> bool:
+    paths = (prepared_lv_paths if prepared_lv_paths is not None
+             else ceph_volume_lvm_prepare_lv_paths())
+    if not devpath or not paths:
+        return False
+    if devpath in paths:
+        return True
+    try:
+        return os.path.realpath(devpath) in paths
+    except OSError:
+        return False
+
+
 class Lvm:
     def __init__(self, name_key: str, tags_key: str, **kw: Any) -> None:
         self.name: str = kw.get(name_key, '')
index 792adcea09f123f96fe22629b04209b3ee1fc96e..dfd908e8772d973c3ed88b3cfaf9a5884ce01101 100644 (file)
@@ -322,7 +322,8 @@ class Lvm(BaseObjectStore):
             if is_encrypted:
                 encryption_utils.luks_open(dmcrypt_secret,
                                            device_lv.__dict__['lv_path'],
-                                           device_uuid)
+                                           device_uuid,
+                                           with_tpm=self.with_tpm)
                 return '/dev/mapper/%s' % device_uuid
             return device_lv.__dict__['lv_path']
 
@@ -332,7 +333,8 @@ class Lvm(BaseObjectStore):
             if is_encrypted:
                 encryption_utils.luks_open(dmcrypt_secret,
                                            physical_device,
-                                           device_uuid)
+                                           device_uuid,
+                                           with_tpm=self.with_tpm)
                 return '/dev/mapper/%s' % device_uuid
             return physical_device
 
index 4ff17a6991320b4737b148efac17d9eafd6eebd1..b6ac8cb418f21a57cb845d39ec32218601a93585 100644 (file)
@@ -7,7 +7,7 @@ from ceph_volume.util import system, disk
 from ceph_volume.util import prepare as prepare_utils
 from ceph_volume.util import encryption as encryption_utils
 from ceph_volume.util import nvme as nvme_utils
-from ceph_volume.util.device import Device
+from ceph_volume.api import lvm as lvm_api
 from ceph_volume.devices.lvm.common import rollback_osd
 from ceph_volume.devices.raw.list import direct_report
 from typing import Any, Dict, List, Optional, TYPE_CHECKING
@@ -165,7 +165,9 @@ class Raw(BaseObjectStore):
 
         This function activates Ceph Object Storage Daemons (OSDs) on the system.
         It iterates over all block devices, checking if they have a LUKS2 signature and
-        are encrypted for Ceph. If a device's OSD fsid matches and it is enrolled with TPM2,
+        are encrypted for Ceph. LVs tagged by ``ceph-volume lvm prepare`` / ``lvm batch``
+        (``ceph.type`` in block/db/wal) are skipped so raw activation does not consume
+        LVM-backed OSDs. If a device's OSD fsid matches and it is enrolled with TPM2,
         the function pre-activates it. After collecting the relevant devices, it attempts to
         activate any OSDs found.
 
@@ -175,20 +177,22 @@ class Raw(BaseObjectStore):
         assert self.devices or self.osd_id or self.osd_fsid
 
         activated_any: bool = False
+        lvm_prepare_lv_paths = lvm_api.ceph_volume_lvm_prepare_lv_paths()
 
         for d in disk.lsblk_all(abspath=True):
             device: str = d.get('NAME', '')
+            if lvm_api.is_ceph_volume_lvm_prepared(device, lvm_prepare_lv_paths):
+                continue
             luks2 = encryption_utils.CephLuks2(device)
             if luks2.is_ceph_encrypted:
                 if luks2.is_tpm2_enrolled and self.osd_fsid == luks2.osd_fsid:
                     self.pre_activate_tpm2(device)
         found = direct_report(self.devices)
 
-        holders = disk.get_block_device_holders()
         for osd_uuid, meta in found.items():
             realpath_device = os.path.realpath(meta['device'])
-            parent_device = holders.get(realpath_device)
-            if parent_device and any('ceph.cluster_fsid' in lv.lv_tags for lv in Device(parent_device).lvs):
+            if lvm_api.is_ceph_volume_lvm_prepared(realpath_device,
+                                                   lvm_prepare_lv_paths):
                 continue
             osd_id = meta['osd_id']
             if self.osd_id is not None and str(osd_id) != str(self.osd_id):
index 35f7dea4f10d3c110413eee581534da28a8e63b9..93743226dca153a35d06f4f3c0827bbbfbfbea24 100644 (file)
@@ -48,6 +48,64 @@ class TestVolume:
     def test_is_not_ceph_device(self, dev):
         assert not api.is_ceph_device(dev)
 
+    @pytest.mark.parametrize('ceph_type,expected',
+                             [('block', True),
+                              ('db', True),
+                              ('wal', True),
+                              ('data', False),
+                              ('', False)])
+    def test_volume_is_lvm_objectstore_lv(self, ceph_type, expected):
+        tags = f'ceph.type={ceph_type},ceph.osd_id=0' if ceph_type else 'ceph.osd_id=0'
+        lv = api.Volume(lv_name='vg/lv', lv_tags=tags, lv_path='/dev/vg/lv')
+        assert api.volume_is_lvm_objectstore_lv(lv) is expected
+
+    def test_ceph_volume_lvm_prepare_lv_paths(self):
+        block_lv = api.Volume(
+            lv_name='ceph-vg/osd-block',
+            lv_tags='ceph.type=block,ceph.osd_id=0,ceph.osd_fsid=x,ceph.cluster_fsid=y',
+            lv_path='/dev/ceph-vg/osd-block-uuid',
+        )
+        data_lv = api.Volume(
+            lv_name='ceph-vg/osd-data',
+            lv_tags='ceph.type=data,ceph.osd_id=1',
+            lv_path='/dev/ceph-vg/osd-data',
+        )
+        with patch('ceph_volume.api.lvm.get_lvs', return_value=[block_lv, data_lv]):
+            paths = api.ceph_volume_lvm_prepare_lv_paths()
+        assert '/dev/ceph-vg/osd-block-uuid' in paths
+
+    def test_is_ceph_volume_lvm_prepared_with_path_set(self):
+        paths = {'/dev/ceph-vg/osd-block-uuid'}
+        assert api.is_ceph_volume_lvm_prepared('/dev/ceph-vg/osd-block-uuid', paths)
+        assert not api.is_ceph_volume_lvm_prepared('/dev/sdb', paths)
+        assert not api.is_ceph_volume_lvm_prepared('/dev/sdb', set())
+        assert not api.is_ceph_volume_lvm_prepared('', paths)
+
+    def test_is_ceph_volume_lvm_prepared_true_single_lvs(self):
+        block_lv = api.Volume(
+            lv_name='ceph-vg/osd-block',
+            lv_tags='ceph.type=block,ceph.osd_id=0,ceph.osd_fsid=x,ceph.cluster_fsid=y',
+            lv_path='/dev/ceph-vg/osd-block-uuid',
+        )
+        with patch('ceph_volume.api.lvm.get_lvs', return_value=[block_lv]):
+            assert api.is_ceph_volume_lvm_prepared('/dev/ceph-vg/osd-block-uuid')
+
+    def test_is_ceph_volume_lvm_prepared_false_empty_lvs(self):
+        with patch('ceph_volume.api.lvm.get_lvs', return_value=[]):
+            assert not api.is_ceph_volume_lvm_prepared('/dev/sdb')
+
+    def test_is_ceph_volume_lvm_prepared_false_no_ceph_type(self):
+        data_lv = api.Volume(
+            lv_name='ceph-vg/osd-data',
+            lv_tags='ceph.type=data,ceph.osd_id=0',
+            lv_path='/dev/vg/rawdata',
+        )
+        with patch('ceph_volume.api.lvm.get_lvs', return_value=[data_lv]):
+            assert not api.is_ceph_volume_lvm_prepared('/dev/vg/rawdata')
+
+    def test_is_ceph_volume_lvm_prepared_empty_path(self):
+        assert not api.is_ceph_volume_lvm_prepared('')
+
     def test_no_empty_lv_name(self):
         with pytest.raises(ValueError):
             api.Volume(lv_name='', lv_tags='')