git.apps.os.sepia.ceph.com Git - ceph.git/commitdiff
ceph-volume: support zapping by osd-id for RAW OSDs 60395/head
author     Guillaume Abrioux <gabrioux@ibm.com>
           Fri, 18 Oct 2024 08:56:32 +0000 (08:56 +0000)
committer  Guillaume Abrioux <gabrioux@ibm.com>
           Fri, 25 Oct 2024 07:52:05 +0000 (07:52 +0000)
Currently, zapping by osd-id is only possible for LVM-based OSDs.

This commit introduces the changes required to also support zapping by
osd-id for RAW-based OSDs.

Fixes: https://tracker.ceph.com/issues/68576
Signed-off-by: Guillaume Abrioux <gabrioux@ibm.com>
src/ceph-volume/ceph_volume/devices/lvm/zap.py
src/ceph-volume/ceph_volume/devices/raw/list.py
src/ceph-volume/ceph_volume/tests/devices/lvm/data_zap.py [new file with mode: 0644]
src/ceph-volume/ceph_volume/tests/devices/lvm/test_zap.py
src/ceph-volume/ceph_volume/tests/devices/raw/data_list.py [new file with mode: 0644]
src/ceph-volume/ceph_volume/tests/devices/raw/test_list.py
src/ceph-volume/ceph_volume/tests/test_inventory.py
src/ceph-volume/ceph_volume/util/device.py
src/ceph-volume/ceph_volume/util/disk.py

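Editor's note: to illustrate the new lookup order, the sketch below shows the fall-through from LVM tags to the RAW bluestore-label report when resolving devices from an OSD id. It is a minimal illustration only, not the committed implementation: it reuses the api.get_lvs() and direct_report() helpers that appear in this diff, assumes the report entries expose 'device', 'osd_id' and 'osd_uuid' keys, and omits the db/wal partition handling done by ensure_associated_lvs(). The committed Zap.find_associated_devices() additionally raises SystemExit when neither source yields a device.

# Minimal sketch (not the committed code): resolve the devices backing an OSD
# by checking LVM tags first, then the RAW (bluestore label) report.
from typing import List

from ceph_volume.api import lvm as api
from ceph_volume.devices.raw.list import direct_report

def devices_for_osd(osd_id: str = '', osd_fsid: str = '') -> List[str]:
    tags = {k: v for k, v in {'ceph.osd_id': osd_id,
                              'ceph.osd_fsid': osd_fsid}.items() if v}
    lvs = api.get_lvs(tags=tags)          # LVM-based OSDs first
    if lvs:
        return [lv.lv_path for lv in lvs]
    report = direct_report()              # then fall back to RAW-based OSDs
    return [osd['device'] for osd in report.values()
            if str(osd.get('osd_id')) == str(osd_id)
            or osd.get('osd_uuid') == osd_fsid]
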
index 388f6aeea27082fc7afc9862ec30953401bfdcae..c278de43eb0a9e3601b3c2220d110aaf24a8df44 100644 (file)
@@ -10,7 +10,8 @@ from ceph_volume.api import lvm as api
 from ceph_volume.util import system, encryption, disk, arg_validators, str_to_int, merge_dict
 from ceph_volume.util.device import Device
 from ceph_volume.systemd import systemctl
-from typing import Any, Dict, List
+from ceph_volume.devices.raw.list import direct_report
+from typing import Any, Dict, List, Set
 
 logger = logging.getLogger(__name__)
 mlogger = terminal.MultiLogger(__name__)
@@ -95,83 +96,126 @@ def zap_data(path):
         'conv=fsync'
     ])
 
-def find_associated_devices(osd_id: str = '', osd_fsid: str = '') -> List[api.Volume]:
-    """
-    From an ``osd_id`` and/or an ``osd_fsid``, filter out all the LVs in the
-    system that match those tag values, further detect if any partitions are
-    part of the OSD, and then return the set of LVs and partitions (if any).
-    """
-    lv_tags = {}
-    lv_tags = {key: value for key, value in {
-        'ceph.osd_id': osd_id,
-        'ceph.osd_fsid': osd_fsid
-    }.items() if value}
 
-    lvs = api.get_lvs(tags=lv_tags)
+class Zap:
+    help = 'Removes all data and filesystems from a logical volume or partition.'
 
-    if not lvs:
-        raise RuntimeError('Unable to find any LV for zapping OSD: '
-                            f'{osd_id or osd_fsid}')
-    devices_to_zap = ensure_associated_lvs(lvs, lv_tags)
+    def __init__(self, argv: List[str]) -> None:
+        self.argv = argv
+        self.osd_ids_to_zap: List[str] = []
 
-    return [Device(path) for path in set(devices_to_zap) if path]
+    def ensure_associated_raw(self, raw_report: Dict[str, Any]) -> List[str]:
+        osd_id: str = self.args.osd_id
+        osd_uuid: str = self.args.osd_fsid
+        raw_devices: Set[str] = set()
 
-def ensure_associated_lvs(lvs: List[api.Volume],
-                          lv_tags: Dict[str, Any] = {}) -> List[str]:
-    """
-    Go through each LV and ensure if backing devices (journal, wal, block)
-    are LVs or partitions, so that they can be accurately reported.
-    """
-    # look for many LVs for each backing type, because it is possible to
-    # receive a filtering for osd.1, and have multiple failed deployments
-    # leaving many journals with osd.1 - usually, only a single LV will be
-    # returned
-
-    db_lvs = api.get_lvs(tags=merge_dict(lv_tags, {'ceph.type': 'db'}))
-    wal_lvs = api.get_lvs(tags=merge_dict(lv_tags, {'ceph.type': 'wal'}))
-    backing_devices = [(db_lvs, 'db'),
-                       (wal_lvs, 'wal')]
-
-    verified_devices = []
-
-    for lv in lvs:
-        # go through each lv and append it, otherwise query `blkid` to find
-        # a physical device. Do this for each type (journal,db,wal) regardless
-        # if they have been processed in the previous LV, so that bad devices
-        # with the same ID can be caught
-        for ceph_lvs, _type in backing_devices:
-            if ceph_lvs:
-                verified_devices.extend([l.lv_path for l in ceph_lvs])
-                continue
-
-            # must be a disk partition, by querying blkid by the uuid we are
-            # ensuring that the device path is always correct
-            try:
-                device_uuid = lv.tags['ceph.%s_uuid' % _type]
-            except KeyError:
-                # Bluestore will not have ceph.journal_uuid, and Filestore
-                # will not not have ceph.db_uuid
-                continue
+        if len([details.get('osd_id') for _, details in raw_report.items() if details.get('osd_id') == osd_id]) > 1:
+            if not osd_uuid:
+                raise RuntimeError(f'Multiple OSDs found with id {osd_id}, pass --osd-fsid')
 
-            osd_device = disk.get_device_from_partuuid(device_uuid)
-            if not osd_device:
-                # if the osd_device is not found by the partuuid, then it is
-                # not possible to ensure this device exists anymore, so skip it
-                continue
-            verified_devices.append(osd_device)
+        if not osd_uuid:
+            for _, details in raw_report.items():
+                if details.get('osd_id') == int(osd_id):
+                    osd_uuid = details.get('osd_uuid')
+                    break
 
-        verified_devices.append(lv.lv_path)
+        for osd_uuid, details in raw_report.items():
+            device: str = details.get('device')
+            if details.get('osd_uuid') == osd_uuid:
+                raw_devices.add(device)
 
-    # reduce the list from all the duplicates that were added
-    return list(set(verified_devices))
+        return list(raw_devices)
+        
 
+    def find_associated_devices(self) -> List[api.Volume]:
+        """From an ``osd_id`` and/or an ``osd_fsid``, filter out all the Logical Volumes (LVs) in the
+        system that match those tag values, further detect if any partitions are
+        part of the OSD, and then return the set of LVs and partitions (if any).
 
-class Zap:
-    help = 'Removes all data and filesystems from a logical volume or partition.'
+        The function first queries the LVM-based OSDs using the provided `osd_id` or `osd_fsid`.
+        If no matches are found, it then searches the system for RAW-based OSDs.
 
-    def __init__(self, argv: List[str]) -> None:
-        self.argv = argv
-        self.osd_ids_to_zap: List[str] = []
+        Raises:
+            SystemExit: If no OSDs are found, the function raises a `SystemExit` with an appropriate message.
+
+        Returns:
+            List[api.Volume]: A list of `api.Volume` objects corresponding to the OSD's Logical Volumes (LVs)
+            or partitions that are associated with the given `osd_id` or `osd_fsid`.
+
+        Notes:
+            - If neither `osd_id` nor `osd_fsid` are provided, the function will not be able to find OSDs.
+            - The search proceeds from LVM-based OSDs to RAW-based OSDs if no Logical Volumes are found.
+        """
+        lv_tags = {}
+        lv_tags = {key: value for key, value in {
+            'ceph.osd_id': self.args.osd_id,
+            'ceph.osd_fsid': self.args.osd_fsid
+        }.items() if value}
+        devices_to_zap: List[str] = []
+        lvs = api.get_lvs(tags=lv_tags)
+
+        if lvs:
+            devices_to_zap = self.ensure_associated_lvs(lvs, lv_tags)
+        else:
+            mlogger.debug(f'No OSD identified by "{self.args.osd_id or self.args.osd_fsid}" was found among LVM-based OSDs.')
+            mlogger.debug('Proceeding to check RAW-based OSDs.')
+            raw_osds: Dict[str, Any] = direct_report()
+            if raw_osds:
+                devices_to_zap = self.ensure_associated_raw(raw_osds)
+        if not devices_to_zap:
+            raise SystemExit('No OSDs were found.')
+
+        return [Device(path) for path in set(devices_to_zap) if path]
+
+    def ensure_associated_lvs(self,
+                              lvs: List[api.Volume],
+                              lv_tags: Dict[str, Any] = {}) -> List[str]:
+        """
+        Go through each LV and ensure if backing devices (journal, wal, block)
+        are LVs or partitions, so that they can be accurately reported.
+        """
+        # look for many LVs for each backing type, because it is possible to
+        # receive a filtering for osd.1, and have multiple failed deployments
+        # leaving many journals with osd.1 - usually, only a single LV will be
+        # returned
+
+        db_lvs = api.get_lvs(tags=merge_dict(lv_tags, {'ceph.type': 'db'}))
+        wal_lvs = api.get_lvs(tags=merge_dict(lv_tags, {'ceph.type': 'wal'}))
+        backing_devices = [(db_lvs, 'db'),
+                        (wal_lvs, 'wal')]
+
+        verified_devices = []
+
+        for lv in lvs:
+            # go through each lv and append it, otherwise query `blkid` to find
+            # a physical device. Do this for each type (journal,db,wal) regardless
+            # if they have been processed in the previous LV, so that bad devices
+            # with the same ID can be caught
+            for ceph_lvs, _type in backing_devices:
+                if ceph_lvs:
+                    verified_devices.extend([l.lv_path for l in ceph_lvs])
+                    continue
+
+                # must be a disk partition, by querying blkid by the uuid we are
+                # ensuring that the device path is always correct
+                try:
+                    device_uuid = lv.tags['ceph.%s_uuid' % _type]
+                except KeyError:
+                    # Bluestore will not have ceph.journal_uuid, and Filestore
+                    # will not have ceph.db_uuid
+                    continue
+
+                osd_device = disk.get_device_from_partuuid(device_uuid)
+                if not osd_device:
+                    # if the osd_device is not found by the partuuid, then it is
+                    # not possible to ensure this device exists anymore, so skip it
+                    continue
+                verified_devices.append(osd_device)
+
+            verified_devices.append(lv.lv_path)
+
+        # reduce the list from all the duplicates that were added
+        return list(set(verified_devices))
 
     def unmount_lv(self, lv: api.Volume) -> None:
         if lv.tags.get('ceph.cluster_name') and lv.tags.get('ceph.osd_id'):
@@ -355,7 +399,6 @@ class Zap:
             SystemExit: When the device is a mapper and not a mpath device.
         """
         devices = self.args.devices
-
         for device in devices:
             mlogger.info("Zapping: %s", device.path)
             if device.is_mapper and not device.is_mpath:
@@ -388,7 +431,7 @@ class Zap:
                 mlogger.error("OSD ID %s is running, stop it with:" % self.args.osd_id)
                 mlogger.error("systemctl stop ceph-osd@%s" % self.args.osd_id)
                 raise SystemExit("Unable to zap devices associated with OSD ID: %s" % self.args.osd_id)
-        self.args.devices = find_associated_devices(self.args.osd_id, self.args.osd_fsid)
+        self.args.devices = self.find_associated_devices()
         self.zap()
 
     def dmcrypt_close(self, dmcrypt_uuid: str) -> None:
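
Editor's note: for reference, the RAW report consumed by Zap.find_associated_devices() above is keyed by OSD fsid. The entry below is an assumed, reduced example: the keys are limited to those read by Zap.ensure_associated_raw(), and the values are borrowed from the test fixture added by this commit; disk.bluestore_info() may expose further fields.

# Assumed shape of one direct_report() entry (illustrative only).
raw_report_example = {
    'd5a496bc-dcb9-4ad0-a12c-393d3200d2b6': {      # keyed by osd_uuid
        'osd_id': 5,                               # from the "whoami" label
        'osd_uuid': 'd5a496bc-dcb9-4ad0-a12c-393d3200d2b6',
        'device': '/dev/vdx',                      # device carrying the label
        'type': 'bluestore',
    },
}
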
index f6ac08eab98a9038840df6627203b37f5ed9bcea..68923216a4116d5953d84148847269a3095a0b6a 100644 (file)
@@ -5,12 +5,14 @@ import logging
 from textwrap import dedent
 from ceph_volume import decorators, process
 from ceph_volume.util import disk
-from typing import Any, Dict, List as _List
+from ceph_volume.util.device import Device
+from typing import Any, Dict, Optional, List as _List
+from concurrent.futures import ThreadPoolExecutor
 
 logger = logging.getLogger(__name__)
 
 
-def direct_report(devices):
+def direct_report(devices: Optional[_List[str]] = None) -> Dict[str, Any]:
     """
     Other non-cli consumers of listing information will want to consume the
     report without the need to parse arguments or other flags. This helper
@@ -20,27 +22,29 @@ def direct_report(devices):
     _list = List([])
     return _list.generate(devices)
 
-def _get_bluestore_info(dev: str) -> Dict[str, Any]:
+def _get_bluestore_info(devices: _List[str]) -> Dict[str, Any]:
     result: Dict[str, Any] = {}
-    out, err, rc = process.call([
-        'ceph-bluestore-tool', 'show-label',
-        '--dev', dev], verbose_on_failure=False)
+    command: _List[str] = ['ceph-bluestore-tool',
+                           'show-label', '--bdev_aio_poll_ms=1']
+    for device in devices:
+        command.extend(['--dev', device])
+    out, err, rc = process.call(command, verbose_on_failure=False)
     if rc:
-        # ceph-bluestore-tool returns an error (below) if device is not bluestore OSD
-        #   > unable to read label for <device>: (2) No such file or directory
-        # but it's possible the error could be for a different reason (like if the disk fails)
-        logger.debug(f'assuming device {dev} is not BlueStore; ceph-bluestore-tool failed to get info from device: {out}\n{err}')
+        logger.debug(f"ceph-bluestore-tool couldn't detect any BlueStore device.\n{out}\n{err}")
     else:
         oj = json.loads(''.join(out))
-        if dev not in oj:
-            # should be impossible, so warn
-            logger.warning(f'skipping device {dev} because it is not reported in ceph-bluestore-tool output: {out}')
-        try:
-            result = disk.bluestore_info(dev, oj)
-        except KeyError as e:
-            # this will appear for devices that have a bluestore header but aren't valid OSDs
-            # for example, due to incomplete rollback of OSDs: https://tracker.ceph.com/issues/51869
-            logger.error(f'device {dev} does not have all BlueStore data needed to be a valid OSD: {out}\n{e}')
+        for device in devices:
+            if device not in oj:
+                # should be impossible, so warn
+                logger.warning(f'skipping device {device} because it is not reported in ceph-bluestore-tool output: {out}')
+            if oj.get(device):
+                try:
+                    osd_uuid = oj[device]['osd_uuid']
+                    result[osd_uuid] = disk.bluestore_info(device, oj)
+                except KeyError as e:
+                    # this will appear for devices that have a bluestore header but aren't valid OSDs
+                    # for example, due to incomplete rollback of OSDs: https://tracker.ceph.com/issues/51869
+                    logger.error(f'device {device} does not have all BlueStore data needed to be a valid OSD: {out}\n{e}')
     return result
 
 
@@ -50,68 +54,67 @@ class List(object):
 
     def __init__(self, argv: _List[str]) -> None:
         self.argv = argv
-
-    def is_atari_partitions(self, _lsblk: Dict[str, Any]) -> bool:
-        dev = _lsblk['NAME']
-        if _lsblk.get('PKNAME'):
-            parent = _lsblk['PKNAME']
-            try:
-                if disk.has_bluestore_label(parent):
-                    logger.warning(('ignoring child device {} whose parent {} is a BlueStore OSD.'.format(dev, parent),
-                                    'device is likely a phantom Atari partition. device info: {}'.format(_lsblk)))
-                    return True
-            except OSError as e:
-                logger.error(('ignoring child device {} to avoid reporting invalid BlueStore data from phantom Atari partitions.'.format(dev),
-                            'failed to determine if parent device {} is BlueStore. err: {}'.format(parent, e)))
-                return True
-        return False
-
-    def exclude_atari_partitions(self, _lsblk_all: Dict[str, Any]) -> _List[Dict[str, Any]]:
-        return [_lsblk for _lsblk in _lsblk_all if not self.is_atari_partitions(_lsblk)]
-
-    def generate(self, devs=None):
+        self.info_devices: _List[Dict[str, str]] = []
+        self.devices_to_scan: _List[str] = []
+
+    def exclude_atari_partitions(self) -> None:
+        result: _List[str] = []
+        for info_device in self.info_devices:
+            path = info_device['NAME']
+            parent_device = info_device.get('PKNAME')
+            if parent_device:
+                try:
+                    if disk.has_bluestore_label(parent_device):
+                        logger.warning(('ignoring child device {} whose parent {} is a BlueStore OSD.'.format(path, parent_device),
+                                        'device is likely a phantom Atari partition. device info: {}'.format(info_device)))
+                        continue
+                except OSError as e:
+                    logger.error(('ignoring child device {} to avoid reporting invalid BlueStore data from phantom Atari partitions.'.format(path),
+                                'failed to determine if parent device {} is BlueStore. err: {}'.format(parent_device, e)))
+                    continue
+            result.append(path)
+        self.devices_to_scan = result
+
+    def exclude_lvm_osd_devices(self) -> None:
+        with ThreadPoolExecutor() as pool:
+            filtered_devices_to_scan = pool.map(self.filter_lvm_osd_devices, self.devices_to_scan)
+            self.devices_to_scan = [device for device in filtered_devices_to_scan if device is not None]
+
+    def filter_lvm_osd_devices(self, device: str) -> Optional[str]:
+        d = Device(device)
+        return d.path if not d.ceph_device_lvm else None
+
+    def generate(self, devices: Optional[_List[str]] = None) -> Dict[str, Any]:
         logger.debug('Listing block devices via lsblk...')
-        info_devices = []
-        if not devs or not any(devs):
+        if not devices or not any(devices):
             # If no devs are given initially, we want to list ALL devices including children and
             # parents. Parent disks with child partitions may be the appropriate device to return if
             # the parent disk has a bluestore header, but children may be the most appropriate
             # devices to return if the parent disk does not have a bluestore header.
-            info_devices = disk.lsblk_all(abspath=True)
-            devs = [device['NAME'] for device in info_devices if device.get('NAME',)]
+            self.info_devices = disk.lsblk_all(abspath=True)
+            # Linux kernels built with CONFIG_ATARI_PARTITION enabled can falsely interpret
+            # bluestore's on-disk format as an Atari partition table. These false Atari partitions
+            # can be interpreted as real OSDs if a bluestore OSD was previously created on the false
+            # partition. See https://tracker.ceph.com/issues/52060 for more info. If a device has a
+            # parent, it is a child. If the parent is a valid bluestore OSD, the child will only
+            # exist if it is a phantom Atari partition, and the child should be ignored. If the
+            # parent isn't bluestore, then the child could be a valid bluestore OSD. If we fail to
+            # determine whether a parent is bluestore, we should err on the side of not reporting
+            # the child so as not to give a false negative.
+            self.exclude_atari_partitions()
+            self.exclude_lvm_osd_devices()
+
         else:
-            for dev in devs:
-                info_devices.append(disk.lsblk(dev, abspath=True))
-
-        # Linux kernels built with CONFIG_ATARI_PARTITION enabled can falsely interpret
-        # bluestore's on-disk format as an Atari partition table. These false Atari partitions
-        # can be interpreted as real OSDs if a bluestore OSD was previously created on the false
-        # partition. See https://tracker.ceph.com/issues/52060 for more info. If a device has a
-        # parent, it is a child. If the parent is a valid bluestore OSD, the child will only
-        # exist if it is a phantom Atari partition, and the child should be ignored. If the
-        # parent isn't bluestore, then the child could be a valid bluestore OSD. If we fail to
-        # determine whether a parent is bluestore, we should err on the side of not reporting
-        # the child so as not to give a false negative.
-        info_devices = self.exclude_atari_partitions(info_devices)
-
-        result = {}
-        logger.debug('inspecting devices: {}'.format(devs))
-        for info_device in info_devices:
-            bs_info = _get_bluestore_info(info_device['NAME'])
-            if not bs_info:
-                # None is also returned in the rare event that there is an issue reading info from
-                # a BlueStore disk, so be sure to log our assumption that it isn't bluestore
-                logger.info('device {} does not have BlueStore information'.format(info_device['NAME']))
-                continue
-            uuid = bs_info['osd_uuid']
-            if uuid not in result:
-                result[uuid] = {}
-            result[uuid].update(bs_info)
+            self.devices_to_scan = devices
+
+        result: Dict[str, Any] = {}
+        logger.debug('inspecting devices: {}'.format(self.devices_to_scan))
+        result = _get_bluestore_info(self.devices_to_scan)
 
         return result
 
     @decorators.needs_root
-    def list(self, args):
+    def list(self, args: argparse.Namespace) -> None:
         report = self.generate(args.device)
         if args.format == 'json':
             print(json.dumps(report, indent=4, sort_keys=True))
@@ -120,7 +123,7 @@ class List(object):
                 raise SystemExit('No valid Ceph devices found')
             raise RuntimeError('not implemented yet')
 
-    def main(self):
+    def main(self) -> None:
         sub_command_help = dedent("""
         List OSDs on raw devices with raw device labels (usually the first
         block of the device).
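
Editor's note: the listing path above also gains two optimizations: ceph-bluestore-tool is now invoked once with several --dev arguments instead of once per device, and devices already claimed by LVM-based OSDs are filtered out concurrently before the scan. Below is a minimal sketch of that filter, assuming only names present in this diff (Device, ceph_device_lvm); it is not the committed method.

# Minimal sketch of the parallel LVM filter used by `ceph-volume raw list`.
# Building a Device() typically shells out to tools such as lsblk, blkid and
# LVM queries, so the candidate devices are probed concurrently; anything
# already consumed by an LVM-based OSD (ceph_device_lvm) is dropped from the
# RAW scan.
from concurrent.futures import ThreadPoolExecutor
from typing import List, Optional

from ceph_volume.util.device import Device

def drop_lvm_osd_devices(devices: List[str]) -> List[str]:
    def keep(path: str) -> Optional[str]:
        dev = Device(path)
        return None if dev.ceph_device_lvm else dev.path
    with ThreadPoolExecutor() as pool:
        return [path for path in pool.map(keep, devices) if path is not None]
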
diff --git a/src/ceph-volume/ceph_volume/tests/devices/lvm/data_zap.py b/src/ceph-volume/ceph_volume/tests/devices/lvm/data_zap.py
new file mode 100644 (file)
index 0000000..cca64e8
--- /dev/null
@@ -0,0 +1,81 @@
+ceph_bluestore_tool_output = '''
+{
+    "/dev/sdb": {
+        "osd_uuid": "d5a496bc-dcb9-4ad0-a12c-393d3200d2b6",
+        "size": 1099511627776,
+        "btime": "2021-07-23T16:02:22.809186+0000",
+        "description": "main",
+        "bfm_blocks": "268435456",
+        "bfm_blocks_per_key": "128",
+        "bfm_bytes_per_block": "4096",
+        "bfm_size": "1099511627776",
+        "bluefs": "1",
+        "ceph_fsid": "sdb-fsid",
+        "ceph_version_when_created": "ceph version 19.3.0-5537-gb9ba4e48 (b9ba4e48633d6d90d5927a4e66b9ecbb4d7e6e73) squid (dev)",
+        "kv_backend": "rocksdb",
+        "magic": "ceph osd volume v026",
+        "mkfs_done": "yes",
+        "osd_key": "AQAO6PpgK+y4CBAAixq/X7OVimbaezvwD/cDmg==",
+        "ready": "ready",
+        "require_osd_release": "16",
+        "type": "bluestore",
+        "whoami": "0"
+    },
+    "/dev/vdx": {
+        "osd_uuid": "d5a496bc-dcb9-4ad0-a12c-393d3200d2b6",
+        "size": 214748364800,
+        "btime": "2024-10-16T10:51:05.955279+0000",
+        "description": "main",
+        "bfm_blocks": "52428800",
+        "bfm_blocks_per_key": "128",
+        "bfm_bytes_per_block": "4096",
+        "bfm_size": "214748364800",
+        "bluefs": "1",
+        "ceph_fsid": "2d20bc8c-8a0c-11ef-aaba-525400e54507",
+        "ceph_version_when_created": "ceph version 19.3.0-5537-gb9ba4e48 (b9ba4e48633d6d90d5927a4e66b9ecbb4d7e6e73) squid (dev)",
+        "created_at": "2024-10-16T10:51:09.121455Z",
+        "elastic_shared_blobs": "1",
+        "epoch": "16",
+        "kv_backend": "rocksdb",
+        "magic": "ceph osd volume v026",
+        "multi": "yes",
+        "osd_key": "AQCZmg9nxOKTCBAA6EQftuqMuKMHqypSAfqBsQ==",
+        "ready": "ready",
+        "type": "bluestore",
+        "whoami": "5"
+    },
+    "/dev/vdy": {
+        "osd_uuid": "d5a496bc-dcb9-4ad0-a12c-393d3200d2b6",
+        "size": 214748364800,
+        "btime": "2024-10-16T10:51:05.961279+0000",
+        "description": "bluefs db"
+    },
+    "/dev/vdz": {
+        "osd_uuid": "d5a496bc-dcb9-4ad0-a12c-393d3200d2b6",
+        "size": 214748364800,
+        "btime": "2024-10-16T10:51:05.961279+0000",
+        "description": "bluefs wal"
+    }
+}
+'''.split('\n')
+
+lsblk_all = ['NAME="/dev/sdb" KNAME="/dev/sdb" PKNAME="" PARTLABEL=""',
+             'NAME="/dev/sdx" KNAME="/dev/sdx" PKNAME="" PARTLABEL=""',
+             'NAME="/dev/sdy" KNAME="/dev/sdy" PKNAME="" PARTLABEL=""',
+             'NAME="/dev/sdz" KNAME="/dev/sdz" PKNAME="" PARTLABEL=""']
+
+blkid_output = ['/dev/ceph-1172bba3-3e0e-45e5-ace6-31ae8401221f/osd-block-5050a85c-d1a7-4d66-b4ba-2e9b1a2970ae: TYPE="ceph_bluestore" USAGE="other"']
+
+udevadm_property = '''DEVNAME=/dev/sdb
+DEVTYPE=disk
+ID_ATA=1
+ID_BUS=ata
+ID_MODEL=SK_hynix_SC311_SATA_512GB
+ID_PART_TABLE_TYPE=gpt
+ID_PART_TABLE_UUID=c8f91d57-b26c-4de1-8884-0c9541da288c
+ID_PATH=pci-0000:00:17.0-ata-3
+ID_PATH_TAG=pci-0000_00_17_0-ata-3
+ID_REVISION=70000P10
+ID_SERIAL=SK_hynix_SC311_SATA_512GB_MS83N71801150416A
+TAGS=:systemd:
+USEC_INITIALIZED=16117769'''.split('\n')
\ No newline at end of file
index efe52c053ffc31386ce8c7168bb038af6a1737b8..d9b3bdfd239128846d06fe038d0d7ab418fee7f3 100644 (file)
@@ -1,3 +1,4 @@
+# type: ignore
 import os
 import pytest
 from copy import deepcopy
@@ -5,6 +6,25 @@ from mock.mock import patch, call, Mock
 from ceph_volume import process
 from ceph_volume.api import lvm as api
 from ceph_volume.devices.lvm import zap
+from . import data_zap
+from typing import Tuple, List
+
+
+def process_call(command, **kw):
+    result: Tuple[List[str], List[str], int] = ''
+    if 'udevadm' in command:
+        result = data_zap.udevadm_property, [], 0
+    if 'ceph-bluestore-tool' in command:
+        result = data_zap.ceph_bluestore_tool_output, [], 0
+    if 'is-active' in command:
+        result = [], [], 1
+    if 'lsblk' in command:
+        result = data_zap.lsblk_all, [], 0
+    if 'blkid' in command:
+        result = data_zap.blkid_output, [], 0
+    if 'pvs' in command:
+        result = [], [], 0
+    return result
 
 
 class TestZap:
@@ -30,10 +50,10 @@ class TestZap:
             zap.Zap(argv=['--clear', '/dev/foo']).main()
         assert e.value.code == 1
 
-
-class TestFindAssociatedDevices(object):
-
-    def test_no_lvs_found_that_match_id(self, monkeypatch, device_info):
+    @patch('ceph_volume.devices.lvm.zap.direct_report', Mock(return_value={}))
+    @patch('ceph_volume.devices.raw.list.List.filter_lvm_osd_devices', Mock(return_value='/dev/sdb'))
+    @patch('ceph_volume.process.call', Mock(side_effect=process_call))
+    def test_no_lvs_and_raw_found_that_match_id(self, is_root, monkeypatch, device_info):
         tags = 'ceph.osd_id=9,ceph.journal_uuid=x,ceph.type=data'
         osd = api.Volume(lv_name='volume1', lv_uuid='y', vg_name='vg',
                          lv_tags=tags, lv_path='/dev/VolGroup/lv')
@@ -41,10 +61,15 @@ class TestFindAssociatedDevices(object):
         volumes.append(osd)
         monkeypatch.setattr(zap.api, 'get_lvs', lambda **kwargs: {})
 
-        with pytest.raises(RuntimeError):
-            zap.find_associated_devices(osd_id=10)
+        z = zap.Zap(['--osd-id', '10'])
 
-    def test_no_lvs_found_that_match_fsid(self, monkeypatch, device_info):
+        with pytest.raises(SystemExit):
+            z.main()
+
+    @patch('ceph_volume.devices.lvm.zap.direct_report', Mock(return_value={}))
+    @patch('ceph_volume.devices.raw.list.List.filter_lvm_osd_devices', Mock(return_value='/dev/sdb'))
+    @patch('ceph_volume.process.call', Mock(side_effect=process_call))
+    def test_no_lvs_and_raw_found_that_match_fsid(self, is_root, monkeypatch):
         tags = 'ceph.osd_id=9,ceph.osd_fsid=asdf-lkjh,ceph.journal_uuid=x,'+\
                'ceph.type=data'
         osd = api.Volume(lv_name='volume1', lv_uuid='y', lv_tags=tags,
@@ -53,10 +78,15 @@ class TestFindAssociatedDevices(object):
         volumes.append(osd)
         monkeypatch.setattr(zap.api, 'get_lvs', lambda **kwargs: {})
 
-        with pytest.raises(RuntimeError):
-            zap.find_associated_devices(osd_fsid='aaaa-lkjh')
+        z = zap.Zap(['--osd-fsid', 'aaaa-lkjh'])
 
-    def test_no_lvs_found_that_match_id_fsid(self, monkeypatch, device_info):
+        with pytest.raises(SystemExit):
+            z.main()
+
+    @patch('ceph_volume.devices.lvm.zap.direct_report', Mock(return_value={}))
+    @patch('ceph_volume.devices.raw.list.List.filter_lvm_osd_devices', Mock(return_value='/dev/sdb'))
+    @patch('ceph_volume.process.call', Mock(side_effect=process_call))
+    def test_no_lvs_and_raw_found_that_match_id_fsid(self, is_root, monkeypatch):
         tags = 'ceph.osd_id=9,ceph.osd_fsid=asdf-lkjh,ceph.journal_uuid=x,'+\
                'ceph.type=data'
         osd = api.Volume(lv_name='volume1', lv_uuid='y', vg_name='vg',
@@ -65,45 +95,82 @@ class TestFindAssociatedDevices(object):
         volumes.append(osd)
         monkeypatch.setattr(zap.api, 'get_lvs', lambda **kwargs: {})
 
-        with pytest.raises(RuntimeError):
-            zap.find_associated_devices(osd_id='9', osd_fsid='aaaa-lkjh')
+        z = zap.Zap(['--osd-id', '9', '--osd-fsid', 'aaaa-lkjh'])
+
+        with pytest.raises(SystemExit):
+            z.main()
 
-    def test_no_ceph_lvs_found(self, monkeypatch):
+    @patch('ceph_volume.devices.lvm.zap.direct_report', Mock(return_value={}))
+    def test_no_ceph_lvs_and_no_ceph_raw_found(self, is_root, monkeypatch):
         osd = api.Volume(lv_name='volume1', lv_uuid='y', lv_tags='',
                          lv_path='/dev/VolGroup/lv')
         volumes = []
         volumes.append(osd)
         monkeypatch.setattr(zap.api, 'get_lvs', lambda **kwargs: {})
 
-        with pytest.raises(RuntimeError):
-            zap.find_associated_devices(osd_id=100)
+        z = zap.Zap(['--osd-id', '100'])
+
+        with pytest.raises(SystemExit):
+            z.main()
 
-    def test_lv_is_matched_id(self, monkeypatch):
+    @patch('ceph_volume.devices.lvm.zap.Zap.zap')
+    @patch('ceph_volume.process.call', Mock(side_effect=process_call))
+    def test_lv_is_matched_id(self, mock_zap, monkeypatch, is_root):
         tags = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=data'
         osd = api.Volume(lv_name='volume1', lv_uuid='y', vg_name='',
                          lv_path='/dev/VolGroup/lv', lv_tags=tags)
+        volumes = [osd]
+        monkeypatch.setattr(zap.api, 'get_lvs', lambda **kw: volumes)
+
+        z = zap.Zap(['--osd-id', '0'])
+        z.main()
+        assert z.args.devices[0].path == '/dev/VolGroup/lv'
+        mock_zap.assert_called_once()
+
+    # @patch('ceph_volume.devices.lvm.zap.disk.has_bluestore_label', Mock(return_value=True))
+    @patch('ceph_volume.devices.lvm.zap.Zap.zap')
+    @patch('ceph_volume.devices.raw.list.List.filter_lvm_osd_devices', Mock(return_value='/dev/sdb'))
+    @patch('ceph_volume.process.call', Mock(side_effect=process_call))
+    def test_raw_is_matched_id(self, mock_zap, monkeypatch, is_root):
         volumes = []
-        volumes.append(osd)
         monkeypatch.setattr(zap.api, 'get_lvs', lambda **kw: volumes)
-        monkeypatch.setattr(process, 'call', lambda x, **kw: ('', '', 0))
 
-        result = zap.find_associated_devices(osd_id='0')
-        assert result[0].path == '/dev/VolGroup/lv'
+        z = zap.Zap(['--osd-id', '0'])
+        z.main()
+        assert z.args.devices[0].path == '/dev/sdb'
+        mock_zap.assert_called_once()
 
-    def test_lv_is_matched_fsid(self, monkeypatch):
+    @patch('ceph_volume.devices.lvm.zap.Zap.zap')
+    def test_lv_is_matched_fsid(self, mock_zap, monkeypatch, is_root):
         tags = 'ceph.osd_id=0,ceph.osd_fsid=asdf-lkjh,ceph.journal_uuid=x,' +\
                'ceph.type=data'
         osd = api.Volume(lv_name='volume1', lv_uuid='y', vg_name='',
                          lv_path='/dev/VolGroup/lv', lv_tags=tags)
-        volumes = []
-        volumes.append(osd)
+        volumes = [osd]
         monkeypatch.setattr(zap.api, 'get_lvs', lambda **kw: deepcopy(volumes))
         monkeypatch.setattr(process, 'call', lambda x, **kw: ('', '', 0))
 
-        result = zap.find_associated_devices(osd_fsid='asdf-lkjh')
-        assert result[0].path == '/dev/VolGroup/lv'
+        z = zap.Zap(['--osd-fsid', 'asdf-lkjh'])
+        z.main()
+
+        assert z.args.devices[0].path == '/dev/VolGroup/lv'
+        mock_zap.assert_called_once
+
+    @patch('ceph_volume.devices.lvm.zap.Zap.zap')
+    @patch('ceph_volume.devices.raw.list.List.filter_lvm_osd_devices', Mock(return_value='/dev/sdb'))
+    @patch('ceph_volume.process.call', Mock(side_effect=process_call))
+    def test_raw_is_matched_fsid(self, mock_zap, monkeypatch, is_root):
+        volumes = []
+        monkeypatch.setattr(zap.api, 'get_lvs', lambda **kw: volumes)
+
+        z = zap.Zap(['--osd-fsid', 'd5a496bc-dcb9-4ad0-a12c-393d3200d2b6'])
+        z.main()
 
-    def test_lv_is_matched_id_fsid(self, monkeypatch):
+        assert z.args.devices[0].path == '/dev/sdb'
+        mock_zap.assert_called_once
+
+    @patch('ceph_volume.devices.lvm.zap.Zap.zap')
+    def test_lv_is_matched_id_fsid(self, mock_zap, monkeypatch, is_root):
         tags = 'ceph.osd_id=0,ceph.osd_fsid=asdf-lkjh,ceph.journal_uuid=x,' +\
                'ceph.type=data'
         osd = api.Volume(lv_name='volume1', lv_uuid='y', vg_name='',
@@ -113,26 +180,43 @@ class TestFindAssociatedDevices(object):
         monkeypatch.setattr(zap.api, 'get_lvs', lambda **kw: volumes)
         monkeypatch.setattr(process, 'call', lambda x, **kw: ('', '', 0))
 
-        result = zap.find_associated_devices(osd_id='0', osd_fsid='asdf-lkjh')
-        assert result[0].path == '/dev/VolGroup/lv'
-
+        z = zap.Zap(['--osd-id', '0', '--osd-fsid', 'asdf-lkjh', '--no-systemd'])
+        z.main()
 
-class TestEnsureAssociatedLVs(object):
+        assert z.args.devices[0].path == '/dev/VolGroup/lv'
+        mock_zap.assert_called_once
 
-    @patch('ceph_volume.devices.lvm.zap.api', Mock(return_value=[]))
-    def test_nothing_is_found(self):
+    @patch('ceph_volume.devices.lvm.zap.Zap.zap')
+    @patch('ceph_volume.devices.raw.list.List.filter_lvm_osd_devices', Mock(return_value='/dev/sdb'))
+    @patch('ceph_volume.process.call', Mock(side_effect=process_call))
+    def test_raw_is_matched_id_fsid(self, mock_zap, monkeypatch, is_root):
         volumes = []
-        result = zap.ensure_associated_lvs(volumes)
-        assert result == []
+        monkeypatch.setattr(zap.api, 'get_lvs', lambda **kw: volumes)
 
-    def test_data_is_found(self, fake_call):
-        tags = 'ceph.osd_id=0,ceph.osd_fsid=asdf-lkjh,ceph.journal_uuid=x,ceph.type=data'
-        osd = api.Volume(
-            lv_name='volume1', lv_uuid='y', vg_name='', lv_path='/dev/VolGroup/data', lv_tags=tags)
+        z = zap.Zap(['--osd-id', '0', '--osd-fsid', 'd5a496bc-dcb9-4ad0-a12c-393d3200d2b6'])
+        z.main()
+
+        assert z.args.devices[0].path == '/dev/sdb'
+        mock_zap.assert_called_once
+
+    @patch('ceph_volume.devices.lvm.zap.Zap.zap')
+    @patch('ceph_volume.devices.raw.list.List.filter_lvm_osd_devices', Mock(side_effect=['/dev/vdx', '/dev/vdy', '/dev/vdz', None]))
+    @patch('ceph_volume.process.call', Mock(side_effect=process_call))
+    def test_raw_multiple_devices(self, mock_zap, monkeypatch, is_root):
         volumes = []
-        volumes.append(osd)
-        result = zap.ensure_associated_lvs(volumes)
-        assert result == ['/dev/VolGroup/data']
+        monkeypatch.setattr(zap.api, 'get_lvs', lambda **kw: volumes)
+        z = zap.Zap(['--osd-id', '5'])
+        z.main()
+
+        set([device.path for device in z.args.devices]) == {'/dev/vdx', '/dev/vdy', '/dev/vdz'}
+        mock_zap.assert_called_once
+
+    @patch('ceph_volume.devices.lvm.zap.direct_report', Mock(return_value={}))
+    @patch('ceph_volume.devices.lvm.zap.api.get_lvs', Mock(return_value=[]))
+    def test_nothing_is_found(self, is_root):
+        z = zap.Zap(['--osd-id', '0'])
+        with pytest.raises(SystemExit):
+            z.main()
 
     def test_block_is_found(self, fake_call):
         tags = 'ceph.osd_id=0,ceph.osd_fsid=asdf-lkjh,ceph.journal_uuid=x,ceph.type=block'
@@ -140,7 +224,7 @@ class TestEnsureAssociatedLVs(object):
             lv_name='volume1', lv_uuid='y', vg_name='', lv_path='/dev/VolGroup/block', lv_tags=tags)
         volumes = []
         volumes.append(osd)
-        result = zap.ensure_associated_lvs(volumes)
+        result = zap.Zap([]).ensure_associated_lvs(volumes)
         assert result == ['/dev/VolGroup/block']
 
     def test_success_message_for_fsid(self, factory, is_root, capsys):
@@ -159,28 +243,6 @@ class TestEnsureAssociatedLVs(object):
         out, err = capsys.readouterr()
         assert "Zapping successful for OSD: 1" in err
 
-    def test_journal_is_found(self, fake_call):
-        tags = 'ceph.osd_id=0,ceph.osd_fsid=asdf-lkjh,ceph.journal_uuid=x,ceph.type=journal'
-        osd = api.Volume(
-            lv_name='volume1', lv_uuid='y', vg_name='', lv_path='/dev/VolGroup/lv', lv_tags=tags)
-        volumes = []
-        volumes.append(osd)
-        result = zap.ensure_associated_lvs(volumes)
-        assert result == ['/dev/VolGroup/lv']
-
-    @patch('ceph_volume.api.lvm.process.call', Mock(return_value=('', '', 0)))
-    def test_multiple_journals_are_found(self):
-        tags = 'ceph.osd_id=0,ceph.osd_fsid=asdf-lkjh,ceph.journal_uuid=x,ceph.type=journal'
-        volumes = []
-        for i in range(3):
-            osd = api.Volume(
-                lv_name='volume%s' % i, lv_uuid='y', vg_name='', lv_path='/dev/VolGroup/lv%s' % i, lv_tags=tags)
-            volumes.append(osd)
-        result = zap.ensure_associated_lvs(volumes)
-        assert '/dev/VolGroup/lv0' in result
-        assert '/dev/VolGroup/lv1' in result
-        assert '/dev/VolGroup/lv2' in result
-
     @patch('ceph_volume.api.lvm.process.call', Mock(return_value=('', '', 0)))
     def test_multiple_dbs_are_found(self):
         tags = 'ceph.osd_id=0,ceph.osd_fsid=asdf-lkjh,ceph.journal_uuid=x,ceph.type=db'
@@ -189,7 +251,7 @@ class TestEnsureAssociatedLVs(object):
             osd = api.Volume(
                 lv_name='volume%s' % i, lv_uuid='y', vg_name='', lv_path='/dev/VolGroup/lv%s' % i, lv_tags=tags)
             volumes.append(osd)
-        result = zap.ensure_associated_lvs(volumes)
+        result = zap.Zap([]).ensure_associated_lvs(volumes)
         assert '/dev/VolGroup/lv0' in result
         assert '/dev/VolGroup/lv1' in result
         assert '/dev/VolGroup/lv2' in result
@@ -202,7 +264,7 @@ class TestEnsureAssociatedLVs(object):
             osd = api.Volume(
                 lv_name='volume%s' % i, lv_uuid='y', vg_name='', lv_path='/dev/VolGroup/lv%s' % i, lv_tags=tags)
             volumes.append(osd)
-        result = zap.ensure_associated_lvs(volumes)
+        result = zap.Zap([]).ensure_associated_lvs(volumes)
         assert '/dev/VolGroup/lv0' in result
         assert '/dev/VolGroup/lv1' in result
         assert '/dev/VolGroup/lv2' in result
@@ -215,14 +277,14 @@ class TestEnsureAssociatedLVs(object):
             osd = api.Volume(
                 lv_name='volume%s' % _type, lv_uuid='y', vg_name='', lv_path='/dev/VolGroup/lv%s' % _type, lv_tags=tags)
             volumes.append(osd)
-        result = zap.ensure_associated_lvs(volumes)
+        result = zap.Zap([]).ensure_associated_lvs(volumes)
         assert '/dev/VolGroup/lvjournal' in result
         assert '/dev/VolGroup/lvwal' in result
         assert '/dev/VolGroup/lvdb' in result
 
     @patch('ceph_volume.devices.lvm.zap.api.get_lvs')
     def test_ensure_associated_lvs(self, m_get_lvs):
-        zap.ensure_associated_lvs([], lv_tags={'ceph.osd_id': '1'})
+        zap.Zap([]).ensure_associated_lvs([], lv_tags={'ceph.osd_id': '1'})
         calls = [
             call(tags={'ceph.type': 'db', 'ceph.osd_id': '1'}),
             call(tags={'ceph.type': 'wal', 'ceph.osd_id': '1'})
diff --git a/src/ceph-volume/ceph_volume/tests/devices/raw/data_list.py b/src/ceph-volume/ceph_volume/tests/devices/raw/data_list.py
new file mode 100644 (file)
index 0000000..e1d1a48
--- /dev/null
@@ -0,0 +1,102 @@
+ceph_bluestore_tool_show_label_output: str = '''{
+    "/dev/sdb": {
+        "osd_uuid": "sdb-uuid",
+        "size": 1099511627776,
+        "btime": "2021-07-23T16:02:22.809186+0000",
+        "description": "main",
+        "bfm_blocks": "268435456",
+        "bfm_blocks_per_key": "128",
+        "bfm_bytes_per_block": "4096",
+        "bfm_size": "1099511627776",
+        "bluefs": "1",
+        "ceph_fsid": "sdb-fsid",
+        "kv_backend": "rocksdb",
+        "magic": "ceph osd volume v026",
+        "mkfs_done": "yes",
+        "osd_key": "AQAO6PpgK+y4CBAAixq/X7OVimbaezvwD/cDmg==",
+        "ready": "ready",
+        "require_osd_release": "16",
+        "type": "bluestore",
+        "whoami": "0"
+    },
+    "/dev/sdb2": {
+        "osd_uuid": "sdb2-uuid",
+        "size": 1099511627776,
+        "btime": "2021-07-23T16:02:22.809186+0000",
+        "description": "main",
+        "bfm_blocks": "268435456",
+        "bfm_blocks_per_key": "128",
+        "bfm_bytes_per_block": "4096",
+        "bfm_size": "1099511627776",
+        "bluefs": "1",
+        "ceph_fsid": "sdb2-fsid",
+        "kv_backend": "rocksdb",
+        "magic": "ceph osd volume v026",
+        "mkfs_done": "yes",
+        "osd_key": "AQAO6PpgK+y4CBAAixq/X7OVimbaezvwD/cDmg==",
+        "ready": "ready",
+        "require_osd_release": "16",
+        "type": "bluestore",
+        "whoami": "2"
+    },
+    "/dev/sde1": {
+        "osd_uuid": "sde1-uuid",
+        "size": 214747316224,
+        "btime": "2023-07-26T13:20:19.509457+0000",
+        "description": "main",
+        "bfm_blocks": "268435456",
+        "bfm_blocks_per_key": "128",
+        "bfm_bytes_per_block": "4096",
+        "bfm_size": "214747316224",
+        "bluefs": "1",
+        "ceph_fsid": "sde1-fsid",
+        "kv_backend": "rocksdb",
+        "magic": "ceph osd volume v026",
+        "mkfs_done": "yes",
+        "osd_key": "AQCSHcFkUeLIMBAAjKqANkXafjvVISkXt6FGCA==",
+        "ready": "ready",
+        "require_osd_release": "16",
+        "type": "bluestore",
+        "whoami": "1"
+    },
+    "/dev/mapper/ceph--osd--block--1": {
+        "osd_uuid": "lvm-1-uuid",
+        "size": 549751619584,
+        "btime": "2021-07-23T16:04:37.881060+0000",
+        "description": "main",
+        "bfm_blocks": "134216704",
+        "bfm_blocks_per_key": "128",
+        "bfm_bytes_per_block": "4096",
+        "bfm_size": "549751619584",
+        "bluefs": "1",
+        "ceph_fsid": "lvm-1-fsid",
+        "kv_backend": "rocksdb",
+        "magic": "ceph osd volume v026",
+        "mkfs_done": "yes",
+        "osd_key": "AQCU6Ppgz+UcIRAAh6IUjtPjiXBlEXfwO8ixzw==",
+        "ready": "ready",
+        "require_osd_release": "16",
+        "type": "bluestore",
+        "whoami": "2"
+    },
+    "/dev/mapper/ceph--osd--block--1": {
+        "osd_uuid": "lvm-1-uuid",
+        "size": 549751619584,
+        "btime": "2021-07-23T16:04:37.881060+0000",
+        "description": "main",
+        "bfm_blocks": "134216704",
+        "bfm_blocks_per_key": "128",
+        "bfm_bytes_per_block": "4096",
+        "bfm_size": "549751619584",
+        "bluefs": "1",
+        "ceph_fsid": "lvm-1-fsid",
+        "kv_backend": "rocksdb",
+        "magic": "ceph osd volume v026",
+        "mkfs_done": "yes",
+        "osd_key": "AQCU6Ppgz+UcIRAAh6IUjtPjiXBlEXfwO8ixzw==",
+        "ready": "ready",
+        "require_osd_release": "16",
+        "type": "bluestore",
+        "whoami": "2"
+    }
+}'''
\ No newline at end of file
index 604fb4faa3ef57be4dafe8f453dcc7019f3dac54..23d2bfdaa2c723df0c5d71cbccece4c5aca26146 100644 (file)
@@ -1,5 +1,7 @@
+# type: ignore
 import pytest
-from mock.mock import patch
+from .data_list import ceph_bluestore_tool_show_label_output
+from mock.mock import patch, Mock
 from ceph_volume.devices import raw
 
 # Sample lsblk output is below that overviews the test scenario. (--json output for reader clarity)
@@ -74,98 +76,6 @@ def _lsblk_output(dev, parent=None):
     ret = 'NAME="{}" KNAME="{}" PKNAME="{}"'.format(dev, dev, parent)
     return [ret] # needs to be in a list form
 
-def _bluestore_tool_label_output_sdb():
-    return '''{
-    "/dev/sdb": {
-        "osd_uuid": "sdb-uuid",
-        "size": 1099511627776,
-        "btime": "2021-07-23T16:02:22.809186+0000",
-        "description": "main",
-        "bfm_blocks": "268435456",
-        "bfm_blocks_per_key": "128",
-        "bfm_bytes_per_block": "4096",
-        "bfm_size": "1099511627776",
-        "bluefs": "1",
-        "ceph_fsid": "sdb-fsid",
-        "kv_backend": "rocksdb",
-        "magic": "ceph osd volume v026",
-        "mkfs_done": "yes",
-        "osd_key": "AQAO6PpgK+y4CBAAixq/X7OVimbaezvwD/cDmg==",
-        "ready": "ready",
-        "require_osd_release": "16",
-        "whoami": "0"
-    }
-}'''
-
-def _bluestore_tool_label_output_sdb2():
-    return '''{
-    "/dev/sdb2": {
-        "osd_uuid": "sdb2-uuid",
-        "size": 1099511627776,
-        "btime": "2021-07-23T16:02:22.809186+0000",
-        "description": "main",
-        "bfm_blocks": "268435456",
-        "bfm_blocks_per_key": "128",
-        "bfm_bytes_per_block": "4096",
-        "bfm_size": "1099511627776",
-        "bluefs": "1",
-        "ceph_fsid": "sdb2-fsid",
-        "kv_backend": "rocksdb",
-        "magic": "ceph osd volume v026",
-        "mkfs_done": "yes",
-        "osd_key": "AQAO6PpgK+y4CBAAixq/X7OVimbaezvwD/cDmg==",
-        "ready": "ready",
-        "require_osd_release": "16",
-        "whoami": "2"
-    }
-}'''
-
-def _bluestore_tool_label_output_sde1():
-    return '''{
-    "/dev/sde1": {
-        "osd_uuid": "sde1-uuid",
-        "size": 214747316224,
-        "btime": "2023-07-26T13:20:19.509457+0000",
-        "description": "main",
-        "bfm_blocks": "268435456",
-        "bfm_blocks_per_key": "128",
-        "bfm_bytes_per_block": "4096",
-        "bfm_size": "214747316224",
-        "bluefs": "1",
-        "ceph_fsid": "sde1-fsid",
-        "kv_backend": "rocksdb",
-        "magic": "ceph osd volume v026",
-        "mkfs_done": "yes",
-        "osd_key": "AQCSHcFkUeLIMBAAjKqANkXafjvVISkXt6FGCA==",
-        "ready": "ready",
-        "require_osd_release": "16",
-        "whoami": "1"
-    }
-}'''
-
-def _bluestore_tool_label_output_dm_okay():
-    return '''{
-    "/dev/mapper/ceph--osd--block--1": {
-        "osd_uuid": "lvm-1-uuid",
-        "size": 549751619584,
-        "btime": "2021-07-23T16:04:37.881060+0000",
-        "description": "main",
-        "bfm_blocks": "134216704",
-        "bfm_blocks_per_key": "128",
-        "bfm_bytes_per_block": "4096",
-        "bfm_size": "549751619584",
-        "bluefs": "1",
-        "ceph_fsid": "lvm-1-fsid",
-        "kv_backend": "rocksdb",
-        "magic": "ceph osd volume v026",
-        "mkfs_done": "yes",
-        "osd_key": "AQCU6Ppgz+UcIRAAh6IUjtPjiXBlEXfwO8ixzw==",
-        "ready": "ready",
-        "require_osd_release": "16",
-        "whoami": "2"
-    }
-}'''
-
 def _process_call_side_effect(command, **kw):
     if "lsblk" in command:
         if "/dev/" in command[-1]:
@@ -186,19 +96,7 @@ def _process_call_side_effect(command, **kw):
         pytest.fail('command {} needs behavior specified for it'.format(command))
 
     if "ceph-bluestore-tool" in command:
-        if "/dev/sdb" in command:
-            # sdb is a bluestore OSD
-            return _bluestore_tool_label_output_sdb(), '', 0
-        if "/dev/sdb2" in command:
-            # sdb2 is a phantom atari partition that appears to have some valid bluestore info
-            return _bluestore_tool_label_output_sdb2(), '', 0
-        if "/dev/sde1" in command:
-            return _bluestore_tool_label_output_sde1(), '', 0
-        if "/dev/mapper/ceph--osd--block--1" in command:
-            # dm device 1 is a valid bluestore OSD (the other is corrupted/invalid)
-            return _bluestore_tool_label_output_dm_okay(), '', 0
-        # sda and children, sdb's children, sdc, sdd, dm device 2 all do NOT have bluestore OSD data
-        return [], 'fake No such file or directory error', 1
+        return ceph_bluestore_tool_show_label_output, '', 0
     pytest.fail('command {} needs behavior specified for it'.format(command))
 
 def _has_bluestore_label_side_effect(disk_path):
@@ -224,6 +122,7 @@ def _has_bluestore_label_side_effect(disk_path):
 
 class TestList(object):
 
+    @patch('ceph_volume.devices.raw.list.List.exclude_lvm_osd_devices', Mock())
     @patch('ceph_volume.util.device.disk.get_devices')
     @patch('ceph_volume.util.disk.has_bluestore_label')
     @patch('ceph_volume.process.call')
@@ -257,6 +156,7 @@ class TestList(object):
         assert sde1['ceph_fsid'] == 'sde1-fsid'
         assert sde1['type'] == 'bluestore'
 
+    @patch('ceph_volume.devices.raw.list.List.exclude_lvm_osd_devices', Mock())
     @patch('ceph_volume.util.device.disk.get_devices')
     @patch('ceph_volume.util.disk.has_bluestore_label')
     @patch('ceph_volume.process.call')
@@ -275,4 +175,4 @@ class TestList(object):
 
         result = raw.list.List([]).generate()
         assert len(result) == 2
-        assert 'sdb-uuid' in result
+        assert {'sdb-uuid', 'sde1-uuid'} == set(result.keys())
index 832c0836642128c5ecbf24083bf5c44575a43c1f..29cd1fc4e4dbf0a144d1249056a12a606197b068 100644 (file)
@@ -118,7 +118,7 @@ def device_data(device_info):
 class TestInventory(object):
 
     expected_keys = [
-        'ceph_device',
+        'ceph_device_lvm',
         'path',
         'rejected_reasons',
         'sys_api',
index 82ee3266e3f1f842b86a785f43a24b5adf363f81..04eefeac750db071dda1a2d748726eeee50ed31a 100644 (file)
@@ -86,7 +86,7 @@ class Device(object):
      {attr:<25} {value}"""
 
     report_fields = [
-        'ceph_device',
+        'ceph_device_lvm',
         'rejected_reasons',
         'available',
         'path',
@@ -137,7 +137,7 @@ class Device(object):
         self.blkid_api = None
         self._exists = None
         self._is_lvm_member = None
-        self.ceph_device = False
+        self.ceph_device_lvm = False
         self.being_replaced: bool = self.is_being_replaced
         self._parse()
         if self.path in sys_info.devices.keys():
@@ -236,7 +236,7 @@ class Device(object):
             self.path = lv.lv_path
             self.vg_name = lv.vg_name
             self.lv_name = lv.name
-            self.ceph_device = lvm.is_ceph_device(lv)
+            self.ceph_device_lvm = lvm.is_ceph_device(lv)
         else:
             self.lvs = []
             if self.lsblk_all:
@@ -366,7 +366,7 @@ class Device(object):
                     self._is_lvm_member = True
                     self.lvs.extend(lvm.get_device_lvs(path))
                 if self.lvs:
-                    self.ceph_device = any([True if lv.tags.get('ceph.osd_id') else False for lv in self.lvs])
+                    self.ceph_device_lvm = any([True if lv.tags.get('ceph.osd_id') else False for lv in self.lvs])
 
     def _get_partitions(self):
         """
index 30ee56808c7620e128d5c3391e9910d45f130211..77b55314f660a195dbd422bd57c5762657b2027b 100644 (file)
@@ -7,7 +7,7 @@ import json
 from ceph_volume import process, allow_loop_devices
 from ceph_volume.api import lvm
 from ceph_volume.util.system import get_file_contents
-from typing import Dict, List, Any, Union
+from typing import Dict, List, Any, Union, Optional
 
 
 logger = logging.getLogger(__name__)
@@ -251,7 +251,9 @@ def lsblk(device, columns=None, abspath=False):
 
     return result[0]
 
-def lsblk_all(device='', columns=None, abspath=False):
+def lsblk_all(device: str = '',
+              columns: Optional[List[str]] = None,
+              abspath: bool = False) -> List[Dict[str, str]]:
     """
     Create a dictionary of identifying values for a device using ``lsblk``.
     Each supported column is a key, in its *raw* format (all uppercase
@@ -332,7 +334,6 @@ def lsblk_all(device='', columns=None, abspath=False):
     if device:
         base_command.append('--nodeps')
         base_command.append(device)
-
     out, err, rc = process.call(base_command)
 
     if rc != 0: