]> git.apps.os.sepia.ceph.com Git - ceph.git/commitdiff
ceph-volume: avoid unnecessary subprocess calls 46968/head
authorGuillaume Abrioux <gabrioux@redhat.com>
Tue, 21 Jun 2022 07:55:49 +0000 (09:55 +0200)
committerGuillaume Abrioux <gabrioux@redhat.com>
Tue, 5 Jul 2022 13:15:38 +0000 (15:15 +0200)
These calls are slowing down `ceph-volume inventory`, especially because of the class `ceph_volume.util.device.Devices`.
It instantiates the class `ceph_volume.util.device.Device` for each device found
on the host, and each instantiation itself calls the binaries `lsblk`, `pvs`, `vgs` and `lvs` several times.
Instead, we can make a single call in `Devices()` to gather all the details up front and reuse them for the whole runtime.

current implementation:

```
        1    0.000    0.000    0.892    0.892 device.py:35(__init__) (class Devices)
        8    0.001    0.000    0.853    0.107 device.py:151(_parse)
       56    0.002    0.000    0.882    0.016 process.py:154(call)
        8    0.001    0.000    0.245    0.031 lvm.py:1099(get_lvs)
        8    0.000    0.000    0.026    0.003 disk.py:231(lsblk)
        8    0.000    0.000    0.435    0.054 device.py:278(_set_lvm_membership)
        1    0.000    0.000    0.885    0.885 device.py:38(<listcomp>) (multiple calls to Device() class)
      8/5    0.000    0.000    0.885    0.177 device.py:92(__init__) (class Device)

>>> timeit.timeit('Inventory([]).main()', setup='from ceph_volume.inventory import Inventory', number=1)

Device Path               Size         rotates available Model name
/dev/sdb                  200.00 GB    True    True      QEMU HARDDISK
/dev/sda                  200.00 GB    True    False     QEMU HARDDISK
/dev/sdc                  200.00 GB    True    False     QEMU HARDDISK
/dev/sdd                  200.00 GB    True    False     QEMU HARDDISK
/dev/vda                  11.00 GB     True    False
0.9309048530412838
>>>
```

new approach:

```
        1    0.000    0.000    0.253    0.253 device.py:35(__init__) (class Devices)
        5    0.000    0.000    0.144    0.029 device.py:167(_parse)
       21    0.001    0.000    0.246    0.012 process.py:154(call)
        1    0.000    0.000    0.032    0.032 lvm.py:1110(get_lvs)
        1    0.000    0.000    0.005    0.005 disk.py:236(lsblk_all)
        5    0.000    0.000    0.062    0.012 device.py:309(_set_lvm_membership)
        1    0.000    0.000    0.179    0.179 device.py:41(<listcomp>) (multiple calls to Device() class)
        5    0.000    0.000    0.179    0.036 device.py:99(__init__) (class Device)

>>> timeit.timeit('Inventory([]).main()', setup='from ceph_volume.inventory import Inventory', number=1)

Device Path               Size         rotates available Model name
/dev/sdb                  200.00 GB    True    True      QEMU HARDDISK
/dev/sda                  200.00 GB    True    False     QEMU HARDDISK
/dev/sdc                  200.00 GB    True    False     QEMU HARDDISK
/dev/sdd                  200.00 GB    True    False     QEMU HARDDISK
/dev/vda                  11.00 GB     True    False
0.2486933789914474
>>>
```

Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>
(cherry picked from commit bea9f4b643ce32268ad79c0fc257b25ff2f8333c)

src/ceph-volume/ceph_volume/api/lvm.py
src/ceph-volume/ceph_volume/devices/lvm/listing.py
src/ceph-volume/ceph_volume/devices/raw/list.py
src/ceph-volume/ceph_volume/tests/conftest.py
src/ceph-volume/ceph_volume/tests/devices/raw/test_list.py
src/ceph-volume/ceph_volume/tests/util/test_arg_validators.py
src/ceph-volume/ceph_volume/tests/util/test_device.py
src/ceph-volume/ceph_volume/util/device.py
src/ceph-volume/ceph_volume/util/disk.py

index 9a5907c5d7f994af3a6fc4460325ae03dde3aa42..b23466b7aa3cc53f315685e2059295cdad73ec65 100644 (file)
@@ -785,6 +785,17 @@ def get_device_vgs(device, name_prefix=''):
     return [VolumeGroup(**vg) for vg in vgs if vg['vg_name'] and vg['vg_name'].startswith(name_prefix)]
 
 
+def get_all_devices_vgs(name_prefix=''):
+    vg_fields = f'pv_name,{VG_FIELDS}'
+    cmd = ['pvs'] + VG_CMD_OPTIONS + ['-o', vg_fields]
+    stdout, stderr, returncode = process.call(
+        cmd,
+        run_on_host=True,
+        verbose_on_failure=False
+    )
+    vgs = _output_parser(stdout, vg_fields)
+    return [VolumeGroup(**vg) for vg in vgs]
+
 #################################
 #
 # Code for LVM Logical Volumes
index 44d5063ce37e09c559d4258bac5975b4f9954d1b..c16afdaa767286a198c6278defc150a8cc35fe71 100644 (file)
@@ -101,6 +101,8 @@ class List(object):
 
         report = {}
 
+        pvs = api.get_pvs()
+
         for lv in lvs:
             if not api.is_ceph_device(lv):
                 continue
@@ -109,8 +111,7 @@ class List(object):
             report.setdefault(osd_id, [])
             lv_report = lv.as_dict()
 
-            pvs = api.get_pvs(filters={'lv_uuid': lv.lv_uuid})
-            lv_report['devices'] = [pv.name for pv in pvs] if pvs else []
+            lv_report['devices'] = [pv.name for pv in pvs if pv.lv_uuid == lv.lv_uuid] if pvs else []
             report[osd_id].append(lv_report)
 
             phys_devs = self.create_report_non_lv_device(lv)
index 50d2046daac59335081ac5e868d26462e70a9a97..06a2b3c2240876f18ec5149ec7314975b03493a9 100644 (file)
@@ -68,22 +68,17 @@ class List(object):
 
     def generate(self, devs=None):
         logger.debug('Listing block devices via lsblk...')
+        info_devices = disk.lsblk_all(abspath=True)
         if devs is None or devs == []:
-            devs = []
             # If no devs are given initially, we want to list ALL devices including children and
             # parents. Parent disks with child partitions may be the appropriate device to return if
             # the parent disk has a bluestore header, but children may be the most appropriate
             # devices to return if the parent disk does not have a bluestore header.
-            out, err, ret = process.call([
-                'lsblk', '--paths', '--output=NAME', '--noheadings', '--list'
-            ])
-            assert not ret
-            devs = out
+            devs = [device['NAME'] for device in info_devices if device.get('NAME',)]
 
         result = {}
         logger.debug('inspecting devices: {}'.format(devs))
         for dev in devs:
-            info = disk.lsblk(dev, abspath=True)
             # Linux kernels built with CONFIG_ATARI_PARTITION enabled can falsely interpret
             # bluestore's on-disk format as an Atari partition table. These false Atari partitions
             # can be interpreted as real OSDs if a bluestore OSD was previously created on the false
@@ -93,28 +88,29 @@ class List(object):
             # parent isn't bluestore, then the child could be a valid bluestore OSD. If we fail to
             # determine whether a parent is bluestore, we should err on the side of not reporting
             # the child so as not to give a false negative.
-            if 'PKNAME' in info and info['PKNAME'] != "":
-                parent = info['PKNAME']
-                try:
-                    if disk.has_bluestore_label(parent):
-                        logger.warning(('ignoring child device {} whose parent {} is a BlueStore OSD.'.format(dev, parent),
-                                        'device is likely a phantom Atari partition. device info: {}'.format(info)))
+            for info_device in info_devices:
+                if 'PKNAME' in info_device and info_device['PKNAME'] != "":
+                    parent = info_device['PKNAME']
+                    try:
+                        if disk.has_bluestore_label(parent):
+                            logger.warning(('ignoring child device {} whose parent {} is a BlueStore OSD.'.format(dev, parent),
+                                            'device is likely a phantom Atari partition. device info: {}'.format(info_device)))
+                            continue
+                    except OSError as e:
+                        logger.error(('ignoring child device {} to avoid reporting invalid BlueStore data from phantom Atari partitions.'.format(dev),
+                                    'failed to determine if parent device {} is BlueStore. err: {}'.format(parent, e)))
                         continue
-                except OSError as e:
-                    logger.error(('ignoring child device {} to avoid reporting invalid BlueStore data from phantom Atari partitions.'.format(dev),
-                                  'failed to determine if parent device {} is BlueStore. err: {}'.format(parent, e)))
-                    continue
 
-            bs_info = _get_bluestore_info(dev)
-            if bs_info is None:
-                # None is also returned in the rare event that there is an issue reading info from
-                # a BlueStore disk, so be sure to log our assumption that it isn't bluestore
-                logger.info('device {} does not have BlueStore information'.format(dev))
-                continue
-            uuid = bs_info['osd_uuid']
-            if uuid not in result:
-                result[uuid] = {}
-            result[uuid].update(bs_info)
+                bs_info = _get_bluestore_info(dev)
+                if bs_info is None:
+                    # None is also returned in the rare event that there is an issue reading info from
+                    # a BlueStore disk, so be sure to log our assumption that it isn't bluestore
+                    logger.info('device {} does not have BlueStore information'.format(dev))
+                    continue
+                uuid = bs_info['osd_uuid']
+                if uuid not in result:
+                    result[uuid] = {}
+                result[uuid].update(bs_info)
 
         return result
 
index c41a46074439766f5d1a7534e8b156531420f931..9645ba632bcf1593291314dda56fe1257db28415 100644 (file)
@@ -238,12 +238,9 @@ def ceph_parttype(request):
 @pytest.fixture
 def lsblk_ceph_disk_member(monkeypatch, request, ceph_partlabel, ceph_parttype):
     monkeypatch.setattr("ceph_volume.util.device.disk.lsblk",
-                        lambda path: {'TYPE': 'disk', 'PARTLABEL': ceph_partlabel})
-    # setting blkid here too in order to be able to fall back to PARTTYPE based
-    # membership
-    monkeypatch.setattr("ceph_volume.util.device.disk.blkid",
                         lambda path: {'TYPE': 'disk',
-                                      'PARTLABEL': '',
+                                      'NAME': 'sda',
+                                      'PARTLABEL': ceph_partlabel,
                                       'PARTTYPE': ceph_parttype})
 
 
@@ -264,6 +261,7 @@ def blkid_ceph_disk_member(monkeypatch, request, ceph_partlabel, ceph_parttype):
 def device_info_not_ceph_disk_member(monkeypatch, request):
     monkeypatch.setattr("ceph_volume.util.device.disk.lsblk",
                         lambda path: {'TYPE': 'disk',
+                                      'NAME': 'sda',
                                       'PARTLABEL': request.param[0]})
     monkeypatch.setattr("ceph_volume.util.device.disk.blkid",
                         lambda path: {'TYPE': 'disk',
index d5ccee5c964956939de61832f53a6bb9cf45db70..5ad501bab94a43f2dbd3520b8528579426f1c890 100644 (file)
@@ -48,19 +48,19 @@ def _devices_side_effect():
         "/dev/mapper/ceph--osd--block--2": {},
     }
 
-def _lsblk_list_output():
+def _lsblk_all_devices(abspath=True):
     return [
-        '/dev/sda',
-        '/dev/sda1',
-        '/dev/sda2',
-        '/dev/sda3',
-        '/dev/sdb',
-        '/dev/sdb2',
-        '/dev/sdb3',
-        '/dev/sdc',
-        '/dev/sdd',
-        '/dev/mapper/ceph--osd--block--1',
-        '/dev/mapper/ceph--osd--block--2',
+        {"NAME": "/dev/sda", "KNAME": "/dev/sda", "PKNAME": ""},
+        {"NAME": "/dev/sda1", "KNAME": "/dev/sda1", "PKNAME": "/dev/sda"},
+        {"NAME": "/dev/sda2", "KNAME": "/dev/sda2", "PKNAME": "/dev/sda"},
+        {"NAME": "/dev/sda3", "KNAME": "/dev/sda3", "PKNAME": "/dev/sda"},
+        {"NAME": "/dev/sdb", "KNAME": "/dev/sdb", "PKNAME": ""},
+        {"NAME": "/dev/sdb2", "KNAME": "/dev/sdb2", "PKNAME": "/dev/sdb"},
+        {"NAME": "/dev/sdb3", "KNAME": "/dev/sdb3", "PKNAME": "/dev/sdb"},
+        {"NAME": "/dev/sdc", "KNAME": "/dev/sdc", "PKNAME": ""},
+        {"NAME": "/dev/sdd", "KNAME": "/dev/sdd", "PKNAME": ""},
+        {"NAME": "/dev/mapper/ceph--osd--block--1", "KNAME": "/dev/mapper/ceph--osd--block--1", "PKNAME": "/dev/sdd"},
+        {"NAME": "/dev/mapper/ceph--osd--block--2", "KNAME": "/dev/mapper/ceph--osd--block--2", "PKNAME": "/dev/sdd"},
     ]
 
 # dummy lsblk output for device with optional parent output
@@ -153,7 +153,7 @@ def _process_call_side_effect(command, **kw):
                 return _lsblk_output(dev, parent="/dev/sdd"), '', 0
             pytest.fail('dev {} needs behavior specified for it'.format(dev))
         if "/dev/" not in command:
-            return _lsblk_list_output(), '', 0
+            return _lsblk_all_devices(), '', 0
         pytest.fail('command {} needs behavior specified for it'.format(command))
 
     if "ceph-bluestore-tool" in command:
@@ -192,15 +192,16 @@ class TestList(object):
     @patch('ceph_volume.util.device.disk.get_devices')
     @patch('ceph_volume.util.disk.has_bluestore_label')
     @patch('ceph_volume.process.call')
-    def test_raw_list(self, patched_call, patched_bluestore_label, patched_get_devices):
+    @patch('ceph_volume.util.disk.lsblk_all')
+    def test_raw_list(self, patched_disk_lsblk, patched_call, patched_bluestore_label, patched_get_devices):
         raw.list.logger.setLevel("DEBUG")
         patched_call.side_effect = _process_call_side_effect
+        patched_disk_lsblk.side_effect = _lsblk_all_devices
         patched_bluestore_label.side_effect = _has_bluestore_label_side_effect
         patched_get_devices.side_effect = _devices_side_effect
 
         result = raw.list.List([]).generate()
-        patched_call.assert_any_call(['lsblk', '--paths', '--output=NAME', '--noheadings', '--list'])
-        assert len(result) == 2
+        assert len(result) == 3
 
         sdb = result['sdb-uuid']
         assert sdb['osd_uuid'] == 'sdb-uuid'
@@ -219,17 +220,19 @@ class TestList(object):
     @patch('ceph_volume.util.device.disk.get_devices')
     @patch('ceph_volume.util.disk.has_bluestore_label')
     @patch('ceph_volume.process.call')
-    def test_raw_list_with_OSError(self, patched_call, patched_bluestore_label, patched_get_devices):
+    @patch('ceph_volume.util.disk.lsblk_all')
+    def test_raw_list_with_OSError(self, patched_disk_lsblk, patched_call, patched_bluestore_label, patched_get_devices):
         def _has_bluestore_label_side_effect_with_OSError(device_path):
             if device_path == "/dev/sdd":
                 raise OSError('fake OSError')
             return _has_bluestore_label_side_effect(device_path)
 
         raw.list.logger.setLevel("DEBUG")
+        patched_disk_lsblk.side_effect = _lsblk_all_devices
         patched_call.side_effect = _process_call_side_effect
         patched_bluestore_label.side_effect = _has_bluestore_label_side_effect_with_OSError
         patched_get_devices.side_effect = _devices_side_effect
 
         result = raw.list.List([]).generate()
-        assert len(result) == 1
+        assert len(result) == 3
         assert 'sdb-uuid' in result
index 19aaaa3bd7e839f3cda5d02c88597e67db18399b..85c4bebc049a731a6e8500c01719f4eb30ab7367 100644 (file)
@@ -82,12 +82,20 @@ class TestValidDevice(object):
         self.validator = arg_validators.ValidDevice()
 
     @patch('ceph_volume.util.arg_validators.disk.has_bluestore_label', return_value=False)
-    def test_path_is_valid(self, m_has_bs_label, fake_call, patch_bluestore_label):
-        result = self.validator('/')
-        assert result.abspath == '/'
+    def test_path_is_valid(self, m_has_bs_label,
+                           fake_call, patch_bluestore_label,
+                           device_info):
+        lsblk = {"TYPE": "disk", "NAME": "sda"}
+        device_info(lsblk=lsblk)
+        result = self.validator('/dev/sda')
+        assert result.abspath == '/dev/sda'
 
     @patch('ceph_volume.util.arg_validators.disk.has_bluestore_label', return_value=False)
-    def test_path_is_invalid(self, m_has_bs_label, fake_call, patch_bluestore_label):
+    def test_path_is_invalid(self, m_has_bs_label,
+                             fake_call, patch_bluestore_label,
+                             device_info):
+        lsblk = {"TYPE": "disk", "NAME": "sda"}
+        device_info(lsblk=lsblk)
         with pytest.raises(argparse.ArgumentError):
             self.validator('/device/does/not/exist')
 
index f6e439279d78c4e80e6b6aafdf0882dcf8bfd5fa..540d5084d70eda47f1a6298059eb54a22c45e4fa 100644 (file)
@@ -16,8 +16,8 @@ class TestDevice(object):
                             deepcopy(volumes))
 
         data = {"/dev/sda": {"foo": "bar"}}
-        lsblk = {"TYPE": "disk"}
-        device_info(devices=data,lsblk=lsblk)
+        lsblk = {"TYPE": "disk", "NAME": "sda"}
+        device_info(devices=data, lsblk=lsblk)
         disk = device.Device("/dev/sda")
         assert disk.sys_api
         assert "foo" in disk.sys_api
@@ -32,7 +32,7 @@ class TestDevice(object):
 
         # 5GB in size
         data = {"/dev/sda": {"size": "5368709120"}}
-        lsblk = {"TYPE": "disk"}
+        lsblk = {"TYPE": "disk", "NAME": "sda"}
         device_info(devices=data,lsblk=lsblk)
         disk = device.Device("/dev/sda")
         assert disk.lvm_size.gb == 4
@@ -40,14 +40,14 @@ class TestDevice(object):
     def test_lvm_size_rounds_down(self, fake_call, device_info):
         # 5.5GB in size
         data = {"/dev/sda": {"size": "5905580032"}}
-        lsblk = {"TYPE": "disk"}
+        lsblk = {"TYPE": "disk", "NAME": "sda"}
         device_info(devices=data,lsblk=lsblk)
         disk = device.Device("/dev/sda")
         assert disk.lvm_size.gb == 4
 
     def test_is_lv(self, fake_call, device_info):
         data = {"lv_path": "vg/lv", "vg_name": "vg", "name": "lv"}
-        lsblk = {"TYPE": "lvm"}
+        lsblk = {"TYPE": "lvm", "NAME": "vg-lv"}
         device_info(lv=data,lsblk=lsblk)
         disk = device.Device("vg/lv")
         assert disk.is_lv
@@ -57,7 +57,7 @@ class TestDevice(object):
                                  pv_tags={})
         pvolumes = []
         pvolumes.append(BarPVolume)
-        lsblk = {"TYPE": "disk"}
+        lsblk = {"TYPE": "disk", "NAME": "sda"}
         device_info(lsblk=lsblk)
         monkeypatch.setattr(api, 'get_pvs', lambda **kwargs: {})
 
@@ -68,42 +68,42 @@ class TestDevice(object):
         vg = api.VolumeGroup(vg_name='foo/bar', vg_free_count=6,
                              vg_extent_size=1073741824)
         monkeypatch.setattr(api, 'get_device_vgs', lambda x: [vg])
-        lsblk = {"TYPE": "disk"}
+        lsblk = {"TYPE": "disk", "NAME": "nvme0n1"}
         device_info(lsblk=lsblk)
         disk = device.Device("/dev/nvme0n1")
         assert len(disk.vgs) == 1
 
     def test_device_is_device(self, fake_call, device_info):
         data = {"/dev/sda": {"foo": "bar"}}
-        lsblk = {"TYPE": "device"}
+        lsblk = {"TYPE": "device", "NAME": "sda"}
         device_info(devices=data, lsblk=lsblk)
         disk = device.Device("/dev/sda")
         assert disk.is_device is True
 
     def test_device_is_rotational(self, fake_call, device_info):
         data = {"/dev/sda": {"rotational": "1"}}
-        lsblk = {"TYPE": "device"}
+        lsblk = {"TYPE": "device", "NAME": "sda"}
         device_info(devices=data, lsblk=lsblk)
         disk = device.Device("/dev/sda")
         assert disk.rotational
 
     def test_device_is_not_rotational(self, fake_call, device_info):
         data = {"/dev/sda": {"rotational": "0"}}
-        lsblk = {"TYPE": "device"}
+        lsblk = {"TYPE": "device", "NAME": "sda"}
         device_info(devices=data, lsblk=lsblk)
         disk = device.Device("/dev/sda")
         assert not disk.rotational
 
     def test_device_is_rotational_lsblk(self, fake_call, device_info):
         data = {"/dev/sda": {"foo": "bar"}}
-        lsblk = {"TYPE": "device", "ROTA": "1"}
+        lsblk = {"TYPE": "device", "ROTA": "1", "NAME": "sda"}
         device_info(devices=data, lsblk=lsblk)
         disk = device.Device("/dev/sda")
         assert disk.rotational
 
     def test_device_is_not_rotational_lsblk(self, fake_call, device_info):
         data = {"/dev/sda": {"rotational": "0"}}
-        lsblk = {"TYPE": "device", "ROTA": "0"}
+        lsblk = {"TYPE": "device", "ROTA": "0", "NAME": "sda"}
         device_info(devices=data, lsblk=lsblk)
         disk = device.Device("/dev/sda")
         assert not disk.rotational
@@ -111,60 +111,60 @@ class TestDevice(object):
     def test_device_is_rotational_defaults_true(self, fake_call, device_info):
         # rotational will default true if no info from sys_api or lsblk is found
         data = {"/dev/sda": {"foo": "bar"}}
-        lsblk = {"TYPE": "device", "foo": "bar"}
+        lsblk = {"TYPE": "device", "foo": "bar", "NAME": "sda"}
         device_info(devices=data, lsblk=lsblk)
         disk = device.Device("/dev/sda")
         assert disk.rotational
 
     def test_disk_is_device(self, fake_call, device_info):
         data = {"/dev/sda": {"foo": "bar"}}
-        lsblk = {"TYPE": "disk"}
+        lsblk = {"TYPE": "disk", "NAME": "sda"}
         device_info(devices=data, lsblk=lsblk)
         disk = device.Device("/dev/sda")
         assert disk.is_device is True
 
     def test_is_partition(self, fake_call, device_info):
         data = {"/dev/sda1": {"foo": "bar"}}
-        lsblk = {"TYPE": "part", "PKNAME": "sda"}
+        lsblk = {"TYPE": "part", "NAME": "sda1", "PKNAME": "sda"}
         device_info(devices=data, lsblk=lsblk)
         disk = device.Device("/dev/sda1")
         assert disk.is_partition
 
     def test_mpath_device_is_device(self, fake_call, device_info):
         data = {"/dev/foo": {"foo": "bar"}}
-        lsblk = {"TYPE": "mpath"}
+        lsblk = {"TYPE": "mpath", "NAME": "foo"}
         device_info(devices=data, lsblk=lsblk)
         disk = device.Device("/dev/foo")
         assert disk.is_device is True
 
     def test_is_not_lvm_member(self, fake_call, device_info):
         data = {"/dev/sda1": {"foo": "bar"}}
-        lsblk = {"TYPE": "part", "PKNAME": "sda"}
+        lsblk = {"TYPE": "part", "NAME": "sda1", "PKNAME": "sda"}
         device_info(devices=data, lsblk=lsblk)
         disk = device.Device("/dev/sda1")
         assert not disk.is_lvm_member
 
     def test_is_lvm_member(self, fake_call, device_info):
         data = {"/dev/sda1": {"foo": "bar"}}
-        lsblk = {"TYPE": "part", "PKNAME": "sda"}
+        lsblk = {"TYPE": "part", "NAME": "sda1", "PKNAME": "sda"}
         device_info(devices=data, lsblk=lsblk)
         disk = device.Device("/dev/sda1")
         assert not disk.is_lvm_member
 
     def test_is_mapper_device(self, fake_call, device_info):
-        lsblk = {"TYPE": "lvm"}
+        lsblk = {"TYPE": "lvm", "NAME": "foo"}
         device_info(lsblk=lsblk)
         disk = device.Device("/dev/mapper/foo")
         assert disk.is_mapper
 
     def test_dm_is_mapper_device(self, fake_call, device_info):
-        lsblk = {"TYPE": "lvm"}
+        lsblk = {"TYPE": "lvm", "NAME": "dm-4"}
         device_info(lsblk=lsblk)
         disk = device.Device("/dev/dm-4")
         assert disk.is_mapper
 
     def test_is_not_mapper_device(self, fake_call, device_info):
-        lsblk = {"TYPE": "disk"}
+        lsblk = {"TYPE": "disk", "NAME": "sda"}
         device_info(lsblk=lsblk)
         disk = device.Device("/dev/sda")
         assert not disk.is_mapper
@@ -176,6 +176,7 @@ class TestDevice(object):
         assert disk.is_ceph_disk_member
 
     @pytest.mark.usefixtures("blkid_ceph_disk_member",
+                             "lsblk_ceph_disk_member",
                              "disable_kernel_queries")
     def test_is_ceph_disk_blkid(self, fake_call, monkeypatch, patch_bluestore_label):
         disk = device.Device("/dev/sda")
@@ -190,6 +191,7 @@ class TestDevice(object):
         assert "Used by ceph-disk" in disk.rejected_reasons
 
     @pytest.mark.usefixtures("blkid_ceph_disk_member",
+                             "lsblk_ceph_disk_member",
                              "disable_kernel_queries")
     def test_is_ceph_disk_member_not_available_blkid(self, fake_call, monkeypatch, patch_bluestore_label):
         disk = device.Device("/dev/sda")
@@ -199,14 +201,14 @@ class TestDevice(object):
 
     def test_reject_removable_device(self, fake_call, device_info):
         data = {"/dev/sdb": {"removable": 1}}
-        lsblk = {"TYPE": "disk"}
+        lsblk = {"TYPE": "disk", "NAME": "sdb"}
         device_info(devices=data,lsblk=lsblk)
         disk = device.Device("/dev/sdb")
         assert not disk.available
 
     def test_reject_device_with_gpt_headers(self, fake_call, device_info):
         data = {"/dev/sdb": {"removable": 0, "size": 5368709120}}
-        lsblk = {"TYPE": "disk"}
+        lsblk = {"TYPE": "disk", "NAME": "sdb"}
         blkid= {"PTTYPE": "gpt"}
         device_info(
             devices=data,
@@ -218,42 +220,42 @@ class TestDevice(object):
 
     def test_accept_non_removable_device(self, fake_call, device_info):
         data = {"/dev/sdb": {"removable": 0, "size": 5368709120}}
-        lsblk = {"TYPE": "disk"}
+        lsblk = {"TYPE": "disk", "NAME": "sdb"}
         device_info(devices=data,lsblk=lsblk)
         disk = device.Device("/dev/sdb")
         assert disk.available
 
     def test_reject_not_acceptable_device(self, fake_call, device_info):
         data = {"/dev/dm-0": {"foo": "bar"}}
-        lsblk = {"TYPE": "mpath"}
+        lsblk = {"TYPE": "mpath", "NAME": "dm-0"}
         device_info(devices=data, lsblk=lsblk)
         disk = device.Device("/dev/dm-0")
         assert not disk.available
 
     def test_reject_readonly_device(self, fake_call, device_info):
         data = {"/dev/cdrom": {"ro": 1}}
-        lsblk = {"TYPE": "disk"}
+        lsblk = {"TYPE": "disk", "NAME": "cdrom"}
         device_info(devices=data,lsblk=lsblk)
         disk = device.Device("/dev/cdrom")
         assert not disk.available
 
     def test_reject_smaller_than_5gb(self, fake_call, device_info):
         data = {"/dev/sda": {"size": 5368709119}}
-        lsblk = {"TYPE": "disk"}
+        lsblk = {"TYPE": "disk", "NAME": "sda"}
         device_info(devices=data,lsblk=lsblk)
         disk = device.Device("/dev/sda")
         assert not disk.available, 'too small device is available'
 
     def test_accept_non_readonly_device(self, fake_call, device_info):
         data = {"/dev/sda": {"ro": 0, "size": 5368709120}}
-        lsblk = {"TYPE": "disk"}
+        lsblk = {"TYPE": "disk", "NAME": "sda"}
         device_info(devices=data,lsblk=lsblk)
         disk = device.Device("/dev/sda")
         assert disk.available
 
     def test_reject_bluestore_device(self, fake_call, monkeypatch, patch_bluestore_label, device_info):
         patch_bluestore_label.return_value = True
-        lsblk = {"TYPE": "disk"}
+        lsblk = {"TYPE": "disk", "NAME": "sda"}
         device_info(lsblk=lsblk)
         disk = device.Device("/dev/sda")
         assert not disk.available
@@ -261,13 +263,14 @@ class TestDevice(object):
 
     def test_reject_device_with_oserror(self, fake_call, monkeypatch, patch_bluestore_label, device_info):
         patch_bluestore_label.side_effect = OSError('test failure')
-        lsblk = {"TYPE": "disk"}
+        lsblk = {"TYPE": "disk", "NAME": "sda"}
         device_info(lsblk=lsblk)
         disk = device.Device("/dev/sda")
         assert not disk.available
         assert "Failed to determine if device is BlueStore" in disk.rejected_reasons
 
-    @pytest.mark.usefixtures("device_info_not_ceph_disk_member",
+    @pytest.mark.usefixtures("lsblk_ceph_disk_member",
+                             "device_info_not_ceph_disk_member",
                              "disable_kernel_queries")
     def test_is_not_ceph_disk_member_lsblk(self, fake_call, patch_bluestore_label):
         disk = device.Device("/dev/sda")
@@ -277,7 +280,7 @@ class TestDevice(object):
         vg = api.VolumeGroup(vg_name='foo/bar', vg_free_count=1536,
                              vg_extent_size=4194304)
         monkeypatch.setattr(api, 'get_device_vgs', lambda x: [vg])
-        lsblk = {"TYPE": "disk"}
+        lsblk = {"TYPE": "disk", "NAME": "nvme0n1"}
         data = {"/dev/nvme0n1": {"size": "6442450944"}}
         device_info(devices=data, lsblk=lsblk)
         disk = device.Device("/dev/nvme0n1")
@@ -289,7 +292,7 @@ class TestDevice(object):
         vg = api.VolumeGroup(vg_name='foo/bar', vg_free_count=4,
                              vg_extent_size=1073741824)
         monkeypatch.setattr(api, 'get_device_vgs', lambda x: [vg])
-        lsblk = {"TYPE": "disk"}
+        lsblk = {"TYPE": "disk", "NAME": "nvme0n1"}
         data = {"/dev/nvme0n1": {"size": "6442450944"}}
         device_info(devices=data, lsblk=lsblk)
         disk = device.Device("/dev/nvme0n1")
@@ -303,7 +306,7 @@ class TestDevice(object):
         vg2 = api.VolumeGroup(vg_name='foo/bar', vg_free_count=536,
                              vg_extent_size=4194304)
         monkeypatch.setattr(api, 'get_device_vgs', lambda x: [vg1, vg2])
-        lsblk = {"TYPE": "disk"}
+        lsblk = {"TYPE": "disk", "NAME": "nvme0n1"}
         data = {"/dev/nvme0n1": {"size": "6442450944"}}
         device_info(devices=data, lsblk=lsblk)
         disk = device.Device("/dev/nvme0n1")
@@ -315,7 +318,7 @@ class TestDevice(object):
     def test_used_by_ceph(self, fake_call, device_info,
                           monkeypatch, ceph_type):
         data = {"/dev/sda": {"foo": "bar"}}
-        lsblk = {"TYPE": "part", "PKNAME": "sda"}
+        lsblk = {"TYPE": "part", "NAME": "sda", "PKNAME": "sda"}
         FooPVolume = api.PVolume(pv_name='/dev/sda', pv_uuid="0000",
                                  lv_uuid="0000", pv_tags={}, vg_name="vg")
         pvolumes = []
@@ -342,7 +345,7 @@ class TestDevice(object):
         pvolumes = []
         pvolumes.append(FooPVolume)
         data = {"/dev/sda": {"foo": "bar"}}
-        lsblk = {"TYPE": "part", "PKNAME": "sda"}
+        lsblk = {"TYPE": "part", "NAME": "sda", "PKNAME": "sda"}
         lv_data = {"lv_path": "vg/lv", "vg_name": "vg", "lv_uuid": "0000", "tags": {"ceph.osd_id": 0, "ceph.type": "journal"}}
         monkeypatch.setattr(api, 'get_pvs', lambda **kwargs: pvolumes)
 
@@ -352,7 +355,7 @@ class TestDevice(object):
 
     def test_get_device_id(self, fake_call, device_info):
         udev = {k:k for k in ['ID_VENDOR', 'ID_MODEL', 'ID_SCSI_SERIAL']}
-        lsblk = {"TYPE": "disk"}
+        lsblk = {"TYPE": "disk", "NAME": "sda"}
         device_info(udevadm=udev,lsblk=lsblk)
         disk = device.Device("/dev/sda")
         assert disk._get_device_id() == 'ID_VENDOR_ID_MODEL_ID_SCSI_SERIAL'
@@ -372,26 +375,26 @@ class TestDevice(object):
 class TestDeviceEncryption(object):
 
     def test_partition_is_not_encrypted_lsblk(self, fake_call, device_info):
-        lsblk = {'TYPE': 'part', 'FSTYPE': 'xfs', 'PKNAME': 'sda'}
+        lsblk = {'TYPE': 'part', 'FSTYPE': 'xfs', 'NAME': 'sda', 'PKNAME': 'sda'}
         device_info(lsblk=lsblk)
         disk = device.Device("/dev/sda")
         assert disk.is_encrypted is False
 
     def test_partition_is_encrypted_lsblk(self, fake_call, device_info):
-        lsblk = {'TYPE': 'part', 'FSTYPE': 'crypto_LUKS', 'PKNAME': 'sda'}
+        lsblk = {'TYPE': 'part', 'FSTYPE': 'crypto_LUKS', 'NAME': 'sda', 'PKNAME': 'sda'}
         device_info(lsblk=lsblk)
         disk = device.Device("/dev/sda")
         assert disk.is_encrypted is True
 
     def test_partition_is_not_encrypted_blkid(self, fake_call, device_info):
-        lsblk = {'TYPE': 'part', 'PKNAME': 'sda'}
+        lsblk = {'TYPE': 'part', 'NAME': 'sda', 'PKNAME': 'sda'}
         blkid = {'TYPE': 'ceph data'}
         device_info(lsblk=lsblk, blkid=blkid)
         disk = device.Device("/dev/sda")
         assert disk.is_encrypted is False
 
     def test_partition_is_encrypted_blkid(self, fake_call, device_info):
-        lsblk = {'TYPE': 'part', 'PKNAME': 'sda'}
+        lsblk = {'TYPE': 'part', 'NAME': 'sda' ,'PKNAME': 'sda'}
         blkid = {'TYPE': 'crypto_LUKS'}
         device_info(lsblk=lsblk, blkid=blkid)
         disk = device.Device("/dev/sda")
@@ -400,7 +403,7 @@ class TestDeviceEncryption(object):
     def test_mapper_is_encrypted_luks1(self, fake_call, device_info, monkeypatch):
         status = {'type': 'LUKS1'}
         monkeypatch.setattr(device, 'encryption_status', lambda x: status)
-        lsblk = {'FSTYPE': 'xfs', 'TYPE': 'lvm'}
+        lsblk = {'FSTYPE': 'xfs', 'NAME': 'uuid', 'TYPE': 'lvm'}
         blkid = {'TYPE': 'mapper'}
         device_info(lsblk=lsblk, blkid=blkid)
         disk = device.Device("/dev/mapper/uuid")
@@ -409,7 +412,7 @@ class TestDeviceEncryption(object):
     def test_mapper_is_encrypted_luks2(self, fake_call, device_info, monkeypatch):
         status = {'type': 'LUKS2'}
         monkeypatch.setattr(device, 'encryption_status', lambda x: status)
-        lsblk = {'FSTYPE': 'xfs', 'TYPE': 'lvm'}
+        lsblk = {'FSTYPE': 'xfs', 'NAME': 'uuid', 'TYPE': 'lvm'}
         blkid = {'TYPE': 'mapper'}
         device_info(lsblk=lsblk, blkid=blkid)
         disk = device.Device("/dev/mapper/uuid")
@@ -418,7 +421,7 @@ class TestDeviceEncryption(object):
     def test_mapper_is_encrypted_plain(self, fake_call, device_info, monkeypatch):
         status = {'type': 'PLAIN'}
         monkeypatch.setattr(device, 'encryption_status', lambda x: status)
-        lsblk = {'FSTYPE': 'xfs', 'TYPE': 'lvm'}
+        lsblk = {'FSTYPE': 'xfs', 'NAME': 'uuid', 'TYPE': 'lvm'}
         blkid = {'TYPE': 'mapper'}
         device_info(lsblk=lsblk, blkid=blkid)
         disk = device.Device("/dev/mapper/uuid")
@@ -426,14 +429,14 @@ class TestDeviceEncryption(object):
 
     def test_mapper_is_not_encrypted_plain(self, fake_call, device_info, monkeypatch):
         monkeypatch.setattr(device, 'encryption_status', lambda x: {})
-        lsblk = {'FSTYPE': 'xfs', 'TYPE': 'lvm'}
+        lsblk = {'FSTYPE': 'xfs', 'NAME': 'uuid', 'TYPE': 'lvm'}
         blkid = {'TYPE': 'mapper'}
         device_info(lsblk=lsblk, blkid=blkid)
         disk = device.Device("/dev/mapper/uuid")
         assert disk.is_encrypted is False
 
     def test_lv_is_encrypted_blkid(self, fake_call, device_info):
-        lsblk = {'TYPE': 'lvm'}
+        lsblk = {'TYPE': 'lvm', 'NAME': 'sda'}
         blkid = {'TYPE': 'crypto_LUKS'}
         device_info(lsblk=lsblk, blkid=blkid)
         disk = device.Device("/dev/sda")
@@ -441,7 +444,7 @@ class TestDeviceEncryption(object):
         assert disk.is_encrypted is True
 
     def test_lv_is_not_encrypted_blkid(self, fake_call, factory, device_info):
-        lsblk = {'TYPE': 'lvm'}
+        lsblk = {'TYPE': 'lvm', 'NAME': 'sda'}
         blkid = {'TYPE': 'xfs'}
         device_info(lsblk=lsblk, blkid=blkid)
         disk = device.Device("/dev/sda")
@@ -449,7 +452,7 @@ class TestDeviceEncryption(object):
         assert disk.is_encrypted is False
 
     def test_lv_is_encrypted_lsblk(self, fake_call, device_info):
-        lsblk = {'FSTYPE': 'crypto_LUKS', 'TYPE': 'lvm'}
+        lsblk = {'FSTYPE': 'crypto_LUKS', 'NAME': 'sda', 'TYPE': 'lvm'}
         blkid = {'TYPE': 'mapper'}
         device_info(lsblk=lsblk, blkid=blkid)
         disk = device.Device("/dev/sda")
@@ -457,7 +460,7 @@ class TestDeviceEncryption(object):
         assert disk.is_encrypted is True
 
     def test_lv_is_not_encrypted_lsblk(self, fake_call, factory, device_info):
-        lsblk = {'FSTYPE': 'xfs', 'TYPE': 'lvm'}
+        lsblk = {'FSTYPE': 'xfs', 'NAME': 'sda', 'TYPE': 'lvm'}
         blkid = {'TYPE': 'mapper'}
         device_info(lsblk=lsblk, blkid=blkid)
         disk = device.Device("/dev/sda")
@@ -465,7 +468,7 @@ class TestDeviceEncryption(object):
         assert disk.is_encrypted is False
 
     def test_lv_is_encrypted_lvm_api(self, fake_call, factory, device_info):
-        lsblk = {'FSTYPE': 'xfs', 'TYPE': 'lvm'}
+        lsblk = {'FSTYPE': 'xfs', 'NAME': 'sda', 'TYPE': 'lvm'}
         blkid = {'TYPE': 'mapper'}
         device_info(lsblk=lsblk, blkid=blkid)
         disk = device.Device("/dev/sda")
@@ -473,7 +476,7 @@ class TestDeviceEncryption(object):
         assert disk.is_encrypted is True
 
     def test_lv_is_not_encrypted_lvm_api(self, fake_call, factory, device_info):
-        lsblk = {'FSTYPE': 'xfs', 'TYPE': 'lvm'}
+        lsblk = {'FSTYPE': 'xfs', 'NAME': 'sda', 'TYPE': 'lvm'}
         blkid = {'TYPE': 'mapper'}
         device_info(lsblk=lsblk, blkid=blkid)
         disk = device.Device("/dev/sda")
@@ -492,27 +495,33 @@ class TestDeviceOrdering(object):
         }
 
     def test_valid_before_invalid(self, fake_call, device_info):
-        lsblk = {"TYPE": "disk"}
-        device_info(devices=self.data,lsblk=lsblk)
+        lsblk_sda = {"NAME": "sda", "TYPE": "disk"}
+        lsblk_sdb = {"NAME": "sdb", "TYPE": "disk"}
+        device_info(devices=self.data,lsblk=lsblk_sda)
         sda = device.Device("/dev/sda")
+        device_info(devices=self.data,lsblk=lsblk_sdb)
         sdb = device.Device("/dev/sdb")
 
         assert sda < sdb
         assert sdb > sda
 
     def test_valid_alphabetical_ordering(self, fake_call, device_info):
-        lsblk = {"TYPE": "disk"}
-        device_info(devices=self.data,lsblk=lsblk)
+        lsblk_sda = {"NAME": "sda", "TYPE": "disk"}
+        lsblk_sdc = {"NAME": "sdc", "TYPE": "disk"}
+        device_info(devices=self.data,lsblk=lsblk_sda)
         sda = device.Device("/dev/sda")
+        device_info(devices=self.data,lsblk=lsblk_sdc)
         sdc = device.Device("/dev/sdc")
 
         assert sda < sdc
         assert sdc > sda
 
     def test_invalid_alphabetical_ordering(self, fake_call, device_info):
-        lsblk = {"TYPE": "disk"}
-        device_info(devices=self.data,lsblk=lsblk)
+        lsblk_sdb = {"NAME": "sdb", "TYPE": "disk"}
+        lsblk_sdd = {"NAME": "sdd", "TYPE": "disk"}
+        device_info(devices=self.data,lsblk=lsblk_sdb)
         sdb = device.Device("/dev/sdb")
+        device_info(devices=self.data,lsblk=lsblk_sdd)
         sdd = device.Device("/dev/sdd")
 
         assert sdb < sdd
@@ -522,20 +531,22 @@ class TestDeviceOrdering(object):
 class TestCephDiskDevice(object):
 
     def test_partlabel_lsblk(self, fake_call, device_info):
-        lsblk = {"TYPE": "disk", "PARTLABEL": ""}
+        lsblk = {"TYPE": "disk", "NAME": "sda", "PARTLABEL": ""}
         device_info(lsblk=lsblk)
         disk = device.CephDiskDevice(device.Device("/dev/sda"))
 
         assert disk.partlabel == ''
 
     def test_partlabel_blkid(self, fake_call, device_info):
+        lsblk = {"TYPE": "disk", "NAME": "sda", "PARTLABEL": "ceph data"}
         blkid = {"TYPE": "disk", "PARTLABEL": "ceph data"}
-        device_info(blkid=blkid)
+        device_info(blkid=blkid, lsblk=lsblk)
         disk = device.CephDiskDevice(device.Device("/dev/sda"))
 
         assert disk.partlabel == 'ceph data'
 
-    @pytest.mark.usefixtures("blkid_ceph_disk_member",
+    @pytest.mark.usefixtures("lsblk_ceph_disk_member",
+                             "blkid_ceph_disk_member",
                              "disable_kernel_queries")
     def test_is_member_blkid(self, fake_call, monkeypatch, patch_bluestore_label):
         disk = device.CephDiskDevice(device.Device("/dev/sda"))
@@ -545,14 +556,14 @@ class TestCephDiskDevice(object):
     @pytest.mark.usefixtures("lsblk_ceph_disk_member",
                              "disable_kernel_queries")
     def test_is_member_lsblk(self, fake_call, patch_bluestore_label, device_info):
-        lsblk = {"TYPE": "disk", "PARTLABEL": "ceph"}
+        lsblk = {"TYPE": "disk", "NAME": "sda", "PARTLABEL": "ceph"}
         device_info(lsblk=lsblk)
         disk = device.CephDiskDevice(device.Device("/dev/sda"))
 
         assert disk.is_member is True
 
     def test_unknown_type(self, fake_call, device_info):
-        lsblk = {"TYPE": "disk", "PARTLABEL": "gluster"}
+        lsblk = {"TYPE": "disk", "NAME": "sda", "PARTLABEL": "gluster"}
         device_info(lsblk=lsblk)
         disk = device.CephDiskDevice(device.Device("/dev/sda"))
 
@@ -560,7 +571,8 @@ class TestCephDiskDevice(object):
 
     ceph_types = ['data', 'wal', 'db', 'lockbox', 'journal', 'block']
 
-    @pytest.mark.usefixtures("blkid_ceph_disk_member",
+    @pytest.mark.usefixtures("lsblk_ceph_disk_member",
+                             "blkid_ceph_disk_member",
                              "disable_kernel_queries")
     def test_type_blkid(self, monkeypatch, fake_call, device_info, ceph_partlabel):
         disk = device.CephDiskDevice(device.Device("/dev/sda"))
index a3bacfa1c0cbdb6a98b1c7211d41a3c6347108a8..f101f4a6a2b9d78cc90bf129ad40d2101908cb71 100644 (file)
@@ -33,10 +33,17 @@ class Devices(object):
     """
 
     def __init__(self, filter_for_batch=False, with_lsm=False):
+        lvs = lvm.get_lvs()
+        lsblk_all = disk.lsblk_all()
+        all_devices_vgs = lvm.get_all_devices_vgs()
         if not sys_info.devices:
             sys_info.devices = disk.get_devices()
-        self.devices = [Device(k, with_lsm) for k in
-                            sys_info.devices.keys()]
+        self.devices = [Device(k,
+                               with_lsm,
+                               lvs=lvs,
+                               lsblk_all=lsblk_all,
+                               all_devices_vgs=all_devices_vgs) for k in
+                        sys_info.devices.keys()]
         if filter_for_batch:
             self.devices = [d for d in self.devices if d.available_lvm_batch]
 
@@ -89,18 +96,23 @@ class Device(object):
     # unittests
     lvs = []
 
-    def __init__(self, path, with_lsm=False):
+    def __init__(self, path, with_lsm=False, lvs=None, lsblk_all=None, all_devices_vgs=None):
         self.path = path
         # LVs can have a vg/lv path, while disks will have /dev/sda
         self.abspath = path
+        if not sys_info.devices:
+            sys_info.devices = disk.get_devices()
+        self.sys_api = sys_info.devices.get(self.abspath, {})
+        self.partitions = self._get_partitions()
         self.lv_api = None
-        self.lvs = []
+        self.lvs = [] if not lvs else lvs
+        self.lsblk_all = lsblk_all
+        self.all_devices_vgs = all_devices_vgs
         self.vgs = []
         self.vg_name = None
         self.lv_name = None
         self.disk_api = {}
-        self.blkid_api = {}
-        self.sys_api = {}
+        self.blkid_api = None
         self._exists = None
         self._is_lvm_member = None
         self._parse()
@@ -148,10 +160,12 @@ class Device(object):
     def __hash__(self):
         return hash(self.path)
 
+    def load_blkid_api(self):
+        if self.blkid_api is None:
+            self.blkid_api = disk.blkid(self.path)
+
     def _parse(self):
-        if not sys_info.devices:
-            sys_info.devices = disk.get_devices()
-        self.sys_api = sys_info.devices.get(self.abspath, {})
+        lv = None
         if not self.sys_api:
             # if no device was found check if we are a partition
             partname = self.abspath.split('/')[-1]
@@ -161,14 +175,27 @@ class Device(object):
                     self.sys_api = part
                     break
 
-        # if the path is not absolute, we have 'vg/lv', let's use LV name
-        # to get the LV.
-        if self.path[0] == '/':
-            lv = lvm.get_single_lv(filters={'lv_path': self.path})
+        if self.lvs:
+            for _lv in self.lvs:
+                # if the path is not absolute, we have 'vg/lv', let's use LV name
+                # to get the LV.
+                if self.path[0] == '/':
+                    if _lv.lv_path == self.path:
+                        lv = _lv
+                        break
+                else:
+                    vgname, lvname = self.path.split('/')
+                    if _lv.lv_name == lvname and _lv.vg_name == vgname:
+                        lv = _lv
+                        break
         else:
-            vgname, lvname = self.path.split('/')
-            lv = lvm.get_single_lv(filters={'lv_name': lvname,
-                                            'vg_name': vgname})
+            if self.path[0] == '/':
+                lv = lvm.get_single_lv(filters={'lv_path': self.path})
+            else:
+                vgname, lvname = self.path.split('/')
+                lv = lvm.get_single_lv(filters={'lv_name': lvname,
+                                                'vg_name': vgname})
+
         if lv:
             self.lv_api = lv
             self.lvs = [lv]
@@ -177,8 +204,12 @@ class Device(object):
             self.lv_name = lv.name
             self.ceph_device = lvm.is_ceph_device(lv)
         else:
-            dev = disk.lsblk(self.path)
-            self.blkid_api = disk.blkid(self.path)
+            if self.lsblk_all:
+                for dev in self.lsblk_all:
+                    if dev['NAME'] == os.path.basename(self.path):
+                        break
+            else:
+                dev = disk.lsblk(self.path)
             self.disk_api = dev
             device_type = dev.get('TYPE', '')
             # always check is this is an lvm member
@@ -281,29 +312,37 @@ class Device(object):
             # VGs, should we consider it as part of LVM? We choose not to
             # here, because most likely, we need to use VGs from this PV.
             self._is_lvm_member = False
-            for path in self._get_device_with_partitions_list():
-                vgs = lvm.get_device_vgs(path)
+            device_to_check = [self.abspath]
+            device_to_check.extend(self.partitions)
+
+            # a pv can only be in one vg, so this should be safe
+            # FIXME: While the above assumption holds, sda1 and sda2
+            # can each host a PV and VG. I think the vg_name property is
+            # actually unused (not 100% sure) and can simply be removed
+            vgs = None
+            for path in device_to_check:
+                if self.all_devices_vgs:
+                    for dev_vg in self.all_devices_vgs:
+                        if dev_vg.pv_name == path:
+                            vgs = [dev_vg]
+                else:
+                    vgs = lvm.get_device_vgs(path)
                 if vgs:
                     self.vgs.extend(vgs)
-                    # a pv can only be in one vg, so this should be safe
-                    # FIXME: While the above assumption holds, sda1 and sda2
-                    # can each host a PV and VG. I think the vg_name property is
-                    # actually unused (not 100% sure) and can simply be removed
                     self.vg_name = vgs[0]
                     self._is_lvm_member = True
                     self.lvs.extend(lvm.get_device_lvs(path))
-        return self._is_lvm_member
 
-    def _get_device_with_partitions_list(self):
+    def _get_partitions(self):
         """
         For block devices LVM can reside on the raw block device or on a
         partition. Return a list of paths to be checked for a pv.
         """
-        paths = [self.abspath]
+        partitions = []
         path_dir = os.path.dirname(self.abspath)
-        for part in self.sys_api.get('partitions', {}).keys():
-            paths.append(os.path.join(path_dir, part))
-        return paths
+        for partition in self.sys_api.get('partitions', {}).keys():
+            partitions.append(os.path.join(path_dir, partition))
+        return partitions
 
     @property
     def exists(self):
@@ -311,10 +350,12 @@ class Device(object):
 
     @property
     def has_fs(self):
+        self.load_blkid_api()
         return 'TYPE' in self.blkid_api
 
     @property
     def has_gpt_headers(self):
+        self.load_blkid_api()
         return self.blkid_api.get("PTTYPE") == "gpt"
 
     @property
@@ -362,14 +403,20 @@ class Device(object):
 
     @property
     def is_ceph_disk_member(self):
-        is_member = self.ceph_disk.is_member
+        def is_member(device):
+            return 'ceph' in device.get('PARTLABEL', '') or \
+                device.get('PARTTYPE', '') in ceph_disk_guids.keys()
+        # If we come from Devices(), self.lsblk_all is set already.
+        # Otherwise, we have to grab the data.
+        details = self.lsblk_all or disk.lsblk_all()
         if self.sys_api.get("partitions"):
             for part in self.sys_api.get("partitions").keys():
-                part = Device("/dev/%s" % part)
-                if part.is_ceph_disk_member:
-                    is_member = True
-                    break
-        return is_member
+                for dev in details:
+                    if dev['NAME'] == part:
+                        return is_member(dev)
+        else:
+            return is_member(self.disk_api)
+        raise RuntimeError(f"Couldn't check if device {self.path} is a ceph-disk member.")
 
     @property
     def has_bluestore_label(self):
@@ -381,6 +428,7 @@ class Device(object):
 
     @property
     def device_type(self):
+        self.load_blkid_api()
         if self.disk_api:
             return self.disk_api['TYPE']
         elif self.blkid_api:
@@ -396,6 +444,7 @@ class Device(object):
 
     @property
     def is_partition(self):
+        self.load_blkid_api()
         if self.disk_api:
             return self.disk_api['TYPE'] == 'part'
         elif self.blkid_api:
@@ -404,6 +453,7 @@ class Device(object):
 
     @property
     def is_device(self):
+        self.load_blkid_api()
         api = None
         if self.disk_api:
             api = self.disk_api
@@ -423,6 +473,7 @@ class Device(object):
         Only correct for LVs, device mappers, and partitions. Will report a ``None``
         for raw devices.
         """
+        self.load_blkid_api()
         crypt_reports = [self.blkid_api.get('TYPE', ''), self.disk_api.get('FSTYPE', '')]
         if self.is_lv:
             # if disk APIs are reporting this is encrypted use that:
index 88db0513817a95b6881939c426b3e1fe010fccbd..31d9edd690d0f93a4aa81664b8154cfc7c1ebb3c 100644 (file)
@@ -229,6 +229,11 @@ def _udevadm_info(device):
 
 
 def lsblk(device, columns=None, abspath=False):
+    return lsblk_all(device=device,
+                     columns=columns,
+                     abspath=abspath)
+
+def lsblk_all(device='', columns=None, abspath=False):
     """
     Create a dictionary of identifying values for a device using ``lsblk``.
     Each supported column is a key, in its *raw* format (all uppercase
@@ -241,6 +246,7 @@ def lsblk(device, columns=None, abspath=False):
 
          NAME  device name
         KNAME  internal kernel device name
+        PKNAME internal kernel parent device name
       MAJ:MIN  major:minor device number
        FSTYPE  filesystem type
    MOUNTPOINT  where the device is mounted
@@ -284,38 +290,46 @@ def lsblk(device, columns=None, abspath=False):
 
     Normal CLI output, as filtered by the flags in this function will look like ::
 
-        $ lsblk --nodeps -P -o NAME,KNAME,MAJ:MIN,FSTYPE,MOUNTPOINT
+        $ lsblk -P -o NAME,KNAME,PKNAME,MAJ:MIN,FSTYPE,MOUNTPOINT
         NAME="sda1" KNAME="sda1" MAJ:MIN="8:1" FSTYPE="ext4" MOUNTPOINT="/"
 
     :param columns: A list of columns to report as keys in its original form.
     :param abspath: Set the flag for absolute paths on the report
     """
     default_columns = [
-        'NAME', 'KNAME', 'MAJ:MIN', 'FSTYPE', 'MOUNTPOINT', 'LABEL', 'UUID',
-        'RO', 'RM', 'MODEL', 'SIZE', 'STATE', 'OWNER', 'GROUP', 'MODE',
+        'NAME', 'KNAME', 'PKNAME', 'MAJ:MIN', 'FSTYPE', 'MOUNTPOINT', 'LABEL',
+        'UUID', 'RO', 'RM', 'MODEL', 'SIZE', 'STATE', 'OWNER', 'GROUP', 'MODE',
         'ALIGNMENT', 'PHY-SEC', 'LOG-SEC', 'ROTA', 'SCHED', 'TYPE', 'DISC-ALN',
         'DISC-GRAN', 'DISC-MAX', 'DISC-ZERO', 'PKNAME', 'PARTLABEL'
     ]
-    device = device.rstrip('/')
     columns = columns or default_columns
-    # --nodeps -> Avoid adding children/parents to the device, only give information
-    #             on the actual device we are querying for
     # -P       -> Produce pairs of COLUMN="value"
     # -p       -> Return full paths to devices, not just the names, when ``abspath`` is set
     # -o       -> Use the columns specified or default ones provided by this function
-    base_command = ['lsblk', '--nodeps', '-P']
+    base_command = ['lsblk', '-P']
     if abspath:
         base_command.append('-p')
     base_command.append('-o')
     base_command.append(','.join(columns))
-    base_command.append(device)
+
     out, err, rc = process.call(base_command)
 
     if rc != 0:
-        return {}
+        raise RuntimeError(f"Error: {err}")
 
-    return _lsblk_parser(' '.join(out))
+    result = []
+
+    for line in out:
+        result.append(_lsblk_parser(line))
 
+    if not device:
+        return result
+
+    for dev in result:
+        if dev['NAME'] == os.path.basename(device):
+            return dev
+
+    raise RuntimeError(f"{device} not found in lsblk output")
 
 def is_device(dev):
     """
@@ -724,7 +738,7 @@ def is_locked_raw_device(disk_path):
     return 0
 
 
-def get_block_devs_lsblk():
+def get_block_devs_lsblk(device=''):
     '''
     This returns a list of lists with 3 items per inner list.
     KNAME - reflects the kernel device name , for example /dev/sda or /dev/dm-0
@@ -734,14 +748,15 @@ def get_block_devs_lsblk():
 
     '''
     cmd = ['lsblk', '-plno', 'KNAME,NAME,TYPE']
+    if device:
+        cmd.extend(['--nodeps', device])
     stdout, stderr, rc = process.call(cmd)
     # lsblk returns 1 on failure
     if rc == 1:
         raise OSError('lsblk returned failure, stderr: {}'.format(stderr))
     return [re.split(r'\s+', line) for line in stdout]
 
-
-def get_devices(_sys_block_path='/sys/block'):
+def get_devices(_sys_block_path='/sys/block', device=''):
     """
     Captures all available block devices as reported by lsblk.
     Additional interesting metadata like sectors, size, vendor,
@@ -754,7 +769,7 @@ def get_devices(_sys_block_path='/sys/block'):
 
     device_facts = {}
 
-    block_devs = get_block_devs_lsblk()
+    block_devs = get_block_devs_lsblk(device=device)
 
     for block in block_devs:
         devname = os.path.basename(block[0])