ceph-volume: do not call get_device_vgs() per device 47168/head
author    Guillaume Abrioux <gabrioux@redhat.com>
          Tue, 19 Jul 2022 11:07:33 +0000 (11:07 +0000)
committer Guillaume Abrioux <gabrioux@redhat.com>
          Tue, 19 Jul 2022 13:18:20 +0000 (13:18 +0000)
Call `ceph_volume.api.lvm.get_all_devices_vgs` only once instead of calling
`get_device_vgs()` for each device. This avoids a bunch of subprocess calls
that slow down the `ceph-volume inventory` command.

Fixes: https://tracker.ceph.com/issues/56623
Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>
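
The change in a nutshell, as a minimal sketch: the two wrapper functions
below are illustrative only (they are not part of the commit); the calls
`get_device_vgs(path)`, `get_all_devices_vgs()` and the `pv_name` attribute
are the ones the diff itself exercises.

    from ceph_volume.api import lvm

    def vgs_per_device_old(paths):
        # old pattern: one `vgs` subprocess call per device path
        return {path: lvm.get_device_vgs(path) for path in paths}

    def vgs_per_device_new(paths):
        # new pattern: a single subprocess call, then filter in Python
        all_vgs = lvm.get_all_devices_vgs()
        return {path: [vg for vg in all_vgs if vg.pv_name == path]
                for path in paths}

For N devices the old pattern costs N subprocess invocations; the new one
costs exactly one, regardless of N.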
src/ceph-volume/ceph_volume/tests/util/test_device.py
src/ceph-volume/ceph_volume/util/device.py

index 9ad53f40fc557bb19d2b6e15acbe57c885f8b1c8..457142cbc661aaf73dc44e42b41798479b995b3c 100644 (file)
@@ -66,9 +66,9 @@ class TestDevice(object):
         assert disk.vgs == []
 
     def test_vgs_is_not_empty(self, fake_call, device_info, monkeypatch):
-        vg = api.VolumeGroup(vg_name='foo/bar', vg_free_count=6,
+        vg = api.VolumeGroup(pv_name='/dev/nvme0n1', vg_name='foo/bar', vg_free_count=6,
                              vg_extent_size=1073741824)
-        monkeypatch.setattr(api, 'get_device_vgs', lambda x: [vg])
+        monkeypatch.setattr(api, 'get_all_devices_vgs', lambda : [vg])
         lsblk = {"TYPE": "disk", "NAME": "nvme0n1"}
         device_info(lsblk=lsblk)
         disk = device.Device("/dev/nvme0n1")
@@ -294,9 +294,9 @@ class TestDevice(object):
         assert disk.is_ceph_disk_member is False
 
     def test_existing_vg_available(self, fake_call, monkeypatch, device_info):
-        vg = api.VolumeGroup(vg_name='foo/bar', vg_free_count=1536,
+        vg = api.VolumeGroup(pv_name='/dev/nvme0n1', vg_name='foo/bar', vg_free_count=1536,
                              vg_extent_size=4194304)
-        monkeypatch.setattr(api, 'get_device_vgs', lambda x: [vg])
+        monkeypatch.setattr(api, 'get_all_devices_vgs', lambda : [vg])
         lsblk = {"TYPE": "disk", "NAME": "nvme0n1"}
         data = {"/dev/nvme0n1": {"size": "6442450944"}}
         device_info(devices=data, lsblk=lsblk)
@@ -306,9 +306,9 @@ class TestDevice(object):
         assert not disk.available_raw
 
     def test_existing_vg_too_small(self, fake_call, monkeypatch, device_info):
-        vg = api.VolumeGroup(vg_name='foo/bar', vg_free_count=4,
+        vg = api.VolumeGroup(pv_name='/dev/nvme0n1', vg_name='foo/bar', vg_free_count=4,
                              vg_extent_size=1073741824)
-        monkeypatch.setattr(api, 'get_device_vgs', lambda x: [vg])
+        monkeypatch.setattr(api, 'get_all_devices_vgs', lambda : [vg])
         lsblk = {"TYPE": "disk", "NAME": "nvme0n1"}
         data = {"/dev/nvme0n1": {"size": "6442450944"}}
         device_info(devices=data, lsblk=lsblk)
@@ -318,11 +318,11 @@ class TestDevice(object):
         assert not disk.available_raw
 
     def test_multiple_existing_vgs(self, fake_call, monkeypatch, device_info):
-        vg1 = api.VolumeGroup(vg_name='foo/bar', vg_free_count=1000,
+        vg1 = api.VolumeGroup(pv_name='/dev/nvme0n1', vg_name='foo/bar', vg_free_count=1000,
                              vg_extent_size=4194304)
-        vg2 = api.VolumeGroup(vg_name='foo/bar', vg_free_count=536,
+        vg2 = api.VolumeGroup(pv_name='/dev/nvme0n1', vg_name='foo/bar', vg_free_count=536,
                              vg_extent_size=4194304)
-        monkeypatch.setattr(api, 'get_device_vgs', lambda x: [vg1, vg2])
+        monkeypatch.setattr(api, 'get_all_devices_vgs', lambda : [vg1, vg2])
         lsblk = {"TYPE": "disk", "NAME": "nvme0n1"}
         data = {"/dev/nvme0n1": {"size": "6442450944"}}
         device_info(devices=data, lsblk=lsblk)
index 9bc633e723a0784353c96f2f5a05b74665511366..8ccb320238f46124b860903aae01250c3850cfde 100644 (file)
@@ -324,13 +324,12 @@ class Device(object):
             # can each host a PV and VG. I think the vg_name property is
             # actually unused (not 100% sure) and can simply be removed
             vgs = None
+            if not self.all_devices_vgs:
+                self.all_devices_vgs = lvm.get_all_devices_vgs()
             for path in device_to_check:
-                if self.all_devices_vgs:
-                    for dev_vg in self.all_devices_vgs:
-                        if dev_vg.pv_name == path:
-                            vgs = [dev_vg]
-                else:
-                    vgs = lvm.get_device_vgs(path)
+                for dev_vg in self.all_devices_vgs:
+                    if dev_vg.pv_name == path:
+                        vgs = [dev_vg]
                 if vgs:
                     self.vgs.extend(vgs)
                     self.vg_name = vgs[0]
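
The hunk above also memoizes the listing: `self.all_devices_vgs` is filled
at most once and then reused for every path checked. A minimal sketch of
that pattern, with a hypothetical `DeviceSketch` class standing in for the
real `Device`:

    from ceph_volume.api import lvm

    class DeviceSketch:
        def __init__(self, path, all_devices_vgs=None):
            self.path = path
            # may be pre-populated by the caller (e.g. an inventory run
            # sharing one listing across all devices), else filled lazily
            self.all_devices_vgs = all_devices_vgs

        def device_vgs(self):
            if not self.all_devices_vgs:
                # at most one expensive subprocess call
                self.all_devices_vgs = lvm.get_all_devices_vgs()
            return [vg for vg in self.all_devices_vgs
                    if vg.pv_name == self.path]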