]> git.apps.os.sepia.ceph.com Git - ceph.git/commitdiff
ceph-volume: do not call get_device_vgs() per devices 47348/head
authorGuillaume Abrioux <gabrioux@redhat.com>
Tue, 19 Jul 2022 11:07:33 +0000 (11:07 +0000)
committerGuillaume Abrioux <gabrioux@redhat.com>
Fri, 29 Jul 2022 07:51:25 +0000 (09:51 +0200)
Call `ceph_volume.api.lvm.get_all_devices_vgs()` only once, instead of
calling `get_device_vgs()` per device. This avoids a large number of
subprocess calls that slow down the `ceph-volume inventory` command.

Fixes: https://tracker.ceph.com/issues/56623
Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>
(cherry picked from commit f5bd0f6da9a3c2f60df2ed6dfaa4c26831a3be37)

src/ceph-volume/ceph_volume/tests/util/test_device.py
src/ceph-volume/ceph_volume/util/device.py

index 540d5084d70eda47f1a6298059eb54a22c45e4fa..49a34d7eef1007399ba58aa9b3ecb3d38dd9b825 100644 (file)
@@ -65,9 +65,9 @@ class TestDevice(object):
         assert disk.vgs == []
 
     def test_vgs_is_not_empty(self, fake_call, device_info, monkeypatch):
-        vg = api.VolumeGroup(vg_name='foo/bar', vg_free_count=6,
+        vg = api.VolumeGroup(pv_name='/dev/nvme0n1', vg_name='foo/bar', vg_free_count=6,
                              vg_extent_size=1073741824)
-        monkeypatch.setattr(api, 'get_device_vgs', lambda x: [vg])
+        monkeypatch.setattr(api, 'get_all_devices_vgs', lambda : [vg])
         lsblk = {"TYPE": "disk", "NAME": "nvme0n1"}
         device_info(lsblk=lsblk)
         disk = device.Device("/dev/nvme0n1")
@@ -277,9 +277,9 @@ class TestDevice(object):
         assert disk.is_ceph_disk_member is False
 
     def test_existing_vg_available(self, fake_call, monkeypatch, device_info):
-        vg = api.VolumeGroup(vg_name='foo/bar', vg_free_count=1536,
+        vg = api.VolumeGroup(pv_name='/dev/nvme0n1', vg_name='foo/bar', vg_free_count=1536,
                              vg_extent_size=4194304)
-        monkeypatch.setattr(api, 'get_device_vgs', lambda x: [vg])
+        monkeypatch.setattr(api, 'get_all_devices_vgs', lambda : [vg])
         lsblk = {"TYPE": "disk", "NAME": "nvme0n1"}
         data = {"/dev/nvme0n1": {"size": "6442450944"}}
         device_info(devices=data, lsblk=lsblk)
@@ -289,9 +289,9 @@ class TestDevice(object):
         assert not disk.available_raw
 
     def test_existing_vg_too_small(self, fake_call, monkeypatch, device_info):
-        vg = api.VolumeGroup(vg_name='foo/bar', vg_free_count=4,
+        vg = api.VolumeGroup(pv_name='/dev/nvme0n1', vg_name='foo/bar', vg_free_count=4,
                              vg_extent_size=1073741824)
-        monkeypatch.setattr(api, 'get_device_vgs', lambda x: [vg])
+        monkeypatch.setattr(api, 'get_all_devices_vgs', lambda : [vg])
         lsblk = {"TYPE": "disk", "NAME": "nvme0n1"}
         data = {"/dev/nvme0n1": {"size": "6442450944"}}
         device_info(devices=data, lsblk=lsblk)
@@ -301,11 +301,11 @@ class TestDevice(object):
         assert not disk.available_raw
 
     def test_multiple_existing_vgs(self, fake_call, monkeypatch, device_info):
-        vg1 = api.VolumeGroup(vg_name='foo/bar', vg_free_count=1000,
+        vg1 = api.VolumeGroup(pv_name='/dev/nvme0n1', vg_name='foo/bar', vg_free_count=1000,
                              vg_extent_size=4194304)
-        vg2 = api.VolumeGroup(vg_name='foo/bar', vg_free_count=536,
+        vg2 = api.VolumeGroup(pv_name='/dev/nvme0n1', vg_name='foo/bar', vg_free_count=536,
                              vg_extent_size=4194304)
-        monkeypatch.setattr(api, 'get_device_vgs', lambda x: [vg1, vg2])
+        monkeypatch.setattr(api, 'get_all_devices_vgs', lambda : [vg1, vg2])
         lsblk = {"TYPE": "disk", "NAME": "nvme0n1"}
         data = {"/dev/nvme0n1": {"size": "6442450944"}}
         device_info(devices=data, lsblk=lsblk)
index f101f4a6a2b9d78cc90bf129ad40d2101908cb71..276cce9ff6174186e02465acafef8bfa2f2e2891 100644 (file)
@@ -320,13 +320,12 @@ class Device(object):
             # can each host a PV and VG. I think the vg_name property is
             # actually unused (not 100% sure) and can simply be removed
             vgs = None
+            if not self.all_devices_vgs:
+                self.all_devices_vgs = lvm.get_all_devices_vgs()
             for path in device_to_check:
-                if self.all_devices_vgs:
-                    for dev_vg in self.all_devices_vgs:
-                        if dev_vg.pv_name == path:
-                            vgs = [dev_vg]
-                else:
-                    vgs = lvm.get_device_vgs(path)
+                for dev_vg in self.all_devices_vgs:
+                    if dev_vg.pv_name == path:
+                        vgs = [dev_vg]
                 if vgs:
                     self.vgs.extend(vgs)
                     self.vg_name = vgs[0]