From f5bd0f6da9a3c2f60df2ed6dfaa4c26831a3be37 Mon Sep 17 00:00:00 2001
From: Guillaume Abrioux
Date: Tue, 19 Jul 2022 11:07:33 +0000
Subject: [PATCH] ceph-volume: do not call get_device_vgs() per device

Let's call `ceph_volume.api.lvm.get_all_devices_vgs` only once instead,
so we avoid a bunch of subprocess calls that slow down the
`ceph-volume inventory` command.

Fixes: https://tracker.ceph.com/issues/56623
Signed-off-by: Guillaume Abrioux
---
 .../ceph_volume/tests/util/test_device.py  | 18 +++++++++---------
 src/ceph-volume/ceph_volume/util/device.py | 11 +++++------
 2 files changed, 14 insertions(+), 15 deletions(-)

diff --git a/src/ceph-volume/ceph_volume/tests/util/test_device.py b/src/ceph-volume/ceph_volume/tests/util/test_device.py
index 9ad53f40fc557..457142cbc661a 100644
--- a/src/ceph-volume/ceph_volume/tests/util/test_device.py
+++ b/src/ceph-volume/ceph_volume/tests/util/test_device.py
@@ -66,9 +66,9 @@ class TestDevice(object):
         assert disk.vgs == []
 
     def test_vgs_is_not_empty(self, fake_call, device_info, monkeypatch):
-        vg = api.VolumeGroup(vg_name='foo/bar', vg_free_count=6,
+        vg = api.VolumeGroup(pv_name='/dev/nvme0n1', vg_name='foo/bar', vg_free_count=6,
                              vg_extent_size=1073741824)
-        monkeypatch.setattr(api, 'get_device_vgs', lambda x: [vg])
+        monkeypatch.setattr(api, 'get_all_devices_vgs', lambda : [vg])
         lsblk = {"TYPE": "disk", "NAME": "nvme0n1"}
         device_info(lsblk=lsblk)
         disk = device.Device("/dev/nvme0n1")
@@ -294,9 +294,9 @@ class TestDevice(object):
         assert disk.is_ceph_disk_member is False
 
     def test_existing_vg_available(self, fake_call, monkeypatch, device_info):
-        vg = api.VolumeGroup(vg_name='foo/bar', vg_free_count=1536,
+        vg = api.VolumeGroup(pv_name='/dev/nvme0n1', vg_name='foo/bar', vg_free_count=1536,
                              vg_extent_size=4194304)
-        monkeypatch.setattr(api, 'get_device_vgs', lambda x: [vg])
+        monkeypatch.setattr(api, 'get_all_devices_vgs', lambda : [vg])
         lsblk = {"TYPE": "disk", "NAME": "nvme0n1"}
         data = {"/dev/nvme0n1": {"size": "6442450944"}}
         device_info(devices=data, lsblk=lsblk)
@@ -306,9 +306,9 @@ class TestDevice(object):
         assert not disk.available_raw
 
     def test_existing_vg_too_small(self, fake_call, monkeypatch, device_info):
-        vg = api.VolumeGroup(vg_name='foo/bar', vg_free_count=4,
+        vg = api.VolumeGroup(pv_name='/dev/nvme0n1', vg_name='foo/bar', vg_free_count=4,
                              vg_extent_size=1073741824)
-        monkeypatch.setattr(api, 'get_device_vgs', lambda x: [vg])
+        monkeypatch.setattr(api, 'get_all_devices_vgs', lambda : [vg])
         lsblk = {"TYPE": "disk", "NAME": "nvme0n1"}
         data = {"/dev/nvme0n1": {"size": "6442450944"}}
         device_info(devices=data, lsblk=lsblk)
@@ -318,11 +318,11 @@ class TestDevice(object):
         assert not disk.available_raw
 
     def test_multiple_existing_vgs(self, fake_call, monkeypatch, device_info):
-        vg1 = api.VolumeGroup(vg_name='foo/bar', vg_free_count=1000,
+        vg1 = api.VolumeGroup(pv_name='/dev/nvme0n1', vg_name='foo/bar', vg_free_count=1000,
                               vg_extent_size=4194304)
-        vg2 = api.VolumeGroup(vg_name='foo/bar', vg_free_count=536,
+        vg2 = api.VolumeGroup(pv_name='/dev/nvme0n1', vg_name='foo/bar', vg_free_count=536,
                               vg_extent_size=4194304)
-        monkeypatch.setattr(api, 'get_device_vgs', lambda x: [vg1, vg2])
+        monkeypatch.setattr(api, 'get_all_devices_vgs', lambda : [vg1, vg2])
         lsblk = {"TYPE": "disk", "NAME": "nvme0n1"}
         data = {"/dev/nvme0n1": {"size": "6442450944"}}
         device_info(devices=data, lsblk=lsblk)
diff --git a/src/ceph-volume/ceph_volume/util/device.py b/src/ceph-volume/ceph_volume/util/device.py
index 9bc633e723a07..8ccb320238f46 100644
--- a/src/ceph-volume/ceph_volume/util/device.py
+++ b/src/ceph-volume/ceph_volume/util/device.py
@@ -324,13 +324,12 @@ class Device(object):
             # can each host a PV and VG. I think the vg_name property is
             # actually unused (not 100% sure) and can simply be removed
             vgs = None
+            if not self.all_devices_vgs:
+                self.all_devices_vgs = lvm.get_all_devices_vgs()
             for path in device_to_check:
-                if self.all_devices_vgs:
-                    for dev_vg in self.all_devices_vgs:
-                        if dev_vg.pv_name == path:
-                            vgs = [dev_vg]
-                else:
-                    vgs = lvm.get_device_vgs(path)
+                for dev_vg in self.all_devices_vgs:
+                    if dev_vg.pv_name == path:
+                        vgs = [dev_vg]
                 if vgs:
                     self.vgs.extend(vgs)
                     self.vg_name = vgs[0]
-- 
2.39.5
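
The change boils down to the usual "query once, filter in memory" pattern:
instead of spawning one `vgs` subprocess per device through
`lvm.get_device_vgs(path)`, the full PV/VG listing is fetched a single time
with `lvm.get_all_devices_vgs()` and each device path is matched against it
by `pv_name`. The following is only a minimal, standalone sketch of that
pattern; `FakeVG`, `list_all_vgs_once`, and `vgs_for_devices` are hypothetical
names used for illustration and are not part of the ceph-volume API.

# Sketch of the "query once, filter in memory" pattern used by the patch.
# FakeVG and list_all_vgs_once are hypothetical stand-ins; the real code
# uses ceph_volume.api.lvm.get_all_devices_vgs() and VolumeGroup objects.
from collections import namedtuple

FakeVG = namedtuple('FakeVG', ['pv_name', 'vg_name'])

def list_all_vgs_once():
    # In ceph-volume this corresponds to a single subprocess call that
    # lists every PV/VG pair on the host.
    return [
        FakeVG(pv_name='/dev/nvme0n1', vg_name='ceph-block-0'),
        FakeVG(pv_name='/dev/sdb', vg_name='ceph-block-1'),
    ]

def vgs_for_devices(paths):
    # One expensive listing, shared by every device we inspect.
    all_vgs = list_all_vgs_once()
    result = {}
    for path in paths:
        # Pure in-memory filtering replaces the per-device subprocess call.
        result[path] = [vg for vg in all_vgs if vg.pv_name == path]
    return result

if __name__ == '__main__':
    print(vgs_for_devices(['/dev/nvme0n1', '/dev/sdc']))

With N devices to inspect, this turns N subprocess invocations into one,
which is where the `ceph-volume inventory` speedup tracked in
https://tracker.ceph.com/issues/56623 comes from.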