From 9a4470a00beb29e9792a3c1f96963c2e334c083b Mon Sep 17 00:00:00 2001
From: Guillaume Abrioux
Date: Tue, 19 Jul 2022 11:07:33 +0000
Subject: [PATCH] ceph-volume: do not call get_device_vgs() per device

Call `ceph_volume.api.lvm.get_all_devices_vgs` only once instead, so
we avoid a bunch of subprocess calls that slow down the
`ceph-volume inventory` command.

Fixes: https://tracker.ceph.com/issues/56623

Signed-off-by: Guillaume Abrioux
(cherry picked from commit f5bd0f6da9a3c2f60df2ed6dfaa4c26831a3be37)
---
 .../ceph_volume/tests/util/test_device.py  | 18 +++++++++---------
 src/ceph-volume/ceph_volume/util/device.py | 11 +++++------
 2 files changed, 14 insertions(+), 15 deletions(-)

diff --git a/src/ceph-volume/ceph_volume/tests/util/test_device.py b/src/ceph-volume/ceph_volume/tests/util/test_device.py
index 540d5084d70ed..49a34d7eef100 100644
--- a/src/ceph-volume/ceph_volume/tests/util/test_device.py
+++ b/src/ceph-volume/ceph_volume/tests/util/test_device.py
@@ -65,9 +65,9 @@ class TestDevice(object):
         assert disk.vgs == []
 
     def test_vgs_is_not_empty(self, fake_call, device_info, monkeypatch):
-        vg = api.VolumeGroup(vg_name='foo/bar', vg_free_count=6,
+        vg = api.VolumeGroup(pv_name='/dev/nvme0n1', vg_name='foo/bar', vg_free_count=6,
                              vg_extent_size=1073741824)
-        monkeypatch.setattr(api, 'get_device_vgs', lambda x: [vg])
+        monkeypatch.setattr(api, 'get_all_devices_vgs', lambda : [vg])
         lsblk = {"TYPE": "disk", "NAME": "nvme0n1"}
         device_info(lsblk=lsblk)
         disk = device.Device("/dev/nvme0n1")
@@ -277,9 +277,9 @@ class TestDevice(object):
         assert disk.is_ceph_disk_member is False
 
     def test_existing_vg_available(self, fake_call, monkeypatch, device_info):
-        vg = api.VolumeGroup(vg_name='foo/bar', vg_free_count=1536,
+        vg = api.VolumeGroup(pv_name='/dev/nvme0n1', vg_name='foo/bar', vg_free_count=1536,
                              vg_extent_size=4194304)
-        monkeypatch.setattr(api, 'get_device_vgs', lambda x: [vg])
+        monkeypatch.setattr(api, 'get_all_devices_vgs', lambda : [vg])
         lsblk = {"TYPE": "disk", "NAME": "nvme0n1"}
         data = {"/dev/nvme0n1": {"size": "6442450944"}}
         device_info(devices=data, lsblk=lsblk)
@@ -289,9 +289,9 @@ class TestDevice(object):
         assert not disk.available_raw
 
     def test_existing_vg_too_small(self, fake_call, monkeypatch, device_info):
-        vg = api.VolumeGroup(vg_name='foo/bar', vg_free_count=4,
+        vg = api.VolumeGroup(pv_name='/dev/nvme0n1', vg_name='foo/bar', vg_free_count=4,
                              vg_extent_size=1073741824)
-        monkeypatch.setattr(api, 'get_device_vgs', lambda x: [vg])
+        monkeypatch.setattr(api, 'get_all_devices_vgs', lambda : [vg])
         lsblk = {"TYPE": "disk", "NAME": "nvme0n1"}
         data = {"/dev/nvme0n1": {"size": "6442450944"}}
         device_info(devices=data, lsblk=lsblk)
@@ -301,11 +301,11 @@ class TestDevice(object):
         assert not disk.available_raw
 
     def test_multiple_existing_vgs(self, fake_call, monkeypatch, device_info):
-        vg1 = api.VolumeGroup(vg_name='foo/bar', vg_free_count=1000,
+        vg1 = api.VolumeGroup(pv_name='/dev/nvme0n1', vg_name='foo/bar', vg_free_count=1000,
                              vg_extent_size=4194304)
-        vg2 = api.VolumeGroup(vg_name='foo/bar', vg_free_count=536,
+        vg2 = api.VolumeGroup(pv_name='/dev/nvme0n1', vg_name='foo/bar', vg_free_count=536,
                              vg_extent_size=4194304)
-        monkeypatch.setattr(api, 'get_device_vgs', lambda x: [vg1, vg2])
+        monkeypatch.setattr(api, 'get_all_devices_vgs', lambda : [vg1, vg2])
         lsblk = {"TYPE": "disk", "NAME": "nvme0n1"}
         data = {"/dev/nvme0n1": {"size": "6442450944"}}
         device_info(devices=data, lsblk=lsblk)
diff --git a/src/ceph-volume/ceph_volume/util/device.py b/src/ceph-volume/ceph_volume/util/device.py
index f101f4a6a2b9d..276cce9ff6174 100644
--- a/src/ceph-volume/ceph_volume/util/device.py
+++ b/src/ceph-volume/ceph_volume/util/device.py
@@ -320,13 +320,12 @@ class Device(object):
         # can each host a PV and VG. I think the vg_name property is
         # actually unused (not 100% sure) and can simply be removed
         vgs = None
+        if not self.all_devices_vgs:
+            self.all_devices_vgs = lvm.get_all_devices_vgs()
         for path in device_to_check:
-            if self.all_devices_vgs:
-                for dev_vg in self.all_devices_vgs:
-                    if dev_vg.pv_name == path:
-                        vgs = [dev_vg]
-            else:
-                vgs = lvm.get_device_vgs(path)
+            for dev_vg in self.all_devices_vgs:
+                if dev_vg.pv_name == path:
+                    vgs = [dev_vg]
             if vgs:
                 self.vgs.extend(vgs)
                 self.vg_name = vgs[0]
-- 
2.39.5
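
For context on the pattern this patch applies: rather than shelling out
to LVM once per device path (one `vgs` subprocess per disk), the full VG
list is fetched a single time and then filtered in memory for each path.
Below is a minimal, self-contained sketch of that cache-then-filter
pattern; `VG`, `list_all_vgs_once`, and this trimmed-down `Device` class
are hypothetical stand-ins for illustration, not the actual ceph-volume
API.

    from dataclasses import dataclass
    from typing import List, Optional


    # Hypothetical stand-in for ceph_volume.api.lvm.VolumeGroup; only
    # the fields the lookup needs are modeled here.
    @dataclass
    class VG:
        pv_name: str
        vg_name: str


    def list_all_vgs_once() -> List[VG]:
        # Stand-in for lvm.get_all_devices_vgs(), which runs a single
        # `vgs` subprocess and parses every VG on the host.
        return [VG(pv_name='/dev/nvme0n1', vg_name='foo/bar')]


    class Device:
        # Class-level cache so N Device objects trigger at most one
        # (simulated) subprocess call instead of N.
        _all_devices_vgs: Optional[List[VG]] = None

        def __init__(self, path: str):
            self.path = path

        @property
        def vgs(self) -> List[VG]:
            if Device._all_devices_vgs is None:
                Device._all_devices_vgs = list_all_vgs_once()
            # Filter the cached list in memory rather than shelling
            # out once per path.
            return [vg for vg in Device._all_devices_vgs
                    if vg.pv_name == self.path]


    print(Device('/dev/nvme0n1').vgs)  # [VG(pv_name='/dev/nvme0n1', vg_name='foo/bar')]
    print(Device('/dev/sdb').vgs)      # [] -- no extra LVM call made

The real change caches on the Device instance via `self.all_devices_vgs`
(populated by `lvm.get_all_devices_vgs()`); the sketch uses a class-level
cache purely to keep the example self-contained.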