From 09287524b351d40e9d8df2935fe8157ffc915d88 Mon Sep 17 00:00:00 2001
From: David Galloway
Date: Thu, 11 Dec 2025 18:32:46 -0500
Subject: [PATCH] testnode: Refactor lvm tag and support new dict

Signed-off-by: David Galloway
---
 roles/testnode/README.rst              |  31 ++++
 roles/testnode/tasks/configure_lvm.yml | 236 ++++++++++++++++++++-----
 roles/testnode/tasks/lvm.yml           |   3 +-
 3 files changed, 223 insertions(+), 47 deletions(-)

diff --git a/roles/testnode/README.rst b/roles/testnode/README.rst
index e27e307a..7961b955 100644
--- a/roles/testnode/README.rst
+++ b/roles/testnode/README.rst
@@ -246,6 +246,36 @@ An optional dictionary of filesystems you want created and where to mount them.
     fstype: xfs
     mountpoint: "/var/cache/fscache"
 
+A dictionary of drives to use as OSDs that you want provisioned with LVM. NOTE: the current implementation only processes the ``nvme`` group under ``osd_selector_lvm``; disks are matched by ``size_gb`` within an optional ``tolerance_gb`` (default 100). Use this **or** ``volume_groups{}`` but not both!::
+
+  osd_selector_lvm:
+    nvme:
+      size_gb: 1700
+      rotational: false
+      count: 2
+      vg_name: vg_nvme
+      lvs:
+        - name: lv_1
+          size: "400G"
+          scratch_dev: true
+        - name: lv_2
+          size: "400G"
+          scratch_dev: true
+        - name: lv_3
+          size: "400G"
+          scratch_dev: true
+        - name: lv_4
+          size: "400G"
+          scratch_dev: true
+        - name: lv_5
+          size: "100G"
+          scratch_dev: false
+    hdd:
+      size_gb: 11000
+      tolerance_gb: 3000
+      rotational: true
+      count: 8
+
 A dictionary of volume groups you want created. ``pvs`` should be a comma-delimited list. Example::
 
   volume_groups:
@@ -279,6 +309,7 @@ Setting ``quick_lvs_to_create`` will:
   # Example would create 4 logical volumes each using 25% of a volume group created using all non-root physical volumes
   quick_lvs_to_create: 4
 
+
 Define ``check_for_nvme: true`` in Ansible inventory group_vars (by machine type) if the testnode should have an NVMe device. This will include a few tasks to verify an NVMe device is present. 
If the drive is missing, the tasks will mark the testnode down in the paddles_ lock database so the node doesn't repeatedly fail jobs. Defaults to false:: check_for_nvme: false diff --git a/roles/testnode/tasks/configure_lvm.yml b/roles/testnode/tasks/configure_lvm.yml index c4c06896..0b58835b 100644 --- a/roles/testnode/tasks/configure_lvm.yml +++ b/roles/testnode/tasks/configure_lvm.yml @@ -1,58 +1,202 @@ --- -- name: Set root disk +############################################################################### +# PRE-TASKS: Apply to both osd_selector and quick_lvs_to_create methods +############################################################################### + +- name: Set root disk (exclude OS disk) set_fact: - root_disk: "{{ item.device | regex_replace('p?[0-9]+$', '') | regex_replace('^/dev/', '') }}" - with_items: "{{ ansible_mounts }}" - when: - - item.mount == '/' - - quick_lvs_to_create is defined + root_disk: >- + {{ + item.device + | regex_replace('p?[0-9]+$', '') + | regex_replace('^/dev/', '') + }} + loop: "{{ ansible_mounts }}" + when: item.mount == '/' -- name: Combine list of non-root disks +- name: Build candidate data disks (non-root, non-loop/ram/dm) set_fact: - disks_for_vg: "{{ ansible_devices.keys() | sort | reject('match',root_disk) | reject('match','loop') | reject('match','ram') | reject('match','dm-') | map('regex_replace','^','/dev/') | join(',') }}" - when: quick_lvs_to_create is defined + candidate_disks: >- + {{ + ansible_devices + | dict2items + | rejectattr('key', 'equalto', root_disk | default('')) + | rejectattr('key', 'match', '^loop') + | rejectattr('key', 'match', '^ram') + | rejectattr('key', 'match', '^dm-') + | list + }} -- set_fact: vg_name=vg_hdd - when: - - disks_for_vg is defined - - "'nvme' not in disks_for_vg" +############################################################################### +# PATH 1: size-based selector (osd_selector_lvm{}) 
+############################################################################### + +- block: + - name: Init list of disks_with_size + set_fact: + disks_with_size: [] + + # Compute size_gb (handles GB and TB) + rotational flag + - name: Populate disks_with_size + set_fact: + disks_with_size: >- + {{ + disks_with_size + [ { + 'name': item.key, + 'size_gb': ( + ( + (item.value.size | regex_replace(' .*$', '') | float) * 1024 + ) + if ('TB' in (item.value.size | default(''))) + else + (item.value.size | regex_replace(' .*$', '') | float) + ), + 'rotational': item.value.rotational | default('0') + } ] + }} + loop: "{{ candidate_disks | default([]) }}" + + - name: Compute matching OSD disks by size + rotational + vars: + sel: "{{ osd_selector_lvm.nvme }}" + min_size: "{{ (sel.size_gb | float) - (sel.tolerance_gb | default(100) | float) }}" + max_size: "{{ (sel.size_gb | float) + (sel.tolerance_gb | default(100) | float) }}" + rotational_flag: "{{ sel.rotational | default(false) }}" + set_fact: + matching_osd_disks: >- + {{ + disks_with_size + | selectattr('size_gb', '>=', min_size | float) + | selectattr('size_gb', '<=', max_size | float) + | selectattr( + 'rotational', + 'equalto', + (rotational_flag | ternary('1', '0')) + ) + | map(attribute='name') + | list + | sort + }} + + - name: Take first N OSD disks + set_fact: + osd_devices: >- + {{ + matching_osd_disks[0:(osd_selector_lvm.nvme.count | default(1) | int)] + }} + + - name: Ensure we found enough OSD disks + fail: + msg: >- + Wanted {{ osd_selector_lvm.nvme.count }} disks of ~{{ osd_selector_lvm.nvme.size_gb }} GB + (rotational={{ osd_selector_lvm.nvme.rotational }}), but only matched + {{ osd_devices | length }}: {{ osd_devices | default([]) }} + when: osd_devices | length < (osd_selector_lvm.nvme.count | default(1) | int) + + - name: Build volume_groups from selected OSD disks + set_fact: + volume_groups: >- + {{ + (volume_groups | default({})) + | combine({ + osd_selector_lvm.nvme.vg_name: { + 'pvs': 
osd_devices + | map('regex_replace', '^', '/dev/') + | join(',') + } + }) + }} + + - name: Build logical_volumes from osd_selector_lvm LV layout + set_fact: + logical_volumes: >- + {{ + (logical_volumes | default({})) + | combine({ + item.name: { + 'vg': osd_selector_lvm.nvme.vg_name, + 'size': item.size, + 'scratch_dev': item.scratch_dev | default(false) + } + }) + }} + loop: "{{ osd_selector_lvm.nvme.lvs | default([]) }}" -- set_fact: vg_name=vg_nvme when: - - disks_for_vg is defined - - "'nvme' in disks_for_vg" + - osd_selector_lvm is defined + - osd_selector_lvm.nvme is defined -- name: Create volume_groups dict - set_fact: - volume_groups: - "{'{{ vg_name }}': {'pvs': '{{ disks_for_vg }}' }}" - when: vg_name is defined +############################################################################### +# PATH 2: legacy quick_lvs_to_create (equal % across all non-root disks) +############################################################################### -# This isn't perfect but with the |int at the end, this'll just round down -# if quick_lvs_to_create won't divide evenly to make sure the VG doesn't run out of space -- name: Determine desired logical volume percentage size - set_fact: - quick_lv_size: "{{ (100 / quick_lvs_to_create|int)|int }}" - when: quick_lvs_to_create is defined +- block: + - name: Set root disk + set_fact: + root_disk: "{{ item.device | regex_replace('p?[0-9]+$', '') | regex_replace('^/dev/', '') }}" + with_items: "{{ ansible_mounts }}" + when: item.mount == '/' -- name: Create logical_volumes dict - set_fact: - logical_volumes: - "{ - {%- for lv in range(quick_lvs_to_create|int) -%} - 'lv_{{ lv + 1 }}': - { - 'vg': '{{ vg_name }}', - 'size': '{{ quick_lv_size }}%VG', - 'scratch_dev': true - } - {%- if not loop.last -%} - , - {%- endif -%} - {%- endfor -%} - }" - when: quick_lvs_to_create is defined + - name: Combine list of non-root disks + set_fact: + disks_for_vg: >- + {{ + ansible_devices.keys() + | sort + | reject('match', root_disk) + 
| reject('match', 'loop') + | reject('match', 'ram') + | reject('match', 'dm-') + | map('regex_replace','^','/dev/') + | join(',') + }} + + - set_fact: vg_name=vg_hdd + when: + - disks_for_vg is defined + - "'nvme' not in disks_for_vg" + + - set_fact: vg_name=vg_nvme + when: + - disks_for_vg is defined + - "'nvme' in disks_for_vg" + + - name: Create volume_groups dict + set_fact: + volume_groups: + "{'{{ vg_name }}': {'pvs': '{{ disks_for_vg }}' }}" + when: vg_name is defined + + # This isn't perfect but with the |int at the end, this'll just round down + # if quick_lvs_to_create won't divide evenly to make sure the VG doesn't run out of space + - name: Determine desired logical volume percentage size + set_fact: + quick_lv_size: "{{ (100 / quick_lvs_to_create|int)|int }}" + + - name: Create logical_volumes dict + set_fact: + logical_volumes: + "{ + {%- for lv in range(quick_lvs_to_create|int) -%} + 'lv_{{ lv + 1 }}': + { + 'vg': '{{ vg_name }}', + 'size': '{{ quick_lv_size }}%VG', + 'scratch_dev': true + } + {%- if not loop.last -%} + , + {%- endif -%} + {%- endfor -%} + }" + + when: + - quick_lvs_to_create is defined + - osd_selector_lvm is not defined +############################################################################### +# COMMON: create VGs/LVs and /scratch_devs +############################################################################### - name: "Create volume group(s)" lvg: vg: "{{ item.key }}" @@ -80,9 +224,9 @@ create: yes owner: root group: root - mode: 0644 + mode: '0644' line: "/dev/{{ item.value.vg }}/{{ item.key }}" with_dict: "{{ logical_volumes }}" when: - logical_volumes is defined - - item.value.scratch_dev is defined + - item.value.scratch_dev | default(false) diff --git a/roles/testnode/tasks/lvm.yml b/roles/testnode/tasks/lvm.yml index 9767bbc9..686f0473 100644 --- a/roles/testnode/tasks/lvm.yml +++ b/roles/testnode/tasks/lvm.yml @@ -10,4 +10,5 @@ - import_tasks: configure_lvm.yml when: (logical_volumes is defined) or 
(volume_groups is defined) or - (quick_lvs_to_create is defined) + (quick_lvs_to_create is defined) or + (osd_selector_lvm is defined) -- 2.47.3