From b81ba3d33993d15a6ad0e15aa83f5e701e9ab14c Mon Sep 17 00:00:00 2001 From: David Galloway Date: Sun, 12 Oct 2025 19:34:42 -0400 Subject: [PATCH] Foo DNM Signed-off-by: David Galloway --- maas.yml | 1 + roles/maas/defaults/main.yml | 7 + roles/maas/handlers/main.yml | 11 + roles/maas/tasks/_auth_header.yml | 18 + roles/maas/tasks/add_machines.yml | 22 - roles/maas/tasks/api_auth_pretasks.yml | 23 + roles/maas/tasks/config_dhcpd_subnet.yml | 12 +- roles/maas/tasks/machines.yml | 180 +++ roles/maas/tasks/machines.yml.cli | 1064 +++++++++++++++++ .../maas/tasks/machines/_apply_one_iface.yml | 435 +++++++ roles/maas/tasks/machines/_build_indexes.yml | 101 ++ .../tasks/machines/_create_vlan_on_parent.yml | 43 + roles/maas/tasks/machines/_ensure_bond.yml | 332 +++++ .../machines/_fetch_vlans_for_fabric.yml | 30 + roles/maas/tasks/machines/_mark_broken.yml | 63 + roles/maas/tasks/machines/_plan_sets.yml | 53 + roles/maas/tasks/machines/_read_machines.yml | 26 + .../tasks/machines/_refresh_iface_facts.yml | 110 ++ roles/maas/tasks/machines/cleanup.yml | 56 + roles/maas/tasks/machines/create.yml | 36 + roles/maas/tasks/machines/delete.yml | 6 + roles/maas/tasks/machines/set_ipmi_creds.yml | 106 ++ roles/maas/tasks/machines/update.yml | 41 + roles/maas/tasks/main.yml | 19 +- roles/maas/tasks/networking.yml | 432 +++++++ roles/maas/tasks/networking/domain_create.yml | 22 + roles/maas/tasks/networking/fabric_create.yml | 19 + .../tasks/networking/fabric_vlans_read.yml | 20 + roles/maas/tasks/networking/space_create.yml | 19 + roles/maas/tasks/networking/subnet_apply.yml | 355 ++++++ .../tasks/networking/subnet_range_create.yml | 225 ++++ .../tasks/networking/vlan_build_index.yml | 22 + roles/maas/tasks/networking/vlan_create.yml | 32 + roles/maas/tasks/networking/vlan_update.yml | 95 ++ roles/maas/tasks/networking_subnet.yml | 133 +++ 35 files changed, 4139 insertions(+), 30 deletions(-) create mode 100644 roles/maas/handlers/main.yml create mode 100644 
roles/maas/tasks/_auth_header.yml delete mode 100644 roles/maas/tasks/add_machines.yml create mode 100644 roles/maas/tasks/api_auth_pretasks.yml create mode 100644 roles/maas/tasks/machines.yml create mode 100644 roles/maas/tasks/machines.yml.cli create mode 100644 roles/maas/tasks/machines/_apply_one_iface.yml create mode 100644 roles/maas/tasks/machines/_build_indexes.yml create mode 100644 roles/maas/tasks/machines/_create_vlan_on_parent.yml create mode 100644 roles/maas/tasks/machines/_ensure_bond.yml create mode 100644 roles/maas/tasks/machines/_fetch_vlans_for_fabric.yml create mode 100644 roles/maas/tasks/machines/_mark_broken.yml create mode 100644 roles/maas/tasks/machines/_plan_sets.yml create mode 100644 roles/maas/tasks/machines/_read_machines.yml create mode 100644 roles/maas/tasks/machines/_refresh_iface_facts.yml create mode 100644 roles/maas/tasks/machines/cleanup.yml create mode 100644 roles/maas/tasks/machines/create.yml create mode 100644 roles/maas/tasks/machines/delete.yml create mode 100644 roles/maas/tasks/machines/set_ipmi_creds.yml create mode 100644 roles/maas/tasks/machines/update.yml create mode 100644 roles/maas/tasks/networking.yml create mode 100644 roles/maas/tasks/networking/domain_create.yml create mode 100644 roles/maas/tasks/networking/fabric_create.yml create mode 100644 roles/maas/tasks/networking/fabric_vlans_read.yml create mode 100644 roles/maas/tasks/networking/space_create.yml create mode 100644 roles/maas/tasks/networking/subnet_apply.yml create mode 100644 roles/maas/tasks/networking/subnet_range_create.yml create mode 100644 roles/maas/tasks/networking/vlan_build_index.yml create mode 100644 roles/maas/tasks/networking/vlan_create.yml create mode 100644 roles/maas/tasks/networking/vlan_update.yml create mode 100644 roles/maas/tasks/networking_subnet.yml diff --git a/maas.yml b/maas.yml index f52a741b..7cbb992e 100644 --- a/maas.yml +++ b/maas.yml @@ -1,5 +1,6 @@ --- - hosts: maas roles: + - secrets - maas become: true 
diff --git a/roles/maas/defaults/main.yml b/roles/maas/defaults/main.yml index 4d899c71..5c900e7e 100644 --- a/roles/maas/defaults/main.yml +++ b/roles/maas/defaults/main.yml @@ -27,3 +27,10 @@ maas_ntp_external_only: "false" keys_repo: "https://github.com/ceph/keys" keys_branch: main keys_repo_path: "~/.cache/src/keys" + +# Should MAAS mark machines broken in order to update their network interface configurations in MAAS? +maas_force_machine_update: false + +# Override in secrets +maas_ipmi_username: ADMIN +maas_ipmi_password: ADMIN diff --git a/roles/maas/handlers/main.yml b/roles/maas/handlers/main.yml new file mode 100644 index 00000000..1e407788 --- /dev/null +++ b/roles/maas/handlers/main.yml @@ -0,0 +1,11 @@ +--- +- include_tasks: _auth_header.yml + listen: "Rebuild MAAS machine indexes" + +- name: Read machines from MAAS (handler) + listen: "Rebuild MAAS machine indexes" + include_tasks: machines/_read_machines.yml + +- name: Build machine indexes (handler) + listen: "Rebuild MAAS machine indexes" + include_tasks: machines/_build_indexes.yml diff --git a/roles/maas/tasks/_auth_header.yml b/roles/maas/tasks/_auth_header.yml new file mode 100644 index 00000000..3b93c81e --- /dev/null +++ b/roles/maas/tasks/_auth_header.yml @@ -0,0 +1,18 @@ +--- +# Build a FRESH OAuth header using the pre-encoded pieces from the pretasks. 
+# Requires: maas_ck_enc, maas_tk_enc, maas_sig_enc (set in api_auth_pretasks.yml) + +- name: Build OAuth header (fresh nonce/timestamp) + vars: + _nonce: "{{ lookup('community.general.random_string', length=24, upper=false, special=false) }}" + _ts: "{{ lookup('pipe', 'date +%s') }}" + set_fact: + maas_auth_header: >- + OAuth oauth_version="1.0", + oauth_signature_method="PLAINTEXT", + oauth_consumer_key="{{ maas_ck_enc }}", + oauth_token="{{ maas_tk_enc }}", + oauth_signature="{{ maas_sig_enc }}", + oauth_nonce="{{ _nonce | urlencode }}", + oauth_timestamp="{{ _ts }}" + no_log: true diff --git a/roles/maas/tasks/add_machines.yml b/roles/maas/tasks/add_machines.yml deleted file mode 100644 index 7fc1bfb4..00000000 --- a/roles/maas/tasks/add_machines.yml +++ /dev/null @@ -1,22 +0,0 @@ ---- -- name: Add all machines from inventory to MAAS - when: inventory_hostname in groups['maas_region_rack_server'] - tags: add_machines - block: - - name: Get existing machines in MAAS - command: "maas {{ maas_admin_username }} machines read" - register: existing_machines - - - name: Extract existing hostnames - set_fact: - existing_hostnames: "{{ existing_machines.stdout | from_json | map(attribute='hostname') | list }}" - - - name: Add Machines into MAAS - vars: - hostname: "{{ item.split('.')[0] }}" - mac_address: "{{ hostvars[item]['mac'] }}" - arch: "{{ hostvars[item]['arch'] }}" - when: hostname not in existing_hostnames and mac_address is defined and arch is defined - loop: "{{ groups['testnodes'] }}" - command: "maas {{ maas_admin_username }} machines create architecture={{ arch }} mac_addresses={{ mac_address }} hostname={{ item }} power_type=manual deployed=true" - diff --git a/roles/maas/tasks/api_auth_pretasks.yml b/roles/maas/tasks/api_auth_pretasks.yml new file mode 100644 index 00000000..7ae8f956 --- /dev/null +++ b/roles/maas/tasks/api_auth_pretasks.yml @@ -0,0 +1,23 @@ +--- +# Parse the MAAS API key ONCE and pre-encode the static OAuth pieces. 
+ +- name: Bail if no MAAS key + assert: + that: + - maas_api_key is defined + - (maas_api_key | length) > 0 + fail_msg: "maas_api_key not available." + +# Split key: :: +- name: Parse MAAS API key once + set_fact: + maas_ck_raw: "{{ (maas_api_key.split(':'))[0] }}" + maas_tk_raw: "{{ (maas_api_key.split(':'))[1] }}" + maas_ts_raw: "{{ (maas_api_key.split(':'))[2] }}" + +# Pre-encode static values used in every header +- name: Pre-encode OAuth static pieces + set_fact: + maas_ck_enc: "{{ maas_ck_raw | urlencode }}" + maas_tk_enc: "{{ maas_tk_raw | urlencode }}" + maas_sig_enc: "{{ ('&' ~ maas_ts_raw) | urlencode }}" diff --git a/roles/maas/tasks/config_dhcpd_subnet.yml b/roles/maas/tasks/config_dhcpd_subnet.yml index 10687f8d..dd44bd7f 100644 --- a/roles/maas/tasks/config_dhcpd_subnet.yml +++ b/roles/maas/tasks/config_dhcpd_subnet.yml @@ -13,9 +13,9 @@ existing_start_ips: "{{ ip_ranges_raw.stdout | from_json | map(attribute='start_ip') | list }}" existing_end_ips: "{{ ip_ranges_raw.stdout | from_json | map(attribute='end_ip') | list }}" - - name: Create IP Range for {{ subnet_name }} subnet - command: "maas {{ maas_admin_username }} ipranges create type={{ subnet_data.ip_range_type }} start_ip={{ subnet_data.start_ip }} end_ip={{ subnet_data.end_ip }}" - when: subnet_data.start_ip not in existing_start_ips and subnet_data.end_ip not in existing_end_ips +# - name: Create IP Range for {{ subnet_name }} subnet +# command: "maas {{ maas_admin_username }} ipranges create type={{ subnet_data.ip_range_type }} start_ip={{ subnet_data.start_ip }} end_ip={{ subnet_data.end_ip }}" +# when: subnet_data.start_ip not in existing_start_ips and subnet_data.end_ip not in existing_end_ips - name: Read maas subnet information command: "maas {{ maas_admin_username }} subnet read {{ subnet_data.cidr }}" @@ -28,7 +28,8 @@ vlan_id: "{{ (subnet_info.stdout | from_json).id }}" - name: Enable DHCP on {{ subnet_name }} subnet - command: "maas {{ maas_admin_username }} vlan update {{ 
fabric_name }} {{ vlan_vid }} dhcp_on=True primary_rack={{ groups['maas_region_rack_server'][0].split('.')[0] }} secondary_rack={{ groups['maas_rack_server'][0].split('.')[0] }}" + #command: "maas {{ maas_admin_username }} vlan update {{ fabric_name }} {{ vlan_vid }} dhcp_on=True primary_rack={{ groups['maas_region_rack_server'][0].split('.')[0] }} secondary_rack={{ groups['maas_rack_server'][0].split('.')[0] }}" + command: "maas {{ maas_admin_username }} vlan update {{ fabric_name }} {{ vlan_vid }} dhcp_on=True" # This section creates the directory where the snippets are going to be copied @@ -87,6 +88,9 @@ src: dhcpd.hosts.snippet.j2 dest: "{{ snippets_path }}/{{ subnet_name }}_hosts_snippet" register: dhcp_hosts_config + + - pause: + minutes: 500 # This section decodes the snippet files and creates the variables to add them into MAAS diff --git a/roles/maas/tasks/machines.yml b/roles/maas/tasks/machines.yml new file mode 100644 index 00000000..2ce3194e --- /dev/null +++ b/roles/maas/tasks/machines.yml @@ -0,0 +1,180 @@ +--- +################################################################################ +# API base +################################################################################ +- name: Set MAAS API base URL + set_fact: + _maas_api: "{{ maas_api_url | trim('/') }}/MAAS/api/2.0" + +- include_tasks: _auth_header.yml + +- include_tasks: machines/_read_machines.yml + +- include_tasks: machines/_build_indexes.yml + +- name: Ensure short hostnames are unique in MAAS + fail: + msg: "Duplicate short hostnames found in MAAS: {{ (_short_names | difference(_short_names | unique)) | unique | join(', ') }}" + when: (_short_names | difference(_short_names | unique)) | length > 0 + +# (Shared) initialize the list of nodes we will un-break later +- name: Init shared _marked_broken list + set_fact: + _marked_broken: "{{ hostvars['localhost']._marked_broken | default([]) }}" + delegate_to: localhost + run_once: true + +- include_tasks: machines/_plan_sets.yml + 
+#- name: Include create.yml for missing hosts +# include_tasks: machines/create.yml +# loop: "{{ _create_names }}" +# loop_control: +# label: "{{ item }}" +# vars: +# host: "{{ item }}" +# system_id: "{{ maas_short_to_id[item] | default(omit) }}" +# # If inventory uses FQDNs, this resolves to the inventory hostname; else returns the short +# inv_host: "{{ (inventory_by_short | default({})).get(item, item) }}" +# +## desired_pool: "{{ hostvars[(inventory_by_short | default({})).get(item, item)].maas_machine_pool | default(_pool_from_groups) }}" +# desired_arch: "{{ hostvars[(inventory_by_short | default({})).get(item, item)].maas_arch | default(maas_arch | default('amd64/generic')) }}" +# desired_domain: "{{ hostvars[(inventory_by_short | default({})).get(item, item)].maas_domain | default(maas_domain | default(omit)) }}" +# mac_addresses: >- +# {{ +# (hostvars[(inventory_by_short | default({})).get(item, item)].maas_interfaces | default([])) +# | map(attribute='prefix') +# | map('regex_replace', '$', '_mac') +# | map('extract', hostvars[(inventory_by_short | default({})).get(item, item)]) +# | select('defined') +# | list +# }} +# tags: create_machines + +# CREATE: loop over SHORT names only +- name: Include create.yml for missing hosts + include_tasks: machines/create.yml + loop: "{{ _create_short }}" + loop_control: + label: "{{ item }}" + vars: + # short name we planned against + host: "{{ item }}" + + # creating: there should be no system_id; keep safe default + system_id: "{{ maas_short_to_id[item] | default(omit) }}" + + # resolve inventory host (FQDN if inventory uses it) + inv_host: "{{ (inventory_by_short | default({})).get(item, item) }}" + + desired_arch: "{{ hostvars[(inventory_by_short | default({})).get(item, item)].maas_arch + | default(maas_arch | default('amd64/generic')) }}" + desired_domain: "{{ hostvars[(inventory_by_short | default({})).get(item, item)].maas_domain + | default(maas_domain | default(omit)) }}" + + # collect MACs from inventory: 
for each iface prefix, read _mac var + mac_addresses: >- + {{ + (hostvars[(inventory_by_short | default({})).get(item, item)].maas_interfaces | default([])) + | map(attribute='prefix') + | map('regex_replace', '$', '_mac') + | map('extract', hostvars[(inventory_by_short | default({})).get(item, item)]) + | select('defined') + | list + }} + tags: create_machines + +# Create machines just creates a skeleton machine entry. +# We called a handler to re-read all the machines from MaaS and update +# the _update_names list. +- meta: flush_handlers + +- name: Set timestamp for when machines get marked broken + set_fact: + broken_at: "{{ lookup('pipe', 'date +%Y-%m-%d\\ %H:%M:%S') }}" + +- include_tasks: machines/_plan_sets.yml + +# UPDATE: loop over SHORT names only +- name: Include update.yml for existing hosts + include_tasks: machines/update.yml + loop: "{{ _update_short }}" + loop_control: + label: "{{ item }}" + vars: + # MAAS object for this short name (safe default to {}) + existing: "{{ maas_by_short[item] | default({}) }}" + + # updating requires a real system_id; keep strict so we notice problems + system_id: "{{ maas_short_to_id[item] }}" + + # status map may be absent during initial runs; keep safe default + system_status: "{{ maas_host_to_status[item] | default('Unknown') }}" + + host: "{{ item }}" + inv_host: "{{ (inventory_by_short | default({})).get(item, item) }}" + + desired_arch: "{{ hostvars[(inventory_by_short | default({})).get(item, item)].maas_arch + | default(maas_arch | default('amd64/generic')) }}" + desired_domain: "{{ hostvars[(inventory_by_short | default({})).get(item, item)].maas_domain + | default(maas_domain | default(omit)) }}" + tags: update_machines + +- pause: + +#- name: Include update.yml for existing hosts +# include_tasks: machines/update.yml +# loop: "{{ _update_names }}" +# loop_control: +# label: "{{ item }}" +# vars: +# existing: "{{ maas_by_short[item] | default({}) }}" +# system_id: "{{ maas_short_to_id[item] }}" +# 
system_status: "{{ maas_host_to_status[item] }}" +# host: "{{ item }}" +# # If inventory uses FQDNs, this resolves to the inventory hostname; else returns the short +# inv_host: "{{ (inventory_by_short | default({})).get(item, item) }}" +# +## desired_pool: "{{ hostvars[(inventory_by_short | default({})).get(item, item)].maas_machine_pool | default(_pool_from_groups) }}" +# desired_arch: "{{ hostvars[(inventory_by_short | default({})).get(item, item)].maas_arch | default(maas_arch | default('amd64/generic')) }}" +# desired_domain: "{{ hostvars[(inventory_by_short | default({})).get(item, item)].maas_domain | default(maas_domain | default(omit)) }}" +# tags: update_machines + +- include_vars: "{{ secrets_path }}/ipmi.yml" + failed_when: false + +# Apply IPMI creds for all hosts we can resolve to a system_id +- name: Include set_ipmi_creds.yml + include_tasks: machines/set_ipmi_creds.yml + loop: "{{ _ipmi_with_id | default([]) }}" + loop_control: + loop_var: ipmi_short + label: "{{ ipmi_short }}" + vars: + host: "{{ ipmi_short }}" # short name + system_id: "{{ maas_short_to_id[ipmi_short] }}" # guaranteed by pre-filter + # If inventory uses FQDNs, resolve to inventory hostname; else short + inv_host: "{{ (inventory_by_short | default({})).get(ipmi_short, ipmi_short) }}" + when: + - power_user is defined + - power_pass is defined + tags: + - ipmi + + +- name: Include delete.yml for extra hosts + include_tasks: machines/delete.yml + loop: "{{ _delete_names }}" + loop_control: + label: "{{ item }}" + vars: + host: "{{ item }}" + system_id: "{{ maas_short_to_id[item] }}" + # If inventory uses FQDNs, this resolves to the inventory hostname; else returns the short + inv_host: "{{ (inventory_by_short | default({})).get(item, item) }}" + when: (maas_delete_hosts | default(false)) | bool + +- name: Include cleanup.yml when we marked nodes broken + include_tasks: machines/cleanup.yml + when: _marked_broken | default([]) | length > 0 + run_once: true diff --git 
a/roles/maas/tasks/machines.yml.cli b/roles/maas/tasks/machines.yml.cli new file mode 100644 index 00000000..b5bbcaf5 --- /dev/null +++ b/roles/maas/tasks/machines.yml.cli @@ -0,0 +1,1064 @@ +--- +- name: Add all machines from inventory to MAAS + when: inventory_hostname in groups['maas_region_rack_server'] + tags: machines + block: + + - name: Read machines from MAAS + ansible.builtin.command: + argv: [ maas, "{{ maas_admin_username }}", machines, read ] + register: maas_read + + - name: Parse MAAS machines JSON + ansible.builtin.set_fact: + maas_nodes_list: "{{ (maas_read.stdout | from_json) | list }}" + + - name: Init MAAS map + ansible.builtin.set_fact: + maas_by_hostname: {} + + - name: Populate MAAS map + vars: + boot_mac: >- + {{ + ( + (item.boot_interface.mac_address + if (item.boot_interface is defined and item.boot_interface and item.boot_interface.mac_address is defined) + else (item.interface_set | selectattr('mac_address','defined') | list | first).mac_address + ) | default('') + ) | lower + }} + boot_ip: >- + {{ + ( + ( + (item.boot_interface.links | selectattr('ip_address','defined') | list | first).ip_address + if (item.boot_interface is defined and item.boot_interface and item.boot_interface.links | default([])) + else (item.ip_addresses | first) + ) | default('') + ) + }} + loop: "{{ maas_nodes_list }}" + loop_control: { label: "{{ item.hostname | default('UNKNOWN') }}" } + ansible.builtin.set_fact: + maas_by_hostname: >- + {{ + maas_by_hostname | combine({ + (item.hostname | lower): { + 'system_id': item.system_id | default(''), + 'arch': item.architecture | default(''), + 'mac': boot_mac, + 'power_type': item.power_type | default(''), + 'ip': boot_ip, + 'status_name': item.status_name | default('') + } + }) + }} + + - name: Init desired inventory map + ansible.builtin.set_fact: + desired_by_hostname: {} + + - name: Populate desired map from inventory + vars: + node: "{{ item }}" + hostname: "{{ node.split('.')[0] | lower }}" + boot_mac_key: "{{ 
hostvars[node]['maas_boot_mac_var'] | default(maas_boot_mac_var | default('ext_pere_mac')) }}" + want_mac_raw: "{{ hostvars[node][boot_mac_key] | default('') }}" + want_mac: "{{ want_mac_raw | lower }}" + boot_ip_key: "{{ hostvars[node]['maas_boot_ip_var'] | default(maas_boot_ip_var | default('ext_pere_ip')) }}" + want_ip: "{{ hostvars[node][boot_ip_key] | default('') }}" + want_arch: "{{ hostvars[node].get('arch', hostvars[node].get('maas_arch', maas_arch | default('amd64/generic'))) }}" + want_power: "{{ 'ipmi' if (hostvars[node].ipmi is defined and hostvars[node].ipmi|length>0) else hostvars[node].get('power_type','manual') }}" + loop: "{{ groups['testnodes'] | default([]) }}" + loop_control: { label: "{{ item }}" } + ansible.builtin.set_fact: + desired_by_hostname: >- + {{ + desired_by_hostname | combine({ + hostname: { + 'hostname': hostname, + 'mac': want_mac, + 'arch': want_arch, + 'power_type': want_power, + 'ip': want_ip, + 'ipmi_address': hostvars[node].ipmi | default(''), + 'current_state': (maas_by_hostname.get(hostname, {}).status_name | default('')) + } + }) + }} + + - name: Assert each node has boot MAC and arch + vars: + node: "{{ item }}" + hostname: "{{ node.split('.')[0] | lower }}" + boot_mac_key: "{{ hostvars[node]['maas_boot_mac_var'] | default(maas_boot_mac_var | default('ext_pere_mac')) }}" + loop: "{{ groups['testnodes'] | default([]) }}" + loop_control: { label: "{{ item }}" } + ansible.builtin.assert: + that: + - hostvars[node][boot_mac_key] is defined + - (hostvars[node].get('arch', hostvars[node].get('maas_arch', maas_arch | default('amd64/generic')))) | string | length > 0 + + - name: Compute hosts to create + ansible.builtin.set_fact: + to_create: >- + {{ + (desired_by_hostname.keys() | difference(maas_by_hostname.keys())) + | map('extract', desired_by_hostname) + | list + }} + + # A) Try IPMI on each create-candidate + - name: Probe IPMI for create candidates + when: to_create | length > 0 + loop: "{{ to_create }}" + loop_control: { 
label: "{{ item.hostname }} -> {{ item.ipmi_address | default('') }}" } + ansible.builtin.command: + argv: + - ipmitool + - -I + - lanplus + - -H + - "{{ item.ipmi_address }}" + - -U + - "{{ maas_ipmi_username }}" + - -P + - "{{ maas_ipmi_password }}" + - -N + - "1" + - -R + - "1" + - chassis + - power + - status + register: ipmi_probe_create + changed_when: false + failed_when: false + + - name: Build IPMI OK map for creates + when: ipmi_probe_create is defined + ansible.builtin.set_fact: + ipmi_ok_create_map: {} + + - name: Accumulate IPMI OK map for creates + when: ipmi_probe_create is defined + loop: "{{ ipmi_probe_create.results }}" + loop_control: { label: "{{ item.item.hostname }} rc={{ item.rc }}" } + ansible.builtin.set_fact: + ipmi_ok_create_map: >- + {{ + (ipmi_ok_create_map | default({})) + | combine({ (item.item.hostname): ((item.rc | int) == 0) }) + }} + + # C) Rewrite to_create so power_type is 'ipmi' only if ipmi_ok else 'manual' + # init an empty list we’ll fill + - name: Init effective create list + ansible.builtin.set_fact: + to_create_effective: [] + + # append each host with power_type decided by the probe result + - name: Build effective create list (ipmi if reachable else manual) + when: to_create | length > 0 + loop: "{{ to_create }}" + loop_control: { label: "{{ item.hostname }}" } + ansible.builtin.set_fact: + to_create_effective: >- + {{ + (to_create_effective | default([])) + + [ item | combine({ + 'power_type': (ipmi_ok_create_map | default({})).get(item.hostname, false) + | ternary('ipmi','manual') + }) ] + }} + + # replace the original list + - name: Apply effective create list + when: to_create_effective | length > 0 + ansible.builtin.set_fact: + to_create: "{{ to_create_effective }}" + + - name: Compute hosts to update + vars: + both_keys: "{{ desired_by_hostname.keys() | intersect(maas_by_hostname.keys()) }}" + diffs: >- + {%- set out = [] -%} + {%- for k in both_keys -%} + {%- set d = desired_by_hostname[k] -%} + {%- set m = 
maas_by_hostname[k] -%} + {%- set drift = [] -%} + {%- if (d.mac | default('')) != (m.mac | default('')) -%}{%- set _ = drift.append('mac') -%}{%- endif -%} + {%- if (d.arch | default('')) != (m.arch | default('')) -%}{%- set _ = drift.append('arch') -%}{%- endif -%} + {%- if (d.power_type | default('')) != (m.power_type | default('')) -%}{%- set _ = drift.append('power_type') -%}{%- endif -%} + {%- set ip_drift = ((d.ip | default('')) and ((d.ip | default('')) != (m.ip | default('')))) -%} + {%- if drift | length > 0 or ip_drift -%} + {%- set _ = out.append({ + 'hostname': k, + 'mac': d.mac, + 'arch': d.arch, + 'power_type': d.power_type, + 'want_ip': d.ip, + 'have_ip': m.ip | default(''), + 'ip_drift': ip_drift, + 'drift': drift, + 'system_id': m.system_id, + 'ipmi_address': d.ipmi_address | default('') + }) -%} + {%- endif -%} + {%- endfor -%} + {{ out }} + ansible.builtin.set_fact: + to_update: "{{ diffs }}" + + - name: Create missing machines in MAAS + when: to_create | length > 0 + loop: "{{ to_create }}" + loop_control: { label: "{{ item.hostname }}" } + ansible.builtin.command: + argv: + - maas + - "{{ maas_admin_username }}" + - machines + - create + - "architecture={{ item.arch }}" + - "mac_addresses={{ item.mac }}" + - "hostname={{ item.hostname }}" + - "power_type={{ item.power_type | default('manual') }}" +# - "deployed=true" + + - name: Re-read machines from MAAS after creates + when: to_create | default([]) | length > 0 + ansible.builtin.command: + argv: [ maas, "{{ maas_admin_username }}", machines, read ] + register: maas_read_after_create + changed_when: false + + - name: Parse machines JSON (post-create) + when: maas_read_after_create is defined and (maas_read_after_create.stdout | default('')) | length > 0 + ansible.builtin.set_fact: + maas_nodes_list: "{{ (maas_read_after_create.stdout | from_json) | list }}" + + - name: Rebuild maas_by_hostname (post-create) + when: maas_read_after_create is defined and (maas_read_after_create.stdout | 
default('')) | length > 0 + vars: + boot_mac: >- + {{ + ( + (item.boot_interface.mac_address + if (item.boot_interface is defined and item.boot_interface and item.boot_interface.mac_address is defined) + else (item.interface_set | selectattr('mac_address','defined') | list | first).mac_address + ) | default('') | lower }} + boot_ip: >- + {{ + ( + ( + (item.boot_interface.links | selectattr('ip_address','defined') | list | first).ip_address + if (item.boot_interface is defined and item.boot_interface and item.boot_interface.links | default([])) + else (item.ip_addresses | first) + ) | default('') + ) + }} + loop: "{{ maas_nodes_list }}" + loop_control: { label: "{{ item.hostname | default('UNKNOWN') }}" } + ansible.builtin.set_fact: + maas_by_hostname: >- + {{ + (maas_by_hostname | default({})) | combine({ + (item.hostname | lower): { + 'system_id': item.system_id | default(''), + 'arch': item.architecture | default(''), + 'mac': boot_mac, + 'power_type': item.power_type | default(''), + 'ip': boot_ip, + 'status_name': item.status_name | default('') + } + }) + }} + + - name: Build desired physical MAC set per host + vars: + node: "{{ item }}" + hostname: "{{ node.split('.')[0] | lower }}" + # keys must come from the NODE (and coerce to strings to support names like "25Gb_2") + keys: "{{ (hostvars[node].maas_mac_keys | default([])) | map('string') | list }}" + # extract values safely, then default missing ones to '' + macs_raw: >- + {{ + (keys | map('extract', hostvars[node]) | list) + | map('default','') + | list + }} + desired_macs: "{{ macs_raw | reject('equalto','') | map('lower') | list | unique }}" + loop: "{{ groups['testnodes'] | default([]) }}" + loop_control: { label: "{{ item }}" } + ansible.builtin.set_fact: + desired_phys_macs: "{{ (desired_phys_macs | default({})) | combine({ hostname: desired_macs }) }}" + + - name: Read MAAS interfaces for each host + vars: + hostname: "{{ item.split('.')[0] | lower }}" + sid: "{{ maas_by_hostname.get(hostname, 
{}).get('system_id') | default('') }}" + loop: "{{ groups['testnodes'] | default([]) }}" + loop_control: { label: "{{ item }} (sid={{ sid | default('') }})" } + when: sid | length > 0 + ansible.builtin.command: + argv: [ maas, "{{ maas_admin_username }}", interfaces, read, "{{ sid }}" ] + register: iface_reads + changed_when: false + + - name: Index existing physical interfaces by host (normalized) + ansible.builtin.set_fact: + existing_phys_by_host: >- + {{ + dict( + iface_reads.results + | selectattr('stdout','defined') + | map(attribute='item') | map('split','.') | map('first') | list + | zip( + iface_reads.results + | map(attribute='stdout') | map('from_json') | list + ) + ) + }} + + - name: Show desired vs existing MACs (debug) + vars: + h: "{{ item.split('.')[0] | lower }}" + loop: "{{ groups['testnodes'] | default([]) }}" + loop_control: { label: "{{ item }}" } + ansible.builtin.debug: + msg: + desired: "{{ desired_phys_macs[h] | default([]) }}" + have: "{{ (existing_phys_by_host[h] | default([])) | selectattr('type','equalto','physical') | map(attribute='mac_address') | list }}" + + - name: Compute phys interface drift + mac->id per host + vars: + hostname: "{{ item }}" + interfaces: "{{ existing_phys_by_host[hostname] | default([]) }}" + phys_ifaces: "{{ interfaces | selectattr('type','equalto','physical') | list }}" + have_macs: "{{ phys_ifaces | map(attribute='mac_address') | map('lower') | list }}" + want_macs: "{{ desired_phys_macs[hostname] | default([]) }}" + mac_to_id: >- + {{ dict( + (phys_ifaces | map(attribute='mac_address') | map('lower') | list) + | zip(phys_ifaces | map(attribute='id') | list) + ) + }} + missing_macs: "{{ want_macs | difference(have_macs) }}" + extra_macs: "{{ have_macs | difference(want_macs) }}" + loop: "{{ (desired_phys_macs | default({})).keys() | list }}" + loop_control: { label: "{{ item }}" } + ansible.builtin.set_fact: + iface_drift: "{{ (iface_drift | default({})) | combine({ hostname: { + 'missing': missing_macs, + 
'extra': extra_macs, + 'mac_to_id': mac_to_id + }}) }}" + + - name: Build phys_create_list + ansible.builtin.set_fact: + phys_create_list: >- + {%- set out = [] -%} + {%- for h, want_macs in (desired_phys_macs | default({})).items() -%} + {%- set sid = (maas_by_hostname[h].system_id | default('')) -%} + {%- set missing = (iface_drift[h].missing | default([])) -%} + {%- for m in missing -%} + {%- set _ = out.append({'hostname': h, 'sid': sid, 'mac': m}) -%} + {%- endfor -%} + {%- endfor -%} + {{ out }} + + - name: Define allowed states for NIC changes + ansible.builtin.set_fact: + maas_allowed_states_for_phys: "{{ maas_allowed_states_for_phys | default(['New','Ready','Allocated','Broken']) }}" + + - name: Ensure status_name_map exists (hostname -> status_name) + when: status_name_map is not defined + ansible.builtin.set_fact: + status_name_map: >- + {{ + dict( + (maas_nodes_list | map(attribute='hostname') | map('lower') | list) + | zip(maas_nodes_list | map(attribute='status_name') | list) + ) + }} + + - name: Split phys_create_list by eligibility (simple & clear) + ansible.builtin.set_fact: + phys_create_eligible: [] + phys_create_ineligible: [] + + - name: Accumulate phys_create elig / inelig + vars: + eligible_states: "{{ maas_allowed_states_for_phys }}" + st: "{{ status_name_map.get(item.hostname) | default('') }}" + loop: "{{ phys_create_list | default([]) }}" + loop_control: { label: "{{ item.hostname }} -> {{ st }}" } + ansible.builtin.set_fact: + phys_create_eligible: "{{ phys_create_eligible + [item] if st in eligible_states else phys_create_eligible }}" + phys_create_ineligible: "{{ phys_create_ineligible + [item] if st not in eligible_states else phys_create_ineligible }}" + + - name: Create missing physical interfaces in MAAS (eligible hosts) + when: phys_create_eligible | length > 0 + loop: "{{ phys_create_eligible }}" + loop_control: { label: "{{ item.hostname }} -> {{ item.mac }}" } + ansible.builtin.command: + argv: + - maas + - "{{ 
maas_admin_username }}" + - interfaces + - create-physical + - "{{ item.sid }}" + - "mac_address={{ item.mac }}" + register: phys_create_results + changed_when: true + + - name: Re-read interfaces after physical creates + when: phys_create_eligible | length > 0 + loop: "{{ phys_create_eligible | map(attribute='sid') | unique | list }}" + loop_control: { label: "{{ item }}" } + ansible.builtin.command: + argv: [ maas, "{{ maas_admin_username }}", interfaces, read, "{{ item }}" ] + register: iface_reads_after_phys_create + changed_when: false + + - name: Record phys-create skipped due to state (force=false) + when: + - not (maas_force_machine_update | default(false) | bool) + - (phys_create_ineligible | length) > 0 + ansible.builtin.set_fact: + machines_skipped_due_to_state: >- + {{ + (machines_skipped_due_to_state | default([])) + + (phys_create_ineligible | map(attribute='hostname') | list) + }} + + - name: "Mark {{ item }} broken to update physical interfaces" + when: + - (maas_force_machine_update | default(false) | bool) + - (phys_create_ineligible | length) > 0 + loop: "{{ phys_create_ineligible | map(attribute='sid') | unique | list }}" + loop_control: { label: "{{ item }}" } + ansible.builtin.command: + argv: [ maas, "{{ maas_admin_username }}", machine, mark-broken, "{{ item }}" ] + register: phys_force_mark_broken + failed_when: > + (phys_force_mark_broken.rc != 0) + and ('No rack controllers can access the BMC' not in (phys_force_mark_broken.stdout | default(''))) + changed_when: true + + - name: Create physical interfaces (while broken) + when: + - (maas_force_machine_update | default(false) | bool) + - (phys_create_ineligible | length) > 0 + loop: "{{ phys_create_ineligible }}" + loop_control: { label: "{{ item.hostname }} -> {{ item.mac }}" } + ansible.builtin.command: + argv: + - maas + - "{{ maas_admin_username }}" + - interfaces + - create-physical + - "{{ item.sid }}" + - "mac_address={{ item.mac }}" + register: phys_force_create_results + 
changed_when: true + + - name: Mark fixed after physical interface create + when: + - (maas_force_machine_update | default(false) | bool) + - (phys_create_ineligible | length) > 0 + loop: "{{ phys_create_ineligible | map(attribute='sid') | unique | list }}" + loop_control: { label: "{{ item }}" } + ansible.builtin.command: + argv: [ maas, "{{ maas_admin_username }}", machine, mark-fixed, "{{ item }}" ] + register: phys_force_mark_fixed + failed_when: > + (phys_force_mark_fixed.rc != 0) + and ('No rack controllers can access the BMC' not in (phys_force_mark_fixed.stdout | default(''))) + changed_when: true + + - name: Read interfaces for bond scan + loop: "{{ groups['testnodes'] | default([]) }}" + loop_control: { label: "{{ item }}" } + vars: + h: "{{ item.split('.')[0] | lower }}" + sid: "{{ maas_by_hostname[h].system_id | default(omit) }}" + when: sid is defined + ansible.builtin.command: + argv: [ maas, "{{ maas_admin_username }}", interfaces, read, "{{ sid }}" ] + register: bond_scan + changed_when: false + + - name: Init bond maps + ansible.builtin.set_fact: + current_bonds_map: {} + current_bond_members: {} + + - name: Build current bond maps (per host) + loop: "{{ bond_scan.results | selectattr('stdout','defined') | list }}" + loop_control: + label: "{{ item.item.split('.')[0] | lower }}" + vars: + h: "{{ item.item.split('.')[0] | lower }}" + bonds: "{{ (item.stdout | from_json) | selectattr('type','equalto','bond') | list }}" + bond_names: "{{ bonds | map(attribute='name') | list }}" + bond_ids: "{{ bonds | map(attribute='id') | list }}" + bond_parents: "{{ bonds | map(attribute='parents') | list }}" + name_to_id: "{{ dict(bond_names | zip(bond_ids)) }}" + id_to_parents: "{{ dict(bond_ids | zip(bond_parents)) }}" + ansible.builtin.set_fact: + current_bonds_map: "{{ current_bonds_map | combine({ h: name_to_id }) }}" + current_bond_members: "{{ current_bond_members | combine({ h: id_to_parents }) }}" + + - name: Ensure bond action lists exist + 
ansible.builtin.set_fact: + bond_create_list: "{{ bond_create_list | default([]) }}" + bond_update_list: "{{ bond_update_list | default([]) }}" + + - name: Compute bond actions per host + loop: "{{ groups['testnodes'] | default([]) }}" + loop_control: { label: "{{ item }}" } + vars: + node: "{{ item }}" + h: "{{ node.split('.')[0] | lower }}" + sid: "{{ maas_by_hostname[h].system_id | default('') }}" + want_bonds: "{{ hostvars[node].maas_bonds | default([]) }}" + mac_to_id: "{{ iface_drift[h].mac_to_id | default({}) }}" + have_bonds: "{{ current_bonds_map.get(h, {}) }}" + ansible.builtin.set_fact: + bond_create_list: >- + {%- set out = bond_create_list | default([]) -%} + {%- for b in want_bonds -%} + {%- set parent_macs = (b.interfaces | default([])) | map('extract', hostvars[node]) | map('lower') | list -%} + {%- set parent_ids = parent_macs | map('extract', mac_to_id) | select('defined') | list -%} + {%- if b.name not in have_bonds.keys() -%} + {%- set _ = out.append({ + 'hostname': h, + 'sid': sid, + 'name': b.name, + 'mode': b.mode | default('802.3ad'), + 'mtu': b.mtu | default(9000), + 'parent_ids': parent_ids + }) -%} + {%- endif -%} + {%- endfor -%} + {{ out }} + bond_update_list: >- + {%- set out = bond_update_list | default([]) -%} + {%- for b in want_bonds -%} + {%- if b.name in have_bonds.keys() -%} + {%- set parent_macs = (b.interfaces | default([])) | map('extract', hostvars[node]) | map('lower') | list -%} + {%- set parent_ids = parent_macs | map('extract', mac_to_id) | select('defined') | list -%} + {%- set _ = out.append({ + 'hostname': h, + 'sid': sid, + 'name': b.name, + 'mode': b.mode | default('802.3ad'), + 'mtu': b.mtu | default(9000), + 'parent_ids': parent_ids, + 'have_bond_id': have_bonds[b.name] + }) -%} + {%- endif -%} + {%- endfor -%} + {{ out }} + + - name: Define allowed MAAS states for bond changes + ansible.builtin.set_fact: + maas_allowed_states_for_bonds: ['New','Ready','Allocated','Broken'] + + + - name: Build eligibility lists 
for bond changes + vars: + eligible_hosts: >- + {{ + status_name_map | dict2items + | selectattr('value','in', maas_allowed_states_for_bonds) + | map(attribute='key') | list + }} + all_bond_hosts: >- + {{ + ( + (bond_create_list | default([])) + (bond_update_list | default([])) + ) + | map(attribute='hostname') | list + | unique | list + }} + ansible.builtin.set_fact: + bond_create_eligible: "{{ (bond_create_list | default([])) | selectattr('hostname','in', eligible_hosts) | list }}" + bond_update_eligible: "{{ (bond_update_list | default([])) | selectattr('hostname','in', eligible_hosts) | list }}" + bond_ineligible_hosts: "{{ all_bond_hosts | difference(eligible_hosts) | list }}" + + - name: Recompute desired parent IDs for each bond update + when: bond_update_eligible | length > 0 + loop: "{{ bond_update_eligible }}" + loop_control: + label: "{{ item.hostname }} -> {{ item.name }}" + vars: + hostname: "{{ item.hostname }}" + # get this host's bond definition from inventory/group_vars + bond_cfg: >- + {{ + (hostvars[hostname].maas_bonds | default([])) + | selectattr('name','equalto', item.name) | first | default({}) + }} + # the inventory keys for this bond (e.g. 
['ext_pere_mac','25Gb_2']) + mac_keys: "{{ bond_cfg.interfaces | default([]) }}" + # resolve keys -> MACs from that host, normalize/lower, drop empties + macs_for_bond: >- + {{ + mac_keys + | map('extract', hostvars[hostname]) | map('default','') + | map('lower') | reject('equalto','') | list + }} + # existing interface id map for this host: mac(lower) -> id + id_by_mac: "{{ iface_drift[hostname].mac_to_id | default({}) }}" + desired_parent_ids: >- + {{ + macs_for_bond + | map('extract', id_by_mac, None) + | reject('equalto', None) + | map('string') | unique | sort | list + }} + ansible.builtin.set_fact: + bond_update_argvs: >- + {{ + (bond_update_argvs | default([])) + + [ { + 'sid': item.sid, + 'bond_id': item.have_bond_id, + 'argv': [ + 'maas', maas_admin_username, 'interface', 'update', + item.sid, (item.have_bond_id | string), + 'parents=' ~ (desired_parent_ids | join(',')), + 'bond_mode=' ~ (item.mode | default('802.3ad')), + 'mtu=' ~ (item.mtu | default(9000) | string) + ] + } ] + }} + + - name: Apply bond parents/mode/mtu (idempotent) + when: (bond_update_argvs | default([])) | length > 0 + loop: "{{ bond_update_argvs }}" + loop_control: { label: "{{ item.sid }} -> bond {{ item.bond_id }}" } + vars: + # item.argv currently has base pieces; rebuild with repeated parents= + parents_ids: >- + {{ + (item.argv | last) is string and (item.argv | last) is search('^parents=') + | ternary( (item.argv | last | regex_replace('^parents=', '')).split(','), + [] ) + }} + parents_args: "{{ parents_ids | map('string') | map('regex_replace','^(.*)$','parents=\\1') | list }}" + base_args: "{{ ['maas', maas_admin_username, 'interface', 'update', item.sid, (item.bond_id | string)] }}" + final_argv: "{{ base_args + parents_args + ['bond_mode=802.3ad', 'mtu=9000'] }}" + ansible.builtin.command: + argv: "{{ final_argv }}" + register: bond_parent_updates + changed_when: true + + - name: Record machines skipped due to state (force=false) + when: + - not (maas_force_machine_update | 
default(false) | bool) + - bond_ineligible_hosts | length > 0 + ansible.builtin.set_fact: + bond_skipped_due_to_state: >- + {{ (bond_skipped_due_to_state | default([])) + bond_ineligible_hosts }} + + - name: Create bonds (machines in modifiable state) + when: bond_create_eligible | length > 0 + loop: "{{ bond_create_eligible }}" + loop_control: { label: "{{ item.hostname }} -> {{ item.name }}" } + vars: + parents_args: >- + {{ + item.parent_ids + | map('string') + | map('regex_replace','^(.*)$','parents=\\1') + | list + }} + argv_final: >- + {{ + ['maas', maas_admin_username, 'interfaces', 'create-bond', + item.sid, 'name=' ~ item.name, + 'bond_mode=' ~ (item.mode | default('802.3ad'))] + + parents_args + + ['mtu=' ~ (item.mtu | default(9000) | string)] + + ((item.vlan is defined) | ternary(['vlan=' ~ (item.vlan | string)], [])) + }} + ansible.builtin.command: + argv: "{{ argv_final }}" + register: bond_create_results + changed_when: true + + - name: Update bonds (machine in modifiable state) + when: bond_update_eligible | length > 0 + loop: "{{ bond_update_eligible }}" + loop_control: + label: "{{ item.hostname }} -> {{ item.name }} (id={{ item.have_bond_id }})" + vars: + parents_args: >- + {{ + item.parent_ids + | map('string') + | map('regex_replace','^(.*)$','parents=\1') + | list + }} + argv_final: >- + {{ + ['maas', maas_admin_username, 'interface', 'update', + item.sid, (item.have_bond_id | string)] + + parents_args + + ['bond_mode=' ~ (item.mode | default('802.3ad')), + 'mtu=' ~ (item.mtu | default(9000) | string)] + }} + ansible.builtin.command: + argv: "{{ argv_final }}" + register: bond_update_calls + changed_when: true + + - name: Build force lists (ineligible hosts only, when forcing) + when: (maas_force_machine_update | default(false) | bool) + vars: + bond_create_force_hosts: "{{ (bond_create_list | default([])) | map(attribute='hostname') | list | unique | list | difference(bond_create_eligible | map(attribute='hostname') | list | unique | list) }}" 
+ bond_update_force_hosts: "{{ (bond_update_list | default([])) | map(attribute='hostname') | list | unique | list | difference(bond_update_eligible | map(attribute='hostname') | list | unique | list) }}" + ansible.builtin.set_fact: + bond_create_force: "{{ (bond_create_list | default([])) | selectattr('hostname','in', bond_create_force_hosts) | list }}" + bond_update_force: "{{ (bond_update_list | default([])) | selectattr('hostname','in', bond_update_force_hosts) | list }}" + force_hosts_unique: "{{ (bond_create_force_hosts + bond_update_force_hosts) | unique | list }}" + + - name: Mark machines broken for forced bond updates + when: + - (maas_force_machine_update | default(false) | bool) + - force_hosts_unique | length > 0 + loop: "{{ force_hosts_unique }}" + loop_control: { label: "{{ item }}" } + ansible.builtin.command: + argv: + - maas + - "{{ maas_admin_username }}" + - machine + - mark-broken + - "{{ maas_by_hostname[item].system_id }}" + register: mark_broken_result + # Treat only *other* non-zero failures as fatal + failed_when: > + (mark_broken_result.rc != 0) and + ('No rack controllers can access the BMC of node' not in (mark_broken_result.stdout | default(''))) and + ('No rack controllers can access the BMC of machine' not in (mark_broken_result.stdout | default(''))) + # Still count as "changed" so downstream tasks run + changed_when: > + (mark_broken_result.rc == 0) or + ('No rack controllers can access the BMC' in (mark_broken_result.stdout | default(''))) + + - name: Create bonds (forced, machine temporarily broken) + when: + - (maas_force_machine_update | default(false) | bool) + - bond_create_force | length > 0 + loop: "{{ bond_create_force }}" + loop_control: + label: "{{ item.hostname }} -> {{ item.name }}" + vars: + parents_csv: "{{ item.parent_ids | map('string') | join(',') }}" + bond_create_argv: >- + {{ + [ + 'maas', + maas_admin_username, + 'interfaces', + 'create-bond', + item.sid, + 'name=' ~ item.name, + 'bond_mode=' ~ (item.mode | 
default('802.3ad')), + 'parents=' ~ parents_csv, + 'mtu=' ~ (item.mtu | default(9000) | string) + ] + + ((item.vlan is defined) | ternary(['vlan=' ~ (item.vlan | string)], [])) + }} + ansible.builtin.command: + argv: "{{ bond_create_argv }}" + register: bond_create_force_results + changed_when: true + + # Read all fabrics and vlans to build a vid -> vlan_id map + - name: Read fabrics + ansible.builtin.command: + argv: [ maas, "{{ maas_admin_username }}", fabrics, read ] + register: maas_fabrics + changed_when: false + + - name: Build list of fabric IDs + ansible.builtin.set_fact: + fabric_ids: "{{ (maas_fabrics.stdout | from_json) | map(attribute='id') | list | unique }}" + + - name: Read VLANs for each fabric + loop: "{{ fabric_ids }}" + loop_control: { label: "fabric={{ item }}" } + ansible.builtin.command: + argv: [ maas, "{{ maas_admin_username }}", vlans, read, "{{ item }}" ] + register: maas_vlans_reads + changed_when: false + + - name: Build vid -> vlan_id map + ansible.builtin.set_fact: + vid_to_vlan_id: >- + {{ + dict( + (maas_vlans_reads.results | map(attribute='stdout') | map('from_json') | list) + | sum(start=[]) + | map(attribute='vid') | list + | zip( + (maas_vlans_reads.results | map(attribute='stdout') | map('from_json') | list) + | sum(start=[]) + | map(attribute='id') | list + ) + ) + }} + + # For each bond needing a VLAN, compute the MAAS VLAN id + - name: Build bond->vlan_id updates + when: bond_update_eligible | length > 0 + loop: "{{ bond_update_eligible }}" + loop_control: { label: "{{ item.hostname }} -> {{ item.name }}" } + vars: + bond_cfg: "{{ (maas_bonds | selectattr('name','equalto', item.name) | first) | default({}) }}" + desired_vid: "{{ bond_cfg.vlan | default(None) }}" + vlan_id: "{{ (desired_vid is not none) | ternary(vid_to_vlan_id.get(desired_vid), None) }}" + ansible.builtin.set_fact: + bond_vlan_updates: >- + {{ + (bond_vlan_updates | default([])) + + ([{ + 'sid': item.sid, + 'bond_id': item.have_bond_id, + 'vlan_id': vlan_id + 
}] if vlan_id is not none else []) + }} + + - name: Attach bond to VLAN (set vlan=VLAN_ID) + when: (bond_vlan_updates | default([])) | length > 0 + loop: "{{ bond_vlan_updates }}" + loop_control: + label: "{{ item.sid }} -> bond {{ item.bond_id }} vlan={{ item.vlan_id }}" + ansible.builtin.command: + argv: + - maas + - "{{ maas_admin_username }}" + - interface + - update + - "{{ item.sid }}" + - "{{ item.bond_id }}" + - "vlan={{ item.vlan_id }}" + register: bond_vlan_set + changed_when: true + + - name: Update bonds (forced, machine temporarily broken) + when: + - (maas_force_machine_update | default(false) | bool) + - bond_update_force | length > 0 + - member_drift or (item.mode is defined) or (item.mtu is defined) + loop: "{{ bond_update_force }}" + loop_control: { label: "{{ item.hostname }} -> {{ item.name }} (id={{ item.have_bond_id }})" } + vars: + parents_csv: "{{ item.parent_ids | map('string') | join(',') }}" + bond_update_force_argv: >- + {{ + [ + 'maas', + maas_admin_username, + 'interface', + 'update', + item.sid, + (item.have_bond_id | string), + 'bond_mode=' ~ (item.mode | default('802.3ad')), + 'parents=' ~ parents_csv, + 'mtu=' ~ (item.mtu | default(9000) | string) + ] + }} + ansible.builtin.command: + argv: "{{ bond_update_force_argv }}" + register: bond_update_force_results + changed_when: true + + - name: Mark machines fixed after forced bond updates + when: + - (maas_force_machine_update | default(false) | bool) + - force_hosts_unique | length > 0 + loop: "{{ force_hosts_unique }}" + loop_control: { label: "{{ item }}" } + ansible.builtin.command: + argv: + - maas + - "{{ maas_admin_username }}" + - machine + - mark-fixed + - "{{ maas_by_hostname[item].system_id }}" + register: mark_fixed_result + failed_when: > + (mark_fixed_result.rc != 0) and + ('No rack controllers can access the BMC of node' not in (mark_fixed_result.stdout | default(''))) and + ('No rack controllers can access the BMC of machine' not in (mark_fixed_result.stdout | 
default(''))) + changed_when: > + (mark_fixed_result.rc == 0) or + ('No rack controllers can access the BMC' in (mark_fixed_result.stdout | default(''))) + + - name: Read machine details to inspect power parameters + vars: + hostname: "{{ item.split('.')[0] | lower }}" + sid: "{{ maas_by_hostname.get(hostname, {}).get('system_id') | default(omit) }}" + when: sid is defined + loop: "{{ groups['testnodes'] | default([]) }}" + loop_control: { label: "{{ item }}" } + ansible.builtin.command: + argv: [ maas, "{{ maas_admin_username }}", machine, read, "{{ sid }}" ] + register: machine_reads + changed_when: false + + - name: Build map of current power settings + when: machine_reads is defined and machine_reads.results is defined + ansible.builtin.set_fact: + power_map: >- + {{ + dict( + machine_reads.results + | selectattr('stdout','defined') + | map(attribute='item') + | map('split','.') | map('first') | list + | zip(machine_reads.results | map(attribute='stdout') | map('from_json')) + ) + }} + + - name: Select update candidates + ansible.builtin.set_fact: + update_candidates: >- + {{ + to_update + | selectattr('drift', 'defined') + | selectattr('drift', 'ne', []) + | list + }} + + # A) Try IPMI for each update-candidate that *wants* ipmi and has an address + - name: Probe IPMI for update candidates + loop: "{{ update_candidates }}" + loop_control: { label: "{{ item.hostname }} -> {{ ipmi_addr }}" } + vars: + ipmi_addr: "{{ desired_by_hostname[item.hostname].ipmi_address | default('') }}" + ansible.builtin.command: + argv: + - ipmitool + - -I + - lanplus + - -H + - "{{ ipmi_addr }}" + - -U + - "{{ maas_ipmi_username }}" + - -P + - "{{ maas_ipmi_password }}" + - -N + - "1" + - -R + - "1" + - chassis + - power + - status + register: ipmi_probe_update + changed_when: false + failed_when: false + when: + - update_candidates | default([]) | length > 0 + - (item.power_type | default('manual')) == 'ipmi' + - ipmi_addr | length > 0 + - maas_ipmi_username is defined + - 
maas_ipmi_password is defined + + # B) Build "hostname -> ipmi_ok" (rc == 0) lookup for updates + - name: Init IPMI OK map for updates + when: ipmi_probe_update is defined + ansible.builtin.set_fact: + ipmi_ok_update_map: {} + + - name: Accumulate IPMI OK map for updates + when: ipmi_probe_update is defined + loop: "{{ ipmi_probe_update.results }}" + loop_control: { label: "{{ item.item.hostname }} rc={{ item.rc }}" } + ansible.builtin.set_fact: + ipmi_ok_update_map: >- + {{ + (ipmi_ok_update_map | default({})) + | combine({ (item.item.hostname): ((item.rc | int) == 0) }) + }} + + # C) Produce update list with an *effective* power_type (ipmi if ok, else manual) + - name: Init effective update list + ansible.builtin.set_fact: + update_candidates_effective: [] + + - name: Compute effective power_type for updates + when: update_candidates | default([]) | length > 0 + loop: "{{ update_candidates }}" + loop_control: { label: "{{ item.hostname }}" } + ansible.builtin.set_fact: + update_candidates_effective: >- + {{ + (update_candidates_effective | default([])) + + [ item | combine({ + 'ipmi_address': (desired_by_hostname[item.hostname].ipmi_address | default('')), + 'effective_power_type': + ( + ((item.power_type | default('manual')) == 'ipmi') + and (ipmi_ok_update_map | default({})).get(item.hostname, false) + ) + | ternary('ipmi','manual') + }) ] + }} + + - name: Update machines (ipmi reachable) + when: update_candidates_effective | selectattr('effective_power_type','equalto','ipmi') | list | length > 0 + loop: "{{ update_candidates_effective | selectattr('effective_power_type','equalto','ipmi') | list }}" + loop_control: { label: "{{ item.hostname }}" } + ansible.builtin.command: + argv: + - maas + - "{{ maas_admin_username }}" + - machine + - update + - "{{ item.system_id }}" + - "hostname={{ item.hostname }}" + - "architecture={{ item.arch }}" + - "power_type=ipmi" + - "mac_addresses={{ item.mac }}" + - "power_parameters_power_address={{ item.ipmi_address | 
default('') }}" + - "power_parameters_power_user={{ maas_ipmi_username }}" + - "power_parameters_power_pass={{ maas_ipmi_password }}" + + - name: Update machines (fallback to manual) + when: update_candidates_effective | selectattr('effective_power_type','equalto','manual') | list | length > 0 + loop: "{{ update_candidates_effective | selectattr('effective_power_type','equalto','manual') | list }}" + loop_control: { label: "{{ item.hostname }}" } + ansible.builtin.command: + argv: + - maas + - "{{ maas_admin_username }}" + - machine + - update + - "{{ item.system_id }}" + - "hostname={{ item.hostname }}" + - "architecture={{ item.arch }}" + - "power_type=manual" + - "mac_addresses={{ item.mac }}" + + - name: These machines need to be updated but were skipped for being in the wrong state + run_once: true + when: + - not (maas_force_machine_update | default(false) | bool) + - ((bond_skipped_due_to_state | default([])) | length > 0) or + ((machines_skipped_due_to_state | default([])) | length > 0) + ansible.builtin.debug: + msg: >- + These machines need to be updated but were skipped for being in the wrong state: + {{ + ((bond_skipped_due_to_state | default([])) + (machines_skipped_due_to_state | default([]))) + | unique | sort | list + }} diff --git a/roles/maas/tasks/machines/_apply_one_iface.yml b/roles/maas/tasks/machines/_apply_one_iface.yml new file mode 100644 index 00000000..cef9cac1 --- /dev/null +++ b/roles/maas/tasks/machines/_apply_one_iface.yml @@ -0,0 +1,435 @@ +--- +# --- TOP OF _apply_interfaces.yml (ADAPTED FOR NEW SCHEMA) --- + +# Fresh auth (nonce) for any API calls in this include +- include_tasks: ../_auth_header.yml + +# Normalize incoming iface object; never use a loop var named "iface" anywhere. 
- name: Normalize iface object
  set_fact:
    iface: "{{ iface_obj }}"

# Ensure we have a vlan map; if empty, fetch it from MAAS
- name: Ensure vlan map exists
  set_fact:
    _vlan_by_vid: "{{ _vlan_by_vid | default({}) }}"

- name: Read all fabrics (for VLAN lookup)
  when: (_vlan_by_vid | length) == 0
  uri:
    url: "{{ _maas_api }}/fabrics/"
    method: GET
    headers:
      Authorization: "{{ maas_auth_header }}"
      Accept: application/json
    return_content: yes
    status_code: 200
  register: _fabrics_resp
  no_log: true

# Flatten all VLANs from every fabric into one list
- name: Collect all VLANs from fabrics payload
  when:
    - (_vlan_by_vid | length) == 0
    - _fabrics_resp.json is defined
  set_fact:
    _all_vlans: "{{ (_fabrics_resp.json | map(attribute='vlans') | list) | flatten }}"

# Build { "<vid>": <vlan object>, ... } for fast lookup
- name: Build _vlan_by_vid map (keyed by VID as string)
  when:
    - (_vlan_by_vid | length) == 0
    - _all_vlans is defined
  set_fact:
    _vlan_by_vid: >-
      {{
        dict(
          (_all_vlans | map(attribute='vid') | map('string') | list)
          | zip(_all_vlans)
        )
      }}

# Build quick lookups
- name: Build interface lookups
  set_fact:
    # BUGFIX: keys came from the mac_address-filtered list but ids from the
    # *unfiltered* one, so a single MAC-less interface shifted every id by one.
    # Filter both sides identically before zipping.
    _iface_id_by_mac: >-
      {{
        dict(
          (_ifaces | selectattr('mac_address','defined')
           | map(attribute='mac_address')
           | map('lower') | list)
          | zip(_ifaces | selectattr('mac_address','defined')
                | map(attribute='id') | list)
        )
      }}
    # Same fix: ids and names must come from one identically-filtered list.
    _iface_name_by_id: >-
      {{
        dict(
          (_ifaces | selectattr('id','defined') | selectattr('name','defined')
           | map(attribute='id') | list)
          | zip(_ifaces | selectattr('id','defined') | selectattr('name','defined')
                | map(attribute='name') | list)
        )
      }}

# Normalize VLAN lookup for int/string keys
- name: Build VLAN lookup (int & string keys)
  set_fact:
    _vlan_lookup: >-
      {{
        (_vlan_by_vid | default({}))
        | combine(
            dict(((_vlan_by_vid | default({})).keys() | list | map('string') | list)
            | zip((_vlan_by_vid | default({})).values())),
            recursive=True
          )
      }}

# Resolve node system_id from interface facts (avoids mismatch)
- name: Resolve node system_id for interface ops
set_fact: + _node_system_id: >- + {{ + (_ifaces | length) > 0 and ((_ifaces | first).system_id) or system_id + }} + +# Validate prefix_mac exists +- name: "Ensure {{ prefix }}_mac exists for {{ iface.prefix }}" + assert: + that: + - iface.prefix is defined + - hostvars[inv_host][iface.prefix ~ '_mac'] is defined + fail_msg: "Missing {{ iface.prefix }}_mac for {{ inv_host }}" + +# Resolve parent MAC from inventory (normalize to lower) +- name: Set _parent_mac + set_fact: + _parent_mac: "{{ hostvars[inv_host][iface.prefix ~ '_mac'] | string | lower }}" + +# Try to resolve an interface id for this MAC +- name: Resolve parent interface id + set_fact: + _parent_id: "{{ _iface_id_by_mac.get(_parent_mac) | default(None) }}" + +- include_tasks: ../_auth_header.yml + +# Optionally create missing PHYSICAL interface (when allowed) +- name: "Create missing physical interface for {{ host }}" + when: + - (_parent_id is none) or (_parent_id | string) == '' + - (maas_allow_create_physical | default(true)) | bool # toggle if you want + uri: + url: "{{ _maas_api }}/nodes/{{ _node_system_id }}/interfaces/?op=create_physical" + method: POST + headers: + Authorization: "{{ maas_auth_header }}" + Accept: application/json + Content-Type: application/x-www-form-urlencoded + body_format: form-urlencoded + body: + type: "physical" + mac_address: "{{ _parent_mac }}" + # name: "{{ iface.prefix }}" # optional; MAAS may auto-name (ethX) + status_code: [200, 201] + return_content: true + register: _create_phys + no_log: true + +- include_tasks: ../_auth_header.yml + +# Refresh interfaces + lookups after possible create +- name: Refresh MAAS interface facts after create (if needed) + when: + - _create_phys is defined + uri: + url: "{{ _maas_api }}/nodes/{{ _node_system_id }}/interfaces/" + method: GET + headers: + Authorization: "{{ maas_auth_header }}" + Accept: application/json + return_content: true + status_code: 200 + register: _ifaces_after_create + no_log: true +- name: Re-set _ifaces 
after create + when: + - _ifaces_after_create is defined + set_fact: + _ifaces: "{{ _ifaces_after_create.json | list }}" + +- name: Rebuild interface facts + maps after create + when: + - _ifaces_after_create is defined + set_fact: + _iface_id_by_mac: >- + {{ + dict( + (_ifaces | selectattr('mac_address','defined') | map(attribute='mac_address') + | map('lower') | list) + | zip(_ifaces | map(attribute='id')) + ) + }} + _iface_id_by_name: >- + {{ + dict( + (_ifaces | selectattr('name','defined') | map(attribute='name') | list) + | zip(_ifaces | map(attribute='id')) + ) + }} + _iface_name_by_id: >- + {{ + dict( + (_ifaces | selectattr('id','defined') | map(attribute='id') | list) + | zip(_ifaces | selectattr('name','defined') | map(attribute='name') | list) + ) + }} + +# Resolve again now that we may have created it +- name: Resolve parent interface id (post-create) + when: (_parent_id is none) or (_parent_id | string) == '' + set_fact: + _parent_id: "{{ _iface_id_by_mac.get(_parent_mac) | default(None) }}" + +# If still missing, fail cleanly (or switch to 'warn + skip' if you prefer) +- name: Abort when parent interface is missing and auto-create disabled/failed + when: _parent_id is none + fail: + msg: >- + Could not find or create physical interface with MAC {{ _parent_mac }} + on {{ inv_host }} (system_id={{ _node_system_id }}). + Either re-commission the node or allow auto-create via + maas_allow_create_physical=true. 
+ +# Load parent object (safe now) +- name: Load parent interface object + set_fact: + _parent_obj: "{{ (_ifaces | selectattr('id','equalto', (_parent_id|int)) | list | first) | default({}) }}" + +- name: Ensure prerequisites for bond MAC match exist + set_fact: + _desired_bonds: "{{ _desired_bonds | default([]) }}" + _parent_mac: "{{ _parent_mac | string | lower }}" + _bond_match: {} + +- name: Collect matching bond (by MAC) for this parent + set_fact: + _bond_match: "{{ bond }}" + loop: "{{ _desired_bonds }}" + loop_control: + loop_var: bond + label: "{{ bond.name | default('∅') }}" + when: + - bond.interfaces is defined + - bond.native_vid is defined + - _parent_mac in (bond.interfaces | map('extract', hostvars[inv_host]) | map('string') | map('lower') | list) + +- name: Inherit native VLAN from matched bond + set_fact: + _effective_native_vid: "{{ _bond_match.native_vid }}" + _effective_native_vlan_id: "{{ _vlan_lookup[_bond_match.native_vid | string].id }}" + when: + - _bond_match is mapping + - _bond_match | length > 0 + - _bond_match.native_vid is defined + - (_bond_match.native_vid | string) in _vlan_lookup + +# If the loaded parent is a VLAN (e.g. eth0.1300), use its physical parent (e.g. 
eth0) +- name: Detect if loaded parent is a VLAN + set_fact: + _parent_is_vlan: "{{ _parent_obj is mapping and (_parent_obj.type | default('')) == 'vlan' }}" + +- name: Extract physical parent name from VLAN + when: _parent_is_vlan + set_fact: + _phys_parent_name: "{{ (_parent_obj.parents | default([])) | first | default('') }}" + +- name: Resolve physical parent object by name from _ifaces + when: _parent_is_vlan and (_phys_parent_name | length) > 0 + set_fact: + _phys_parent_obj: >- + {{ + (_ifaces | default([]) + | selectattr('name','equalto', _phys_parent_name) + | list | first) | default({}, true) + }} + +- name: Set parent_id to the physical iface id (obj → name map → keep old) + when: _parent_is_vlan + set_fact: + _parent_id: >- + {{ + _phys_parent_obj.id + | default(_iface_id_by_name.get(_phys_parent_name), true) + | default(_parent_id, true) + }} + _parent_obj: >- + {{ + (_phys_parent_obj if (_phys_parent_obj | length > 0) else _parent_obj) + }} + +# Safety net so we never send parent=0 again +- name: Assert parent interface id resolved before creating VLAN subinterface + assert: + that: + - _parent_id is defined + - (_parent_id | int) > 0 + fail_msg: >- + Could not resolve physical parent for '{{ iface.prefix }}'. + parent_obj={{ _parent_obj | default({}) }} maps: by_name={{ _iface_id_by_name | default({}) }}. + + +# Only check type if we actually have an object +- name: Ensure parent is physical/bond before native VLAN update + when: _parent_obj is mapping and _parent_obj.type is defined + assert: + that: + - _parent_obj.type in ['physical','bond'] + fail_msg: "Native VLAN can only be set on a physical/bond parent (id={{ _parent_id }})." + +- include_tasks: ../_auth_header.yml + +- name: Check current native on parent + set_fact: + _current_native: "{{ (_ifaces | selectattr('id','equalto', (_parent_id|int)) | map(attribute='vlan') | list | first) | default(None) }}" + +- name: Set _current_native_id from _current_native dict. Default to 0. 
+ set_fact: + _current_native_id: >- + {{ (_current_native.id | int) + if (_current_native is mapping) + else 0 }} + +# If iface.native_vid is missing, and bond logic didn’t set anything, fallback to 'untagged' +- name: Derive native VID from _vlan_lookup when native_vid is missing + when: + - iface.native_vid is not defined + - _effective_native_vlan_id is not defined + set_fact: + _effective_native_vid: >- + {{ + (_vlan_lookup + | dict2items + | selectattr('value.name','equalto','untagged') + | map(attribute='value.vid') + | list + | first) | default(omit) + }} + +- name: Resolve native VLAN ID from VID + when: _effective_native_vid is defined + set_fact: + _effective_native_vlan_id: "{{ _vlan_lookup[_effective_native_vid|string].id }}" + +# Figure out what ID to send to MAAS +- name: Choose final native VLAN id to apply + set_fact: + _native_vlan_id_to_apply: >- + {{ + (iface.get('native_vid') is not none) + | ternary( + _vlan_lookup[iface.get('native_vid')|string].id, + _effective_native_vlan_id | default(omit) + ) + }} + +# Only if we actually have an ID, and parent is physical/bond +- name: "Set native VLAN on {{ host }}'s {{ _parent_obj.name }} (if different)" + when: + - _native_vlan_id_to_apply is defined + - _current_native is defined + - (_current_native_id | int) != (_native_vlan_id_to_apply | int) + - (_ifaces | selectattr('id','equalto', (_parent_id|int)) | map(attribute='type') | list | first) in ['physical','bond'] + uri: + url: "{{ _maas_api }}/nodes/{{ _node_system_id }}/interfaces/{{ _parent_id }}/" + method: PUT + headers: + Authorization: "{{ maas_auth_header }}" + Accept: application/json + Content-Type: application/x-www-form-urlencoded + body_format: form-urlencoded + body: + vlan: "{{ _native_vlan_id_to_apply }}" + link_connected: true + status_code: 200 +# no_log: true + +- include_tasks: ../_auth_header.yml + +# --- Index existing VLAN subinterfaces (by parent_id + vlan_id) ---------------- +- name: Init list of existing VLAN 
subinterfaces + set_fact: + _existing_vlan_pairs: [] + +# Optional (fast lookup): build name -> id map once +- name: Build iface name→id map + set_fact: + _iface_name_to_id: "{{ dict(_ifaces | map(attribute='name') | zip(_ifaces | map(attribute='id'))) }}" + +# Collect existing VLAN subinterfaces (translate parent name -> id) +- name: Collect existing VLAN subinterfaces (translate parent name -> id) + vars: + _parent_name: "{{ vlan_iface.parents | default([]) | first }}" + _parent_id: "{{ (_iface_name_to_id | default({})).get(_parent_name) | default(omit) }}" + _pair: + id: "{{ vlan_iface.id }}" + name: "{{ vlan_iface.name }}" + parent_name: "{{ _parent_name }}" + parent_id: "{{ _parent_id }}" + vlan_id: "{{ vlan_iface.vlan.id }}" + set_fact: + _existing_vlan_pairs: "{{ (_existing_vlan_pairs | default([])) + [_pair] }}" + loop: "{{ _ifaces | selectattr('type','equalto','vlan') | list }}" + loop_control: + loop_var: vlan_iface + label: "{{ vlan_iface.name }} ← {{ _parent_name }} (vlan_id={{ vlan_iface.vlan.id }})" + +- name: Ensure tagged VLAN subinterfaces exist (?op=create_vlan) # guarded + when: + - iface.tagged_vids is defined + - vid in iface.tagged_vids + - _vlan_lookup[vid|string] is defined + - ( + _existing_vlan_pairs + | selectattr('parent_id','equalto', (_parent_id|int)) + | selectattr('vlan_id','equalto', _vlan_lookup[vid|string].id) + | list | length + ) == 0 + uri: + url: "{{ _maas_api }}/nodes/{{ _node_system_id }}/interfaces/?op=create_vlan" + method: POST + headers: + Authorization: "{{ maas_auth_header }}" + Accept: application/json + Content-Type: application/x-www-form-urlencoded + body_format: form-urlencoded + body: + parent: "{{ _parent_id }}" + vlan: "{{ _vlan_lookup[vid|string].id }}" + status_code: [200] + return_content: true + loop: "{{ iface.tagged_vids | default([]) }}" + loop_control: + loop_var: vid + label: "{{ iface.prefix }} → VID {{ vid }}" + register: _create_vlan_results + no_log: true + +- name: Skip note (VLAN subinterface 
already present) + debug: + msg: >- + Skipping create: parent_id={{ _parent_id }} already has vlan_id={{ _vlan_lookup[vid|string].id }} + ({{ iface.prefix }}.{{ vid }}) + loop: "{{ iface.tagged_vids | default([]) }}" + loop_control: + loop_var: vid + label: "{{ iface.prefix }} → VID {{ vid }}" + when: + - iface.tagged_vids is defined + - > + (_existing_vlan_pairs + | selectattr('parent_id','equalto', (_parent_id|int)) + | selectattr('vlan_id','equalto', _vlan_lookup[vid|string].id) + | list | length) > 0 + +- name: Rebuild interface facts + maps after create (if any changed) + when: + - _create_vlan_results is defined + - (_create_vlan_results.results | selectattr('status','defined') | list | length) > 0 + include_tasks: machines/_refresh_iface_facts.yml diff --git a/roles/maas/tasks/machines/_build_indexes.yml b/roles/maas/tasks/machines/_build_indexes.yml new file mode 100644 index 00000000..2a1af69b --- /dev/null +++ b/roles/maas/tasks/machines/_build_indexes.yml @@ -0,0 +1,101 @@ +--- +# Builds all maps you rely on: by FQDN, by short, ids, macs, etc. 
+ +# FQDN -> object and FQDN -> macs +- name: Build maps keyed by FQDN + set_fact: + maas_by_hostname: >- + {{ + maas_by_hostname | default({}) + | combine({ item.hostname: item }, recursive=True) + }} + maas_host_to_macs: >- + {{ + maas_host_to_macs | default({}) + | combine({ + item.hostname: ( + (item.interface_set | map(attribute='mac_address') | list) + if (item.interface_set is defined) + else [] + ) + }, recursive=True) + }} + maas_host_to_ifaces: >- + {{ + maas_host_to_ifaces | default({}) + | combine({ + item.hostname: (item.interface_set | default([])) + }, recursive=True) + }} + maas_host_to_status: >- + {{ + maas_host_to_status | default({}) + | combine({ item.hostname: (item.status_name) }, recursive=True) + }} + loop: "{{ maas_nodes_list }}" + loop_control: + label: "{{ item.hostname | default('NO-HOSTNAME') }}" + when: item.hostname is defined + +# Short names list (dedup check can use this) +# Build short name list (from MAAS payload, no regex needed) +- name: Build short name list + set_fact: + _short_names: >- + {{ + (maas_by_hostname | default({})) + | dict2items + | map(attribute='value.hostname') + | reject('equalto', None) + | list + }} + +# short -> id +- name: Build maas_short_to_id + set_fact: + maas_short_to_id: >- + {{ + dict( + ( + (maas_by_hostname | default({})) + | dict2items + | map(attribute='value.hostname') + | reject('equalto', None) + ) + | zip( + (maas_by_hostname | default({})) + | dict2items + | map(attribute='value.system_id') + ) + ) + }} + +# short -> object +- name: Build maas_by_short + set_fact: + maas_by_short: >- + {{ + dict( + ( + (maas_by_hostname | default({})) + | dict2items + | map(attribute='value.hostname') + | reject('equalto', None) + ) + | zip( + (maas_by_hostname | default({})) + | dict2items + | map(attribute='value') + ) + ) + }} + +# short -> ansible inventory_host +- name: Build inventory_by_short + set_fact: + inventory_by_short: >- + {{ + (inventory_by_short | default({})) + | combine({ 
(item.split('.')[0]): item }) + }} + loop: "{{ groups['testnodes'] }}" diff --git a/roles/maas/tasks/machines/_create_vlan_on_parent.yml b/roles/maas/tasks/machines/_create_vlan_on_parent.yml new file mode 100644 index 00000000..f7a624de --- /dev/null +++ b/roles/maas/tasks/machines/_create_vlan_on_parent.yml @@ -0,0 +1,43 @@ +--- +# Expected vars (passed by caller): +# - parent_id (int/string MAAS iface ID of the parent, e.g. bond id) +# - vlan_id (int/string MAAS VLAN object id, not VID) +# - node_system_id (MAAS node system_id, e.g. gseprg) +# - vid_label (optional, for nicer labels/logging) + +- name: Validate required vars + fail: + msg: >- + Missing var(s). parent_id={{ parent_id|default('UNSET') }}, + vlan_id={{ vlan_id|default('UNSET') }}, + node_system_id={{ node_system_id|default('UNSET') }} + when: parent_id is not defined or vlan_id is not defined or node_system_id is not defined + +# Optional: quick sanity that the parent exists in _ifaces (if _ifaces available) +- name: Sanity-check parent exists on node (optional) + vars: + _parent_found: >- + {{ + (_ifaces | selectattr('id','equalto', parent_id|int) | list | length) > 0 + }} + when: + - _ifaces is defined + - not _parent_found | bool + fail: + msg: "Parent interface id {{ parent_id }} not found on node {{ node_system_id }}" + +- include_tasks: ../_auth_header.yml + +- name: POST op=create_vlan (parent={{ parent_id }}, vlan={{ vlan_id }}) # {{ vid_label | default('') }} + uri: + url: "{{ _maas_api }}/nodes/{{ node_system_id }}/interfaces/?op=create_vlan" + method: POST + headers: + Authorization: "{{ maas_auth_header }}" + Accept: application/json + Content-Type: application/x-www-form-urlencoded + body: "parent={{ parent_id }}&vlan={{ vlan_id }}" + body_format: form-urlencoded + status_code: 200 + register: _create_vlan_resp + changed_when: true diff --git a/roles/maas/tasks/machines/_ensure_bond.yml b/roles/maas/tasks/machines/_ensure_bond.yml new file mode 100644 index 00000000..77ef27ae --- 
/dev/null +++ b/roles/maas/tasks/machines/_ensure_bond.yml @@ -0,0 +1,332 @@ +--- +# Assumes incoming vars: +# - system_id +# - bond: { name, mode, mtu, link_speed?, interfaces[] or parents[], tagged_vids? } +# - _ifaces: current MAAS interface list for node (from your refresh task) +# - _vlan_lookup: { vid(str) -> vlan_obj with .id } +# Uses ../_auth_header.yml to set maas_auth_header for MAAS API calls. + +############################################################################### +# 1) Lookups & desired parent MACs (resolve from tokens or explicit MACs) +############################################################################### +- name: Build iface lookup maps (name→mac, mac→id, id→name) + set_fact: + _name_to_mac: "{{ dict(_ifaces | map(attribute='name') | zip(_ifaces | map(attribute='mac_address') | map('lower'))) }}" + _mac_to_id: "{{ dict(_ifaces | map(attribute='mac_address') | map('lower') | zip(_ifaces | map(attribute='id'))) }}" + _id_to_name: "{{ dict(_ifaces | map(attribute='id') | zip(_ifaces | map(attribute='name'))) }}" + +- name: Collect desired parent tokens (could be var names or MACs) + set_fact: + _desired_parent_tokens: "{{ (bond.interfaces | default(bond.parents) | default([])) | map('string') | list }}" + +# init +- name: Resolve inventory host for token lookup + set_fact: + _inv_host_resolved: "{{ inv_host | default(inventory_hostname) }}" + _desired_parent_macs: [] + _unresolved_parent_tokens: [] + changed_when: false + +# append MACs (only when token resolves to a MAC) +- name: Append MACs from tokens + vars: + tok: "{{ token | string }}" + val: "{{ (hostvars[_inv_host_resolved][tok] | default(tok)) | string }}" + mac_norm: "{{ val | lower }}" + is_mac: "{{ mac_norm is match('^([0-9a-f]{2}:){5}[0-9a-f]{2}$') }}" + when: is_mac | bool + set_fact: + _desired_parent_macs: "{{ _desired_parent_macs + [ mac_norm ] }}" + loop: "{{ _desired_parent_tokens }}" + loop_control: + loop_var: token + changed_when: false + +# collect unresolved 
tokens (those that did NOT resolve to a MAC) +- name: Collect unresolved tokens + vars: + tok: "{{ token | string }}" + val: "{{ (hostvars[_inv_host_resolved][tok] | default(tok)) | string }}" + mac_norm: "{{ val | lower }}" + is_mac: "{{ mac_norm is match('^([0-9a-f]{2}:){5}[0-9a-f]{2}$') }}" + when: not is_mac | bool + set_fact: + _unresolved_parent_tokens: "{{ _unresolved_parent_tokens + [ tok ] }}" + loop: "{{ _desired_parent_tokens }}" + loop_control: + loop_var: token + changed_when: false + +# fail if any didn’t resolve +- name: Fail if any desired parent tokens didn’t resolve to MACs + fail: + msg: "For bond {{ bond.name }}, could not resolve parent(s) to MACs: {{ _unresolved_parent_tokens }}" + when: _unresolved_parent_tokens | length > 0 + +# normalize (lowercase, unique, sorted) +- name: Normalize desired parent MACs + set_fact: + _desired_parent_macs: "{{ _desired_parent_macs | map('lower') | list | unique | sort }}" + + +############################################################################### +# 2) Detect existing bond (by name; fallback by exact parent MAC set) +############################################################################### +- name: Find existing bond by name + set_fact: + _bond_by_name: >- + {{ + (_ifaces | selectattr('type','equalto','bond') + | selectattr('name','equalto', bond.name) + | list | first) | default({}) + }} + +# set from _bond_by_name (computed in the previous task) +- name: Cache bond object/id from _bond_by_name + set_fact: + _existing_bond_obj: "{{ _bond_by_name | default({}) }}" + _existing_bond_id: "{{ (_bond_by_name.id | default(0)) | int }}" + +# (delete the “If not found by name…” task — it does nothing) + +# now, only scan by parent MACs if the id is still 0 +- name: Scan bonds to match desired parent MACs (order-insensitive) + when: (_existing_bond_id | int) == 0 + set_fact: + _existing_bond_obj: >- + {{ + bond_iface if ( + ((bond_iface.parents | default([])) + | map('extract', _name_to_mac) | 
map('lower') | list | sort) + == _desired_parent_macs + ) + else _existing_bond_obj | default({}) + }} + _existing_bond_id: >- + {{ + ( + bond_iface.id if ( + ((bond_iface.parents | default([])) + | map('extract', _name_to_mac) | map('lower') | list | sort) + == _desired_parent_macs + ) + else _existing_bond_id | default(0) + ) | int + }} + loop: "{{ _ifaces | selectattr('type','equalto','bond') | list }}" + loop_control: + loop_var: bond_iface + +# 1) Compute observed parent MACs from the bond object +- name: Compute observed parent MACs + set_fact: + _observed_parent_macs: >- + {{ + (_existing_bond_obj.parents | default([])) + | map('extract', _name_to_mac) + | select('defined') + | map('lower') | list | sort + }} + +# 2) (Idempotent) normalize desired list just in case +- name: Normalize desired parent MACs + set_fact: + _desired_parent_macs: "{{ (_desired_parent_macs | default([])) | map('lower') | list | sort }}" + +# 3) Compare using normalized types/lists +- name: Compute MAC-based parent match flag + set_fact: + _bond_parents_match: "{{ (_existing_bond_id | int) > 0 and (_observed_parent_macs == _desired_parent_macs) }}" + +############################################################################### +# 3) Create bond only if missing or parents differ (parents by MAC → IDs) +############################################################################### +- name: Compute desired parent IDs from MACs + set_fact: + _bond_parent_ids: >- + {{ + _desired_parent_macs + | map('lower') + | map('extract', _mac_to_id) + | list + }} + +- name: Fail if any desired MACs are unknown to MAAS + vars: + _missing: "{{ _desired_parent_macs | difference(_mac_to_id.keys() | list) | list }}" + fail: + msg: "MAAS has no interfaces with MAC(s): {{ _missing }}" + when: _missing | length > 0 + +- name: Temporarily set _bond_create_native_vid + set_fact: + _bond_create_native_vid: "{{ _vlan_lookup[bond.native_vid|string].id }}" + when: + - bond.native_vid is defined + - 
(bond.native_vid|string) in _vlan_lookup + +- name: Build create_bond payload (no link_speed on create) + when: not _bond_parents_match + set_fact: + _create_bond_qs: >- + {{ + ( + ['name=' ~ (bond.name | urlencode)] + + (_bond_parent_ids + | map('string') + | map('regex_replace','^(.*)$','parents=\1') + | list) + + (bond.mtu is defined | ternary(['mtu=' ~ (bond.mtu|string)], [])) + + (bond.mode is defined | ternary(['bond_mode=' ~ (bond.mode | urlencode)], [])) + + (_bond_create_native_vid is defined + | ternary(['vlan=' ~ (_bond_create_native_vid|string)], [])) + ) + | join('&') + }} + +- include_tasks: ../_auth_header.yml + when: not _bond_parents_match + +- name: POST ?op=create_bond (only if needed) + when: not _bond_parents_match + uri: + url: "{{ _maas_api }}/nodes/{{ system_id }}/interfaces/?op=create_bond" + method: POST + headers: + Authorization: "{{ maas_auth_header }}" + Accept: application/json + Content-Type: application/x-www-form-urlencoded + body: "{{ _create_bond_qs }}" + body_format: form-urlencoded + status_code: 200 + register: _bond_create_resp + changed_when: true +# no_log: true + +- name: Refresh interface facts (post create) + when: not _bond_parents_match + include_tasks: ../_refresh_iface_facts.yml + +# get the object +- name: Re-resolve bond by name (after create) + set_fact: + _existing_bond_obj: >- + {{ + (_ifaces | selectattr('type','equalto','bond') + | selectattr('name','equalto', bond.name) + | list | first) | default({}) + }} + +# now cache the id (cast to int) +- name: Cache bond id after re-resolve + set_fact: + _existing_bond_id: "{{ (_existing_bond_obj.id | default(0)) | int }}" + +- name: Decide if we need to update the MTU or bond mode + set_fact: + _needs_bond_base_update: >- + {{ + (_existing_bond_obj.params.bond_mode | lower | default('')) != (bond.mode | lower | default('')) + or + (_existing_bond_obj.params.mtu | int | default(0)) != (bond.mtu | int | default(0)) + }} + 
+############################################################################### +# 4) Update base settings; link_speed only when carrier is up +############################################################################### +- include_tasks: ../_auth_header.yml + +#- pause: + +- name: PUT /interfaces/{id} (bond_mode/mtu) +# when: _existing_bond_id | int > 0 + when: _needs_bond_base_update + uri: + url: "{{ _maas_api }}/nodes/{{ system_id }}/interfaces/{{ _existing_bond_id }}/" + method: PUT + headers: + Authorization: "{{ maas_auth_header }}" + Accept: application/json + Content-Type: application/x-www-form-urlencoded + body: "bond_mode={{ bond.mode }}&mtu={{ bond.mtu }}" + body_format: form-urlencoded + status_code: 200 + register: _bond_base_update + changed_when: true + #no_log: true + +#- pause: + +- name: Read link_connected for bond iface + set_fact: + _bond_link_connected: >- + {{ + (_ifaces + | selectattr('id','equalto', _existing_bond_id|int) + | map(attribute='link_connected') + | list + | first) | default(false) + }} + +- name: Decide if we need to update the link speed + set_fact: + _needs_bond_speed_update: >- + {{ + (_existing_bond_obj.link_speed | int | default(0)) != (bond.link_speed | int | default(0)) + }} + +- include_tasks: ../_auth_header.yml + +- name: PUT /interfaces/{id} (link_speed — only when up) + when: + - bond.link_speed is defined + - _bond_link_connected | bool + - _existing_bond_id | int > 0 + - _needs_bond_speed_update + uri: + url: "{{ _maas_api }}/nodes/{{ system_id }}/interfaces/{{ _existing_bond_id }}/" + method: PUT + headers: + Authorization: "{{ maas_auth_header }}" + Accept: application/json + Content-Type: application/x-www-form-urlencoded + body: "link_speed={{ bond.link_speed }}" + body_format: form-urlencoded + status_code: 200 + register: _bond_speed_update + changed_when: true + +############################################################################### +# 5) Ensure tagged VLAN subinterfaces on the bond 
+############################################################################### +- name: Gather VLAN vids already on {{ bond.name }} + set_fact: + _bond_existing_tagged_vids: >- + {{ + (_ifaces + | selectattr('type','equalto','vlan') + | selectattr('parents','defined') + | selectattr('parents','contains', bond.name) + | map(attribute='vlan') | select('defined') + | map(attribute='vid') | map('string') | list) + }} + +- name: Compute desired vids + set_fact: + _bond_desired_tagged_vids: "{{ (bond.tagged_vids | default([])) | map('string') | unique | list }}" + +- name: Compute missing vids + set_fact: + _bond_missing_tagged_vids: "{{ _bond_desired_tagged_vids | difference(_bond_existing_tagged_vids | default([])) }}" + +- name: Create missing VLAN subinterfaces on {{ bond.name }} + include_tasks: ../_create_vlan_on_parent.yml + loop: "{{ _bond_missing_tagged_vids }}" + loop_control: + loop_var: vid + label: "{{ bond.name }} → VID {{ vid }}" + vars: + parent_id: "{{ _existing_bond_id }}" + vlan_id: "{{ _vlan_lookup[vid|string].id }}" + vid_label: "{{ vid|string }}" + node_system_id: "{{ system_id }}" diff --git a/roles/maas/tasks/machines/_fetch_vlans_for_fabric.yml b/roles/maas/tasks/machines/_fetch_vlans_for_fabric.yml new file mode 100644 index 00000000..84ed9ca2 --- /dev/null +++ b/roles/maas/tasks/machines/_fetch_vlans_for_fabric.yml @@ -0,0 +1,30 @@ +--- +# 1) Refresh MAAS auth header (new nonce) +- include_tasks: ../_auth_header.yml + +# 2) GET vlans for this fabric +- name: Read VLANs for fabric {{ fab.id }} + uri: + url: "{{ _maas_api }}/fabrics/{{ fab.id }}/vlans/" + method: GET + headers: + Authorization: "{{ maas_auth_header }}" + Accept: application/json + return_content: yes + status_code: 200 + register: _vlans_this_fabric + +# 3) Merge into vid -> vlan-object map +- name: Merge VLANs from fabric {{ fab.id }} into _vlan_by_vid + set_fact: + _vlan_by_vid: >- + {{ + _vlan_by_vid + | combine( + dict( + (_vlans_this_fabric.json | map(attribute='vid') | 
list) + | zip(_vlans_this_fabric.json) + ), + recursive=True + ) + }} diff --git a/roles/maas/tasks/machines/_mark_broken.yml b/roles/maas/tasks/machines/_mark_broken.yml new file mode 100644 index 00000000..17dc7dce --- /dev/null +++ b/roles/maas/tasks/machines/_mark_broken.yml @@ -0,0 +1,63 @@ +--- +# Ensure we can talk to MAAS +- include_tasks: ../_auth_header.yml + +## Read current status so we can be idempotent +#- name: GET {{ inv_host }} details (status check) +# uri: +# url: "{{ _maas_api }}/nodes/{{ system_id }}/" +# method: GET +# headers: +# Authorization: "{{ maas_auth_header }}" +# Accept: application/json +# status_code: 200 +# return_content: true +# register: _node_get + +- name: Cache current MAAS status name + set_fact: + _maas_status_name: "{{ (_node_get.json.status_name | default('')) | string }}" + +- block: + - name: Build mark_broken comment body + set_fact: + _mark_broken_body: "comment={{ ('Temp: editing NIC at ' ~ broken_at) | urlencode }}" + + - include_tasks: ../_auth_header.yml + + # Mark Broken only if not already Broken + - name: POST {{ inv_host }} op=mark_broken (with note) + when: _maas_status_name != 'Broken' + uri: + url: "{{ _maas_api }}/machines/{{ system_id }}/op-mark_broken" + method: POST + headers: + Authorization: "{{ maas_auth_header }}" + Accept: application/json + Content-Type: application/x-www-form-urlencoded + body: "{{ _mark_broken_body }}" + body_format: form-urlencoded + status_code: 200 + register: _mark_broken_resp + changed_when: true + failed_when: _mark_broken_resp.status not in [200, 403] + + # Add to the shared un-break list only if we actually marked it + - name: Remember that we marked {{ inv_host }} Broken + when: _mark_broken_resp.status == 200 + set_fact: + _marked_broken: "{{ (hostvars['localhost']._marked_broken | default([])) + [ system_id ] }}" + delegate_to: localhost + changed_when: false + + # Add to the shared "We skipped these" list. 
A machine can not be marked broken, marked fixed, and mark broken again without being re-commissioned. + # The only way around this really is to delete the host entry and start over. + # For now, we'll just store the list of hostnames and print it at the end for human intervention. + - name: Remember that we failed to mark {{ inv_host }} broken + when: _mark_broken_resp.status == 403 + set_fact: + _failed_to_mark_broken: "{{ (hostvars['localhost']._failed_to_mark_broken | default([])) + [ system_id ] }}" + delegate_to: localhost + changed_when: false + when: system_status not in ['Broken', 'Ready'] +# when: _maas_status_name not in ['Broken', 'Ready'] diff --git a/roles/maas/tasks/machines/_plan_sets.yml b/roles/maas/tasks/machines/_plan_sets.yml new file mode 100644 index 00000000..c1cc75dd --- /dev/null +++ b/roles/maas/tasks/machines/_plan_sets.yml @@ -0,0 +1,53 @@ +--- +# 1) Normalize everything to SHORT names (no regex needed for MAAS) +- name: Normalize hostnames (ignore domains) + set_fact: + # Short names that exist in MAAS right now + _existing_names: >- + {{ + (maas_by_hostname | default({})) + | dict2items + | map(attribute='value.hostname') + | reject('equalto', None) + | list + }} + + # Short names from your inventory group + testnode_names: >- + {{ + groups.get('testnodes', []) + | map('extract', hostvars, 'inventory_hostname_short') + | reject('equalto', None) + | list + }} + + # Short names that must be excluded + maas_excluded_hosts: >- + {{ + ( + groups.get('maas_region_rack_server', []) + + groups.get('maas_db_server', []) + + groups.get('maas_dont_delete', []) + ) + | map('extract', hostvars, 'inventory_hostname_short') + | reject('equalto', None) + | unique + | list + }} + +# 2) Plan using SHORT names only +- name: Determine which hosts to create, update, and delete + set_fact: + _create_short: "{{ testnode_names | difference(_existing_names + maas_excluded_hosts) | list }}" + _delete_short: "{{ _existing_names | difference(testnode_names + 
maas_excluded_hosts) | list }}" + _update_short: "{{ (_existing_names | intersect(testnode_names)) | difference(maas_excluded_hosts) | list }}" + +# Plan: set IPMI creds for everything in create + update (short names) +- name: Build combined IPMI plan list (create + update) + set_fact: + _plan_ipmi: >- + {{ + ((_create_short | default([])) + (_update_short | default([]))) + | unique + | list + }} diff --git a/roles/maas/tasks/machines/_read_machines.yml b/roles/maas/tasks/machines/_read_machines.yml new file mode 100644 index 00000000..3b297e66 --- /dev/null +++ b/roles/maas/tasks/machines/_read_machines.yml @@ -0,0 +1,26 @@ +--- +- include_tasks: _auth_header.yml + +# Queries MAAS and builds maas_nodes_list + _with_names +- name: Read all machines from MAAS + uri: + url: "{{ _maas_api }}/machines/" + method: GET + headers: + Authorization: "{{ maas_auth_header }}" + Accept: application/json + return_content: yes + status_code: 200 + register: _all_machines + +- pause: + +- name: Parse MAAS machines JSON + set_fact: + maas_nodes_list: "{{ _all_machines.json | list }}" + +- pause: + +- name: Keep only entries with hostname + set_fact: + _with_names: "{{ maas_nodes_list | selectattr('hostname', 'defined') | list }}" diff --git a/roles/maas/tasks/machines/_refresh_iface_facts.yml b/roles/maas/tasks/machines/_refresh_iface_facts.yml new file mode 100644 index 00000000..8d7e2a81 --- /dev/null +++ b/roles/maas/tasks/machines/_refresh_iface_facts.yml @@ -0,0 +1,110 @@ +--- +# Fresh auth (new nonce/timestamp) for every API call +#- name: Build OAuth header (fresh nonce/timestamp) +# include_tasks: ../_auth_header.yml + +## 1) Fetch all interfaces for this node +#- name: Read MAAS interfaces for this node +# uri: +# url: "{{ _maas_api }}/nodes/{{ _node_system_id }}/interfaces/" +# method: GET +# headers: +# Authorization: "{{ maas_auth_header }}" +# Accept: application/json +# return_content: true +# status_code: 200 +# register: _ifaces_resp + +- include_tasks: 
machines/_read_machines.yml + +- pause: + +- include_tasks: machines/_build_indexes.yml + +- pause: + +- name: Set raw interface list + set_fact: +# _ifaces: "{{ _ifaces_resp.json | default([]) }}" + _ifaces: "{{ maas_host_to_ifaces[host] }}" + + +# 2) Rebuild quick lookups +- name: Build interface lookup maps (by name, by id, by mac) + set_fact: + _iface_by_name: >- + {{ + dict( + (_ifaces | map(attribute='name') | list) + | zip(_ifaces | list) + ) + }} + _iface_id_by_name: >- + {{ + dict( + (_ifaces | map(attribute='name') | list) + | zip(_ifaces | map(attribute='id') | list) + ) + }} + _iface_id_by_mac: >- + {{ + dict( + ( + _ifaces + | selectattr('mac_address','defined') + | map(attribute='mac_address') + | map('lower') + | list + ) + | zip( + _ifaces + | selectattr('mac_address','defined') + | map(attribute='id') + | list + ) + ) + }} + +# 3) Index existing VLAN subinterfaces as (parent_id, vlan_id) pairs +- name: Init existing VLAN pair index + set_fact: + _existing_vlan_pairs: [] + +- name: Build existing VLAN pair index + set_fact: + _existing_vlan_pairs: >- + {{ + _existing_vlan_pairs + [ { + 'parent_id': (_iface_id_by_name.get(item.parents[0]) | int), + 'vlan_id': item.vlan.id, + 'iface_id': item.id, + 'name': item.name + } ] + }} + loop: "{{ _ifaces | selectattr('type','equalto','vlan') | list }}" + when: + - item.parents is defined + - (item.parents | length) > 0 + - item.vlan is defined + - item.vlan.id is defined + loop_control: + label: "{{ item.name | default(item.id) }}" + +# 4) Track current native VLAN per *parent* interface (physical/bond) +- name: Init native VLAN map + set_fact: + _native_by_parent: {} + +#- name: Build native VLAN map (parent_id -> vlan_id or None) +# set_fact: +# _native_by_parent: "{{ _native_by_parent | combine({ (item.id | int): (item.vlan.id if (item.vlan is mapping and item.vlan.id is defined) else None) }) }}" +# loop: "{{ _ifaces | rejectattr('type','equalto','vlan') | list }}" +# loop_control: +# label: "{{ item.name 
| default(item.id) }}" +- name: Build native VLAN map (parent_id -> vlan_id or None) + set_fact: + _native_by_parent: "{{ _native_by_parent | combine({ (iface.id | int): (iface.vlan.id if iface.vlan is mapping else None) }) }}" + loop: "{{ _ifaces | rejectattr('type','equalto','vlan') | list }}" + loop_control: + loop_var: iface + label: "{{ iface.name | default(iface.id) }}" diff --git a/roles/maas/tasks/machines/cleanup.yml b/roles/maas/tasks/machines/cleanup.yml new file mode 100644 index 00000000..e08b8c8b --- /dev/null +++ b/roles/maas/tasks/machines/cleanup.yml @@ -0,0 +1,56 @@ +# Ensure auth header for cleanup +- include_tasks: ../_auth_header.yml + +# Normalize unique list (in case the same node was handled twice) +- name: Normalize _marked_broken unique list + set_fact: + _marked_broken: "{{ _marked_broken | default([]) | unique }}" + run_once: true + delegate_to: localhost + +# Fetch current status for each before flipping (idempotent safeguard) +- name: GET node details before un-breaking + uri: + url: "{{ _maas_api }}/nodes/{{ sid }}/" + method: GET + headers: + Authorization: "{{ maas_auth_header }}" + Accept: application/json + status_code: 200 + return_content: true + loop: "{{ _marked_broken | default([]) }}" + loop_control: + loop_var: sid + register: _cleanup_status + +- include_tasks: ../_auth_header.yml + +# Un-break only those still Broken +- name: POST op=mark_fixed + uri: + url: "{{ _maas_api }}/machines/{{ sid }}/op-mark_fixed" + method: POST + headers: + Authorization: "{{ maas_auth_header }}" + Accept: application/json + Content-Type: application/x-www-form-urlencoded + body: "" + body_format: form-urlencoded + status_code: 200 + loop: >- + {{ + (_cleanup_status.results | default([])) + | selectattr('json.status_name','defined') + | selectattr('json.status_name','equalto','Broken') + | map(attribute='sid') | list + }} + loop_control: + loop_var: sid + register: _mark_fixed_resp + changed_when: true + +# Optional: clear the list so a later 
run doesn’t try to un-break again +- name: Clear shared _marked_broken list + set_fact: + _marked_broken: [] + run_once: true diff --git a/roles/maas/tasks/machines/create.yml b/roles/maas/tasks/machines/create.yml new file mode 100644 index 00000000..c6d73a60 --- /dev/null +++ b/roles/maas/tasks/machines/create.yml @@ -0,0 +1,36 @@ +--- +#- include_tasks: ../_resolve_host.yml + +- include_tasks: _auth_header.yml + +- name: Build machine create body + set_fact: + maas_create_body: >- + {{ + dict({ + 'hostname': host, + 'deployed': true, + 'architecture': desired_arch, + 'mac_addresses': mac_addresses + } + | combine( desired_domain is defined and {'domain': desired_domain} or {} )) + }} + +- name: machines create body for {{ host }} (system_id={{ system_id }}) + debug: + var: maas_create_body + +- name: Create machine in MAAS + uri: + url: "{{ _maas_api }}/machines/" + method: POST + headers: + Authorization: "{{ maas_auth_header }}" + Content-Type: application/x-www-form-urlencoded + Accept: application/json + body_format: form-urlencoded + body: "{{ maas_create_body }}" + status_code: 200 + register: create_result + changed_when: create_result.status in [200, 201] + notify: "Rebuild MAAS machine indexes" diff --git a/roles/maas/tasks/machines/delete.yml b/roles/maas/tasks/machines/delete.yml new file mode 100644 index 00000000..b92b27bf --- /dev/null +++ b/roles/maas/tasks/machines/delete.yml @@ -0,0 +1,6 @@ +--- +#- include_tasks: ../_resolve_host.yml + +- name: Would have deleted host {{ host }} + debug: + msg: "Would have deleted host {{ host }}" diff --git a/roles/maas/tasks/machines/set_ipmi_creds.yml b/roles/maas/tasks/machines/set_ipmi_creds.yml new file mode 100644 index 00000000..a81a3ad6 --- /dev/null +++ b/roles/maas/tasks/machines/set_ipmi_creds.yml @@ -0,0 +1,106 @@ +--- +#- name: Build power configuration payload +# set_fact: +# maas_power_payload: >- +# {{ +# { +# "power_type": "ipmi", +# "power_parameters_power_address": hostvars[inv_host].ipmi, 
+# "power_parameters_power_user": hostvars[inv_host].power_user, +# "power_parameters_power_pass": hostvars[inv_host].power_pass, +# } +# }} +# +#- include_tasks: ../_auth_header.yml +# +#- name: Set IPMI Credentials +# uri: +# url: "{{ _maas_api }}/machines/{{ system_id }}/" +# method: PUT +# headers: +# Authorization: "{{ maas_auth_header }}" +# Accept: application/json +# Content-Type: application/x-www-form-urlencoded +# body: "{{ maas_power_payload }}" +# body_format: form-urlencoded +# status_code: 200 +# register: set_ipmi_creds_result +# changed_when: set_ipmi_creds_result.status in [200, 201] +# no_log: true + +# Derive short hostname and base group (strip trailing digits) +- name: Prep IPMI secrets lookup context + set_fact: + _inv_short: "{{ hostvars[inv_host].inventory_hostname_short | default(inventory_hostname_short) }}" + _base_group: "{{ (hostvars[inv_host].inventory_hostname_short | default(inventory_hostname_short)) | regex_replace('\\d+$', '') }}" + +# Build candidates in priority order +- name: Build IPMI secrets candidate list + set_fact: + _ipmi_files: + - "{{ secrets_path }}/host_vars/{{ _inv_short }}.yml" + - "{{ secrets_path }}/group_vars/{{ _base_group }}.yml" + - "{{ secrets_path }}/ipmi.yml" + +# Load first found file (host_vars short -> group_vars/.yml -> ipmi.yml) +- name: Load IPMI secrets (first found) + include_vars: + file: "{{ lookup('first_found', {'files': _ipmi_files, 'skip': True}) }}" + name: ipmi_secrets + # add this if secrets live on the controller: + # delegate_to: localhost + +# Ensure required keys exist +- name: Ensure IPMI user/pass are present from secrets + assert: + that: + - ipmi_secrets is defined + - ipmi_secrets.power_user is defined + - ipmi_secrets.power_pass is defined + fail_msg: >- + Missing IPMI secrets for {{ inv_host }}. 
Looked in: {{ _ipmi_files }} + +# Build payload using inventory IPMI address + secrets user/pass +- name: Build power configuration payload + set_fact: + maas_power_payload: + power_type: "ipmi" + power_parameters_power_address: "{{ hostvars[inv_host].ipmi }}" + power_parameters_power_user: "{{ ipmi_secrets.power_user }}" + power_parameters_power_pass: "{{ ipmi_secrets.power_pass }}" + +# Ensure creds exist +- name: Ensure IPMI user/pass are present from secrets + assert: + that: + - ipmi_secrets is defined + - ipmi_secrets.power_user is defined + - ipmi_secrets.power_pass is defined + fail_msg: >- + Missing IPMI secrets for {{ inv_host }}. Searched: {{ _ipmi_files }} + +# Build payload using inventory IPMI address + secrets user/pass +- name: Build power configuration payload + set_fact: + maas_power_payload: + power_type: "ipmi" + power_parameters_power_address: "{{ hostvars[inv_host].ipmi }}" + power_parameters_power_user: "{{ ipmi_secrets.power_user }}" + power_parameters_power_pass: "{{ ipmi_secrets.power_pass }}" + +- include_tasks: ../_auth_header.yml + +- name: Set IPMI Credentials + uri: + url: "{{ _maas_api }}/machines/{{ system_id }}/" + method: PUT + headers: + Authorization: "{{ maas_auth_header }}" + Accept: application/json + Content-Type: application/x-www-form-urlencoded + body: "{{ maas_power_payload }}" + body_format: form-urlencoded + status_code: 200 + register: set_ipmi_creds_result + changed_when: set_ipmi_creds_result.status in [200, 201] +# no_log: true diff --git a/roles/maas/tasks/machines/update.yml b/roles/maas/tasks/machines/update.yml new file mode 100644 index 00000000..1fe2cbde --- /dev/null +++ b/roles/maas/tasks/machines/update.yml @@ -0,0 +1,41 @@ +--- +# roles/maas/tasks/machines/update.yml + +# 1) Fresh OAuth header (nonce/timestamp) +- name: Build OAuth header + include_tasks: _auth_header.yml + +# 2) Record node system_id for downstream includes +- name: Remember {{ inv_host }} = system id {{ system_id }} + set_fact: + 
_node_system_id: "{{ system_id }}" + +# 5) Initialize desired structures so later tasks never explode on undefined +# Load desired bonds & interfaces from group_vars +- name: Load desired bonds & interfaces from group_vars + set_fact: + _desired_bonds: "{{ hostvars[inv_host].maas_bonds | default([]) }}" + _desired_ifaces: "{{ hostvars[inv_host].maas_interfaces | default([]) }}" + +- include_tasks: machines/_refresh_iface_facts.yml + +- include_tasks: machines/_mark_broken.yml + when: system_status != 'Broken' + +- name: Apply interfaces (native_vid + tagged_vids) + include_tasks: machines/_apply_one_iface.yml + loop: "{{ _desired_ifaces }}" + loop_control: + loop_var: desired_iface + label: "{{ desired_iface.prefix | default('(no prefix)') }}" + vars: + iface_obj: "{{ desired_iface }}" + +# 9) Ensure bonds (each include runs per bond; no block-looping) +- name: Ensure each bond + when: (_desired_bonds | default([])) | length > 0 + include_tasks: machines/_ensure_bond.yml + loop: "{{ _desired_bonds | default([]) }}" + loop_control: + loop_var: bond + label: "{{ bond.name | default('unnamed-bond') }}" diff --git a/roles/maas/tasks/main.yml b/roles/maas/tasks/main.yml index 567ec0b5..db20838f 100644 --- a/roles/maas/tasks/main.yml +++ b/roles/maas/tasks/main.yml @@ -56,7 +56,7 @@ tags: - config_dhcp - config_maas - - add_machines +# - machines - config_dns - config_ntp - add_users @@ -68,7 +68,7 @@ tags: - config_dhcp - config_maas - - add_machines +# - machines - config_dns - config_ntp - add_users @@ -76,6 +76,16 @@ # Configure MAAS - import_tasks: config_maas.yml +- import_tasks: api_auth_pretasks.yml + tags: + - always + - api + +# Configure Networks +- import_tasks: networking.yml + tags: + - networking + # Configure NTP Service - import_tasks: config_ntp.yml @@ -94,7 +104,8 @@ tags: config_dhcp # Add Machines into MAAS -- import_tasks: add_machines.yml +- import_tasks: machines.yml + tags: machines # Add Users into MAAS - import_tasks: add_users.yml @@ -105,7 
+116,7 @@ tags: - config_dhcp - config_maas - - add_machines +# - machines - config_dns - config_ntp - add_users diff --git a/roles/maas/tasks/networking.yml b/roles/maas/tasks/networking.yml new file mode 100644 index 00000000..505a2f7b --- /dev/null +++ b/roles/maas/tasks/networking.yml @@ -0,0 +1,432 @@ +--- +# Prereqs (set by your own auth tasks): +# - maas_api_url: e.g. "http://10.64.1.25:5240" +# - maas_auth_header: OAuth 1.0 PLAINTEXT header string +# Inputs: +# - maas_networking: your fabric/vlan/subnet structure +# - maas_global_dns_servers: optional list of DNS servers +# - maas_global_primary_rack_controller: optional Controller *hostname* +# Rack Controller must be defined at the VLAN level if not defined globally. + +################################################################################ +# API base +################################################################################ +- name: Set MAAS API base URL + set_fact: + _maas_api: "{{ maas_api_url | trim('/') }}/MAAS/api/2.0" + +################################################################################ +# Inventory Validation +################################################################################ + +# --- Check for DHCP-enabled VLANs that are missing dynamic ip_ranges ---------- + +# Always init so the assert never sees an undefined var +- name: Init list of DHCP violations + set_fact: + _dhcp_missing_dynamic: [] + +- name: Build list of fabric/vlan pairs + set_fact: + _fabric_vlans: "{{ maas_networking | subelements('vlans', skip_missing=True) }}" + +# Flag any VLAN with dhcp_on=true but no dynamic ranges on any of its subnets +- name: Find DHCP-enabled VLANs missing dynamic ranges + vars: + _vlan: "{{ item.1 }}" + _dyn_count: >- + {{ + (_vlan.subnets | default([])) + | selectattr('ip_ranges','defined') + | map(attribute='ip_ranges') + | flatten + | selectattr('type','equalto','dynamic') + | list + | length + }} + when: + - _vlan.dhcp_on | default(false) | bool + - 
(_dyn_count | int) == 0
+  set_fact:
+    _dhcp_missing_dynamic: >-
+      {{
+        (_dhcp_missing_dynamic | default([]))
+        + [ { 'fabric': item.0.fabric, 'vid': _vlan.vid, 'name': _vlan.name | default('') } ]
+      }}
+  loop: "{{ _fabric_vlans }}"
+  loop_control:
+    label: "{{ item.0.fabric }}:{{ item.1.vid }}"
+
+- name: Fail if any DHCP-enabled VLAN lacks a dynamic range
+  assert:
+    that:
+      - (_dhcp_missing_dynamic | default([])) | length == 0
+    fail_msg: >-
+      DHCP is enabled but no dynamic range is defined on these VLANs:
+      {{ (_dhcp_missing_dynamic | default([])) | to_nice_json }}
+
+# --- Check for undefined primary rack controller per VLAN ---------------------
+
+# 1) Capture global if provided (and non-empty)
+- name: Capture global primary rack controller id (if set)
+  set_fact:
+    _global_primary_rack_controller: "{{ maas_global_primary_rack_controller | string }}"
+  when:
+    - maas_global_primary_rack_controller is defined
+    - (maas_global_primary_rack_controller | string) | length > 0
+
+# 2) If no global, ensure every VLAN declares primary_rack_controller
+- name: Build list of VLANs missing primary_rack_controller (when no global set)
+  set_fact:
+    _vlans_missing_prc: |
+      {% set missing = [] %}
+      {% for pair in (maas_networking | subelements('vlans', skip_missing=True)) %}
+      {% set fab = pair[0] %}
+      {% set v = pair[1] %}
+      {% if v.primary_rack_controller is not defined or (v.primary_rack_controller | string) | length == 0 %}
+      {% set _ = missing.append(fab.fabric ~ ":VID " ~ (v.vid | string)) %}
+      {% endif %}
+      {% endfor %}
+      {{ missing }}
+  when: _global_primary_rack_controller is not defined
+
+- name: Require maas_global_primary_rack_controller or per-VLAN primary_rack_controller
+  assert:
+    that:
+      # Fixed: this previously tested '_global_primary_rack', a variable that is
+      # never set anywhere (the fact captured above is
+      # '_global_primary_rack_controller'), so the global-controller escape
+      # hatch could never trigger and the assert relied on _vlans_missing_prc
+      # alone.
+      - (_global_primary_rack_controller is defined) or (_vlans_missing_prc | length == 0)
+    fail_msg: >-
+      Missing primary rack controller configuration.
+      Either set 'maas_global_primary_rack_controller' or add 'primary_rack_controller'
+      on each VLAN.
Missing for: + {{ (_vlans_missing_prc | default([])) | join('\n') }} + when: _global_primary_rack_controller is not defined + +################################################################################ +# Domains +################################################################################ +- name: Collect unique domains from maas_networking + set_fact: + _wanted_domains: >- + {{ + maas_networking + | map(attribute='vlans') | flatten + | map(attribute='subnets') | flatten + | selectattr('domain','defined') + | map(attribute='domain') + | list | unique + }} + +- include_tasks: _auth_header.yml +#- name: Read existing RCs +# uri: +# url: "{{ _maas_api }}/rackcontrollers/" +# method: GET +# headers: { Authorization: "{{ maas_auth_header }}" } +# return_content: true +# register: _domains_resp +# +#- pause: + +- name: Read existing domains + uri: + url: "{{ _maas_api }}/domains/" + method: GET + headers: { Authorization: "{{ maas_auth_header }}" } + return_content: true + register: _domains_resp + +- name: Index domains by name + set_fact: + _domains_by_name: "{{ (_domains_resp.json | default([])) | items2dict(key_name='name', value_name='id') }}" + +- name: Compute domains to create + set_fact: + _new_domains: "{{ _wanted_domains | difference((_domains_by_name.keys() | list)) }}" + +# _wanted_domains must be a real list (use the unique/flatten filter recipe) + +- name: Ensure desired domains exist + include_tasks: networking/domain_create.yml + loop: "{{ _new_domains }}" + loop_control: + loop_var: domain_name +# #no_log: true + +################################################################################ +# Spaces +################################################################################ +- name: Collect unique spaces from maas_networking + set_fact: + _wanted_spaces: >- + {{ + maas_networking + | map(attribute='vlans') | flatten + | map(attribute='subnets') | flatten + | selectattr('space','defined') + | map(attribute='space') + | list | unique 
+ }} + +- include_tasks: _auth_header.yml + #no_log: true + +- name: Read existing spaces + uri: + url: "{{ _maas_api }}/spaces/" + method: GET + headers: + Authorization: "{{ maas_auth_header }}" + Accept: application/json + return_content: true + use_netrc: false + register: _spaces_resp + #no_log: true + +- name: Index spaces by name + set_fact: + _spaces_by_name: "{{ (_spaces_resp.json | default([])) | items2dict(key_name='name', value_name='id') }}" + +- name: Compute spaces to create + set_fact: + _new_spaces: "{{ _wanted_spaces | difference((_spaces_by_name.keys() | list)) }}" + +- name: Ensure desired spaces exist + include_tasks: networking/space_create.yml + loop: "{{ _new_spaces }}" + loop_control: + loop_var: space_name + #no_log: true + +################################################################################ +# Fabrics +################################################################################ +- include_tasks: _auth_header.yml + #no_log: true + +- name: Read fabrics + uri: + url: "{{ _maas_api }}/fabrics/" + method: GET + headers: + Authorization: "{{ maas_auth_header }}" + Accept: application/json + return_content: true + use_netrc: false + register: _fabrics_resp + #no_log: true + +- name: Index fabrics by name + set_fact: + _fabric_by_name: "{{ (_fabrics_resp.json | default([])) | items2dict(key_name='name', value_name='id') }}" + +- name: Collect desired fabric names from maas_networking + set_fact: + _wanted_fabrics: "{{ maas_networking | map(attribute='fabric') | list | unique }}" + +- name: Compute fabrics to create + set_fact: + _new_fabrics: "{{ _wanted_fabrics | difference((_fabric_by_name.keys() | list)) }}" + +- name: Ensure fabrics exist + include_tasks: networking/fabric_create.yml + loop: "{{ _new_fabrics }}" + loop_control: + loop_var: fabric_name + #no_log: true + +# Refresh fabrics after creates +- include_tasks: _auth_header.yml + #no_log: true + +- name: Refresh fabrics + uri: + url: "{{ _maas_api }}/fabrics/" + 
method: GET + headers: + Authorization: "{{ maas_auth_header }}" + Accept: application/json + return_content: true + use_netrc: false + register: _fabrics_resp2 + #no_log: true + +- name: Re-index fabrics + set_fact: + _fabric_by_name: "{{ (_fabrics_resp2.json | default([])) | items2dict(key_name='name', value_name='id') }}" + +################################################################################ +# VLANs +################################################################################ +- name: Validate VLAN names + loop: "{{ maas_networking | subelements('vlans', skip_missing=True) }}" + loop_control: + loop_var: item + assert: + that: + - item.1.name is match('^[a-z0-9-]+$') + fail_msg: "Invalid VLAN name '{{ item.1.name }}' — only lowercase letters and dashes are allowed." + +# Read VLANs per fabric (looped helper so each GET has fresh auth) +- name: init raw vlans holder + set_fact: + _vlans_raw_by_fabric: {} + +- name: Read VLANs for each fabric + include_tasks: networking/fabric_vlans_read.yml + loop: "{{ maas_networking }}" + loop_control: + loop_var: fab_obj + #no_log: true + +- name: Build VLAN index (first pass) + include_tasks: networking/vlan_build_index.yml + +- name: Create VLANs that are missing + vars: + _fname: "{{ pair.0.fabric }}" + vlan: "{{ pair.1 }}" + _vrec: "{{ _vlan_index.get(_fname, {}) }}" + # handle both string and int vid keys so creation works regardless of index build + _exists: "{{ (_vrec.get(vlan.vid | string) is not none) or (_vrec.get(vlan.vid) is not none) }}" + include_tasks: networking/vlan_create.yml + loop: "{{ maas_networking | subelements('vlans', skip_missing=True) }}" + loop_control: + loop_var: pair + label: "{{ pair.0.fabric }}:{{ pair.1.vid }}" + when: not _exists + +# Refresh VLANs after creates (read again via helper) and rebuild index +- name: Reset raw vlans holder + set_fact: + _vlans_raw_by_fabric: {} + +- name: Re-read VLANs for each fabric + include_tasks: networking/fabric_vlans_read.yml + loop: "{{ 
maas_networking }}" + loop_control: + loop_var: fab_obj + +- name: Build VLAN index (second pass) + include_tasks: networking/vlan_build_index.yml + +################################################################################ +# Subnets (create/update DNS + ranges) BEFORE enabling VLAN DHCP +################################################################################ +# Build (fabric, vlan) pairs +- name: Build list of fabric/vlan pairs + set_fact: + _fabric_vlans: "{{ maas_networking | subelements('vlans', skip_missing=True) }}" + +- name: Build list of (fabric, vlan, subnet) triples + set_fact: + _subnet_triples: | + {% set out = [] %} + {% for pair in _fabric_vlans %} + {% set fab = pair[0] %} + {% set vlan = pair[1] %} + {% for sn in vlan.subnets | default([]) %} + {% set _ = out.append([fab, vlan, sn]) %} + {% endfor %} + {% endfor %} + {{ out }} + +- name: Ensure subnets, DNS servers, and IP ranges + include_tasks: networking/subnet_apply.yml + vars: + trio: "{{ item }}" + loop: "{{ _subnet_triples }}" + loop_control: + label: "{{ item[0].fabric }} : VID {{ item[1].vid }} : {{ item[2].cidr }}" + +################################################################################ +# VLAN property updates (name/mtu/dhcp_on) AFTER ranges exist +################################################################################ +################################################################################ +# VLAN property updates (name/mtu/dhcp_on/space) AFTER ranges exist +################################################################################ + +## Resolve the VLAN id safely (handles string/int VID keys) +#- name: Resolve VLAN id for update +# vars: +# _fname: "{{ pair.0.fabric }}" +# vlan: "{{ pair.1 }}" +# set_fact: +# _vobj: >- +# {{ +# _vlan_index[_fname].get(vlan.vid|string) +# or _vlan_index[_fname].get(vlan.vid) +# }} +# _vlan_id: "{{ _vobj.id if (_vobj is defined and _vobj) else None }}" +# loop: "{{ maas_networking | subelements('vlans', 
skip_missing=True) }}" +# loop_control: +# loop_var: pair +# label: "{{ pair.0.fabric }}:{{ pair.1.vid }}" +# +#- name: Ensure VLAN exists in index before updating +# assert: +# that: +# - _vlan_id is not none +# fail_msg: >- +# VLAN {{ pair.1.vid }} on fabric {{ pair.0.fabric }} not found in _vlan_index. +# Known VIDs: {{ _vlan_index[pair.0.fabric] | dict2items | map(attribute='key') | list }} +# loop: "{{ maas_networking | subelements('vlans', skip_missing=True) }}" +# loop_control: +# loop_var: pair +# label: "{{ pair.0.fabric }}:{{ pair.1.vid }}" +# +## Build update body (name/mtu/space + dhcp_on only if we saw a dynamic range in inventory) +#- name: Build VLAN update body +# vars: +# _fname: "{{ pair.0.fabric }}" +# vlan: "{{ pair.1 }}" +# +# # unique space from subnets (if exactly one specified) +# _spaces_list: >- +# {{ +# (vlan.subnets | default([])) +# | selectattr('space','defined') +# | map(attribute='space') | list | unique +# }} +# _desired_space: "{{ _spaces_list[0] if (_spaces_list | length) == 1 else omit }}" +# +# # does inventory declare at least one dynamic range on any subnet of this VLAN? 
+# _has_dynamic_for_vlan: >- +# {{ +# (vlan.subnets | default([])) +# | selectattr('ip_ranges','defined') +# | map(attribute='ip_ranges') | flatten +# | selectattr('type','equalto','dynamic') +# | list | length > 0 +# }} +# set_fact: +# _body: >- +# {{ +# {'name': vlan.name} +# | combine( (vlan.mtu is defined) | ternary({'mtu': vlan.mtu}, {}), recursive=True ) +# | combine( (_desired_space is not none) | ternary({'space': _desired_space}, {}), recursive=True ) +# | combine( +# (vlan.dhcp_on | default(false) | bool and _has_dynamic_for_vlan) +# | ternary({'dhcp_on': true}, {}), recursive=True +# ) +# }} +# loop: "{{ maas_networking | subelements('vlans', skip_missing=True) }}" +# loop_control: +# loop_var: pair +# label: "{{ pair.0.fabric }}:{{ pair.1.vid }}" + +## Do the actual VLAN update (expects _vlan_id and _body set by the two tasks above) +#- name: Call VLAN Update tasks +# include_tasks: networking/vlan_update.yml +# loop: "{{ maas_networking | subelements('vlans', skip_missing=True) | map('join', ':') | list }}" +# loop_control: +# label: "{{ item }}" + +- name: Call VLAN Update tasks + include_tasks: networking/vlan_update.yml + loop: "{{ maas_networking | subelements('vlans', skip_missing=True) }}" + loop_control: + loop_var: pair + label: "{{ pair.0.fabric }}:{{ pair.1.vid }}" + vars: + _fname: "{{ pair.0.fabric }}" + vlan: "{{ pair.1 }}" diff --git a/roles/maas/tasks/networking/domain_create.yml b/roles/maas/tasks/networking/domain_create.yml new file mode 100644 index 00000000..a4f37611 --- /dev/null +++ b/roles/maas/tasks/networking/domain_create.yml @@ -0,0 +1,22 @@ +--- +# Expects: _maas_api, maas_api_key, domain_name +# Builds a fresh OAuth header and creates the domain + +- include_tasks: ../_auth_header.yml +# no_log: true + +- uri: + url: "{{ _maas_api }}/domains/" + method: POST + headers: + Authorization: "{{ maas_auth_header }}" + Content-Type: application/x-www-form-urlencoded + Accept: application/json + body_format: form-urlencoded + body: 
+ name: "{{ domain_name }}" + status_code: [200, 201, 409] + use_netrc: false + return_content: false + validate_certs: true + #no_log: true diff --git a/roles/maas/tasks/networking/fabric_create.yml b/roles/maas/tasks/networking/fabric_create.yml new file mode 100644 index 00000000..317747ea --- /dev/null +++ b/roles/maas/tasks/networking/fabric_create.yml @@ -0,0 +1,19 @@ +--- +# Expects: _maas_api, maas_api_key, fabric_name + +- include_tasks: ../_auth_header.yml + no_log: true + +- uri: + url: "{{ _maas_api }}/fabrics/" + method: POST + headers: + Authorization: "{{ maas_auth_header }}" + Content-Type: application/x-www-form-urlencoded + Accept: application/json + body_format: form-urlencoded + body: + name: "{{ fabric_name }}" + status_code: [200, 201, 409] + use_netrc: false + no_log: true diff --git a/roles/maas/tasks/networking/fabric_vlans_read.yml b/roles/maas/tasks/networking/fabric_vlans_read.yml new file mode 100644 index 00000000..1e6f212e --- /dev/null +++ b/roles/maas/tasks/networking/fabric_vlans_read.yml @@ -0,0 +1,20 @@ +--- +# Expects: _maas_api, maas_api_key, _fabric_by_name, fab_obj (with .fabric) + +- include_tasks: ../_auth_header.yml + no_log: true + +- uri: + url: "{{ _maas_api }}/fabrics/{{ _fabric_by_name[fab_obj.fabric] }}/vlans/" + method: GET + headers: + Authorization: "{{ maas_auth_header }}" + Accept: application/json + return_content: true + use_netrc: false + register: _vlans_resp + no_log: true + +- set_fact: + _vlans_raw_by_fabric: "{{ _vlans_raw_by_fabric | combine({ fab_obj.fabric: (_vlans_resp.json | default([])) }, recursive=True) }}" + no_log: true diff --git a/roles/maas/tasks/networking/space_create.yml b/roles/maas/tasks/networking/space_create.yml new file mode 100644 index 00000000..144c206f --- /dev/null +++ b/roles/maas/tasks/networking/space_create.yml @@ -0,0 +1,19 @@ +--- +# Expects: _maas_api, maas_api_key, space_name + +- include_tasks: ../_auth_header.yml + no_log: true + +- uri: + url: "{{ _maas_api 
}}/spaces/" + method: POST + headers: + Authorization: "{{ maas_auth_header }}" + Content-Type: application/x-www-form-urlencoded + Accept: application/json + body_format: form-urlencoded + body: + name: "{{ space_name }}" + status_code: [200, 201, 409] + use_netrc: false + no_log: true diff --git a/roles/maas/tasks/networking/subnet_apply.yml b/roles/maas/tasks/networking/subnet_apply.yml new file mode 100644 index 00000000..b4290624 --- /dev/null +++ b/roles/maas/tasks/networking/subnet_apply.yml @@ -0,0 +1,355 @@ +--- +# Expects: trio=[fabric_obj, vlan_obj, subnet_obj], _vlan_index, _maas_api, maas_auth_header + +# 0) Validate input triple +- name: Verify triple input + assert: + that: + - trio is defined + - trio | length == 3 + fail_msg: "subnet_apply.yml expects trio=[fabric, vlan, subnet], got: {{ trio | default('undefined') }}" + +# 1) Unpack triple +- name: Extract fabric, vlan, and subnet + set_fact: + _fname: "{{ trio[0].fabric }}" + vlan: "{{ trio[1] }}" + subnet: "{{ trio[2] }}" + +# 2) Ensure VLAN exists in index & resolve its numeric id +- name: Ensure VLAN is present in index + assert: + that: + - _vlan_index[_fname] is defined + - _vlan_index[_fname][vlan.vid | string] is defined + fail_msg: >- + VLAN {{ vlan.vid }} not found in index for fabric {{ _fname }}. 
+ Known vids here: {{ (_vlan_index.get(_fname, {}) | dict2items | map(attribute='key') | list) }} + +- name: Resolve VLAN object from index + set_fact: + _vobj: "{{ _vlan_index[_fname][vlan.vid | string] }}" + +- name: Extract VLAN numeric id + set_fact: + _vid: "{{ _vobj.id }}" + +# 3) Read subnets (global) and normalize to a list +- include_tasks: ../_auth_header.yml + no_log: true + +- name: Read subnets (global list) + uri: + url: "{{ _maas_api }}/subnets/" + method: GET + headers: + Authorization: "{{ maas_auth_header }}" + Accept: application/json + return_content: true + use_netrc: false + register: _subnets_resp + no_log: true + +- name: Normalize subnets list + set_fact: + _subnets_list: >- + {{ + (_subnets_resp.json.subnets + if (_subnets_resp.json is mapping and 'subnets' in _subnets_resp.json) + else (_subnets_resp.json | default([]))) + }} + +# Find the existing subnet id by CIDR (none if missing) +- name: Extract existing subnet id by CIDR + set_fact: + _existing_subnet_id: >- + {{ + (_subnets_list + | selectattr('cidr','equalto', subnet.cidr) + | map(attribute='id') | list | first) + | default(none) + }} + +- name: Decide if subnet already exists + set_fact: + _subnet_exists: "{{ _existing_subnet_id is not none and (_existing_subnet_id|string)|length > 0 }}" + +# Working subnet id variable (may be set later by create) +- set_fact: + _subnet_id: "{{ _existing_subnet_id }}" + +# figure out the parent VLAN (we’re looping subelements('subnets'), so pair.0 is the VLAN) +- name: Resolve VLAN id for this subnet + set_fact: + _vlan_id: >- + {{ + ( + _vlan_index[pair.0.fabric][(pair.0.vid | string)].id + if (pair is defined and pair.0 is defined and pair.0.vid is defined) + else _vlan_index[_fname][(vlan.vid | string)].id + ) | string + }} + +#- name: Locate existing subnet by CIDR +# set_fact: +# _existing_subnet: "{{ (_subnets_resp.json | default([])) | selectattr('cidr','equalto', subnet.cidr) | list | first | default(none) }}" + +# 4) CREATE if missing +- 
name: Build subnet create body + set_fact: + _subnet_create_body: >- + {{ + {'cidr': subnet.cidr, 'vlan': _vid} + | combine( (subnet.gateway is defined) | ternary({'gateway_ip': subnet.gateway}, {}), recursive=True ) + | combine( (subnet.managed is defined) | ternary({'managed': subnet.managed|bool}, {}), recursive=True ) + }} + +- include_tasks: ../_auth_header.yml + when: not _subnet_exists + no_log: true + +- name: Create subnet (if missing) + uri: + url: "{{ _maas_api }}/subnets/" + method: POST + headers: + Authorization: "{{ maas_auth_header }}" + Content-Type: application/x-www-form-urlencoded + Accept: application/json + body_format: form-urlencoded + body: "{{ _subnet_create_body }}" + status_code: [200, 201, 409] + return_content: true + use_netrc: false + register: _subnet_create_resp + when: not _subnet_exists + no_log: true + +- name: Set final _subnet_id + set_fact: + _subnet_id: >- + {{ + ( + _existing_subnet_id + if _subnet_exists + else ( + _subnet_create_resp.json.id + if (_subnet_create_resp is defined and _subnet_create_resp.json is defined and _subnet_create_resp.json.id is defined) + else none + ) + ) + }} + +- name: Ensure _subnet_id is set (fallback lookup) + set_fact: + _subnet_id: >- + {{ + _subnet_id + if (_subnet_id is not none and (_subnet_id|string)|length > 0) + else ( + (_subnets_list + | selectattr('cidr','equalto', subnet.cidr) + | map(attribute='id') | list | first) | default(none) + ) + }} + +- include_tasks: ../_auth_header.yml + when: _subnet_id is none or (_subnet_id|string)|length == 0 + no_log: true + +- name: Re-read subnets (only if _subnet_id still missing) + uri: + url: "{{ _maas_api }}/subnets/" + method: GET + headers: + Authorization: "{{ maas_auth_header }}" + Accept: application/json + return_content: true + use_netrc: false + register: _subnets_resp_refetch + when: _subnet_id is none or (_subnet_id|string)|length == 0 + no_log: true + +- name: Normalize subnets list (refetch) + set_fact: + _subnets_list: >- + {{ + 
(_subnets_resp_refetch.json.subnets + if (_subnets_resp_refetch is defined and _subnets_resp_refetch.json is mapping and 'subnets' in _subnets_resp_refetch.json) + else (_subnets_resp_refetch.json | default([]))) + }} + when: _subnets_resp_refetch is defined + +- name: Final fallback - derive _subnet_id from refetch + set_fact: + _subnet_id: >- + {{ + _subnet_id + if (_subnet_id is not none and (_subnet_id|string)|length > 0) + else ( + (_subnets_list + | selectattr('cidr','equalto', subnet.cidr) + | map(attribute='id') | list | first) | default(none) + ) + }} + +# 5) UPDATE if present +- name: Build subnet update body + set_fact: + _subnet_update_body: >- + {{ + {'cidr': subnet.cidr, 'vlan': _vid} + | combine( (subnet.gateway is defined) | ternary({'gateway_ip': subnet.gateway}, {}), recursive=True ) + | combine( (subnet.managed is defined) | ternary({'managed': subnet.managed|bool}, {}), recursive=True ) + }} +# {{ +# {} +# | combine( (subnet.gateway is defined) | ternary({'gateway_ip': subnet.gateway}, {}), recursive=True ) +# | combine( (subnet.managed is defined) | ternary({'managed': subnet.managed|bool}, {}), recursive=True ) +# }} + +- include_tasks: ../_auth_header.yml + when: _subnet_id is not none + no_log: true + +- name: Update subnet (if exists) + uri: + url: "{{ _maas_api }}/subnets/{{ _subnet_id }}/" + method: PUT + headers: + Authorization: "{{ maas_auth_header }}" + Content-Type: application/x-www-form-urlencoded + Accept: application/json + body_format: form-urlencoded + body: "{{ _subnet_update_body }}" + status_code: [200] + return_content: true + use_netrc: false + when: _subnet_id is not none and (_subnet_id|string)|length > 0 + +# 7) DNS servers +# DNS servers: prefer subnet.dns_servers[], else maas_global_dns_servers +- name: Choose DNS servers for this subnet + set_fact: + _dns_list: "{{ subnet.dns_servers | default(maas_global_dns_servers | default([])) | list }}" + +- include_tasks: ../_auth_header.yml + when: _dns_list | length > 0 and 
_subnet_id is not none and (_subnet_id|string)|length > 0 + no_log: true + +- name: Set DNS servers on subnet + uri: + url: "{{ _maas_api }}/subnets/{{ _subnet_id }}/" + method: PUT + headers: + Authorization: "{{ maas_auth_header }}" + Content-Type: application/x-www-form-urlencoded + Accept: application/json + body_format: form-urlencoded + body: "{{ {'dns_servers': _dns_list | join(' ')} }}" + status_code: [200] + use_netrc: false + when: _dns_list | length > 0 and _subnet_id is not none and (_subnet_id|string)|length > 0 + +# 8) IP ranges +# IP ranges (read from top-level /ipranges/, not /subnets/{id}/ipranges/) +- include_tasks: ../_auth_header.yml + when: + - _subnet_id is not none + - subnet.ip_ranges is defined + no_log: true + +- name: Read all ipranges (we'll filter by subnet) + uri: + url: "{{ _maas_api }}/ipranges/" + method: GET + headers: + Authorization: "{{ maas_auth_header }}" + Accept: application/json + return_content: true + use_netrc: false + status_code: [200] + register: _all_ranges_resp + when: + - _subnet_id is not none + - subnet.ip_ranges is defined + +# Normalize payload so later tasks don’t depend on .json vs .content +- name: Normalize ipranges payload to a list + set_fact: + _ipranges_list: >- + {{ + _all_ranges_resp.json + if (_all_ranges_resp is defined and _all_ranges_resp.json is defined and _all_ranges_resp.json != '') + else (_all_ranges_resp.content | from_json) + }} + when: + - _subnet_id is not none + - subnet.ip_ranges is defined + - _all_ranges_resp is defined + +- name: Show _subnet_id and ipranges count + debug: + msg: + - "_subnet_id (int) = {{ _subnet_id | int }}" + - "ipranges total = {{ (_ipranges_list | default([])) | length }}" + when: + - _subnet_id is not none + - subnet.ip_ranges is defined + - _ipranges_list is defined + +- name: Build normalized ipranges list + set_fact: + _ipranges_normalized: | + {% set out = [] %} + {% for r in (_ipranges_list | default([])) %} + {% set sid = ((r.subnet.id if (r.subnet is 
mapping and 'id' in r.subnet) else r.subnet) | int) %} + {% set _ = out.append({ + 'id': r.id, + 'type': r.type, + 'start_ip': r.start_ip, + 'end_ip': r.end_ip, + 'computed_subnet_id': sid + }) %} + {% endfor %} + {{ out }} + when: + - _subnet_id is not none + - subnet.ip_ranges is defined + - _ipranges_list is defined + +- name: Filter normalized ipranges to this subnet (robust int compare) + set_fact: + _subnet_ranges_existing: | + {% set sid = _subnet_id | int %} + {% set out = [] %} + {% for r in (_ipranges_normalized | default([])) %} + {% if (r.computed_subnet_id | int) == sid %} + {% set _ = out.append(r) %} + {% endif %} + {% endfor %} + {{ out }} + when: + - _subnet_id is not none + - subnet.ip_ranges is defined + - _ipranges_normalized is defined + +- name: Create missing ranges + vars: + _exists: >- + {{ + (_subnet_ranges_existing | default([])) + | selectattr('type','equalto', ipr.type | default('reserved')) + | selectattr('start_ip','equalto', ipr.start_ip) + | selectattr('end_ip','equalto', ipr.end_ip) + | list | length > 0 + }} + include_tasks: subnet_range_create.yml + loop: "{{ subnet.ip_ranges | default([]) }}" + loop_control: + loop_var: ipr + label: "{{ ipr.type }} {{ ipr.start_ip }}-{{ ipr.end_ip }}" + when: + - _subnet_id is not none + - subnet.ip_ranges is defined + - not _exists diff --git a/roles/maas/tasks/networking/subnet_range_create.yml b/roles/maas/tasks/networking/subnet_range_create.yml new file mode 100644 index 00000000..577fcb8e --- /dev/null +++ b/roles/maas/tasks/networking/subnet_range_create.yml @@ -0,0 +1,225 @@ +--- +# Expects: _subnet_id, ipr (range spec with type/start_ip/end_ip), maas_auth_header, _subnet_ranges_existing +# Optional: maas_overwrite_ipranges (default: false) + +- name: Default overwrite flag + set_fact: + maas_overwrite_ipranges: "{{ maas_overwrite_ipranges | default(false) | bool }}" + +# Helper facts +- set_fact: + _ipr_type: "{{ ipr.type | default('reserved') }}" + _ipr_start: "{{ ipr.start_ip }}" + 
_ipr_end: "{{ ipr.end_ip }}" + _overlaps: [] + +# --- exact match detection (boolean, no None pitfalls) --- +- name: Compute exact-match flag for this subnet/type/span + vars: + _want_type: "{{ _ipr_type | string }}" + _want_start: "{{ _ipr_start | string }}" + _want_end: "{{ _ipr_end | string }}" + set_fact: + _exact_exists: >- + {{ + ( + (_subnet_ranges_existing | default([])) + | selectattr('type', 'equalto', _want_type) + | selectattr('start_ip', 'equalto', _want_start) + | selectattr('end_ip', 'equalto', _want_end) + | list | length + ) > 0 + }} + +# (optional) tiny debug so you can see it flip true/false +- name: Tiny debug so you can see it flip true/false + debug: + msg: + - "subnet_id: {{ _subnet_id }}" + - "existing ranges on this subnet: {{ _subnet_ranges_existing | length }}" + - "looking for: {{ _ipr_type }} {{ _ipr_start }}-{{ _ipr_end }}" + - "exact_exists={{ _exact_exists }}" + verbosity: 0 + +# --- overlap detection stays as you had it --- + +# Skip only when an exact already exists +- name: Skip create when exact range already exists + debug: + msg: "IP range already present ({{ _ipr_type }} {{ _ipr_start }}-{{ _ipr_end }}); skipping." 
+  when: _exact_exists
+
+# Always define _overlaps, even if earlier overlap-compute tasks were skipped
+- name: Ensure _overlaps is defined
+  set_fact:
+    _overlaps: "{{ _overlaps | default([]) }}"
+
+- include_tasks: ../_auth_header.yml
+  no_log: true
+
+- name: Read all ipranges (server truth)
+  uri:
+    url: "{{ _maas_api }}/ipranges/"
+    method: GET
+    headers:
+      Authorization: "{{ maas_auth_header }}"
+      Accept: application/json
+    return_content: true
+    status_code: [200]
+  register: _ipr_read
+  no_log: true
+
+# Fixed: iprange subnet ids from the API are ints while _subnet_id is a string
+# fact; Jinja's 'equalto' is type-strict, so the previous string-vs-int compare
+# silently matched nothing and overlap detection never saw existing ranges.
+# (The normalized-compare code elsewhere in this role already casts with |int.)
+- name: Filter ipranges down to this subnet id
+  set_fact:
+    _subnet_ranges_existing: >-
+      {{ (_ipr_read.json | default([]))
+         | selectattr('subnet.id', 'equalto', _subnet_id | int)
+         | list }}
+
+# Build tuple/list forms of the new range once
+- name: Compute tuple forms of new start/end
+  set_fact:
+    _new_start_t: "{{ _ipr_start | split('.') | map('int') | list }}"
+    _new_end_t: "{{ _ipr_end | split('.') | map('int') | list }}"
+
+# existing.start <= new.end AND existing.end >= new.start (inclusive)
+- name: Accumulate overlaps for this subnet/type/span (inclusive, no ipaddr)
+  set_fact:
+    _overlaps: "{{ _overlaps + [r] }}"
+  loop: "{{ _subnet_ranges_existing | default([]) }}"
+  loop_control:
+    loop_var: r
+  when:
+    - (r.start_ip | split('.') | map('int') | list) <= _new_end_t
+    - (r.end_ip | split('.') | map('int') | list) >= _new_start_t
+
+- name: Debug overlaps (if any)
+  debug:
+    msg:
+      - "Overlaps (ids): {{ _overlaps | map(attribute='id') | list }}"
+      - "Overlaps (types): {{ _overlaps | map(attribute='type') | list }}"
+      - "Overlaps (spans): {{ _overlaps | map(attribute='start_ip') | list }} — {{ _overlaps | map(attribute='end_ip') | list }}"
+  when: _overlaps | length > 0
+
+# Fail on overlapping ranges (unless overwrite enabled)
+- name: Fail on overlapping ranges (unless overwrite enabled)
+  fail:
+    msg: >-
+      Requested {{ _ipr_type }} range {{ _ipr_start }}-{{ _ipr_end }}
+      overlaps existing ranges:
+      {{ (_overlaps | default([])) |
map(attribute='start_ip') | list }} - {{ (_overlaps | default([])) | map(attribute='end_ip') | list }}. + Re-run with maas_overwrite_ipranges=true to replace them. + when: + - not _exact_exists + - (_overlaps | default([])) | length > 0 + - not maas_overwrite_ipranges + +- include_tasks: ../_auth_header.yml + no_log: true + +- name: Read this subnet to check for managed=true and dynamic range mismatch + uri: + url: "{{ _maas_api }}/subnets/{{ _subnet_id }}/" + method: GET + headers: + Authorization: "{{ maas_auth_header }}" + Accept: application/json + return_content: true + status_code: [200] + register: _subnet_read + no_log: true + +- set_fact: + _server_subnet_managed: "{{ (_subnet_read.json.managed | default(false)) | bool }}" + +- name: Fail if subnet is unmanaged but a dynamic range is requested + fail: + msg: >- + Refusing to create a dynamic range on unmanaged subnet id={{ _subnet_id }} + ({{ _subnet_read.json.cidr }}). Set 'managed: true' on the subnet or use a + reserved range instead. Requested: {{ _ipr_type }} {{ _ipr_start }}-{{ _ipr_end }}. 
+ when: + - _ipr_type == 'dynamic' + - not _server_subnet_managed + +# Delete overlapping ipranges before create +- include_tasks: ../_auth_header.yml + when: + - not _exact_exists + - (_overlaps | default([])) | length > 0 + - maas_overwrite_ipranges + no_log: true + +# before delete loop +- set_fact: + _overlap_ids: "{{ _overlaps | map(attribute='id') | list | unique | list }}" + +- name: Delete overlapping ipranges before create + uri: + url: "{{ _maas_api }}/ipranges/{{ ov_id }}/" + method: DELETE + headers: + Authorization: "{{ maas_auth_header }}" + Accept: application/json + status_code: [200, 204, 404] + return_content: false + loop: "{{ _overlap_ids }}" + loop_control: + loop_var: ov_id + label: "delete id={{ ov_id }}" + failed_when: false + when: + - (_overlaps | length) > 0 + - maas_overwrite_ipranges + - not _exact_exists + no_log: true + +- include_tasks: ../_auth_header.yml + no_log: true + +- name: Read all ipranges again (post-delete) + uri: + url: "{{ _maas_api }}/ipranges/" + method: GET + headers: + Authorization: "{{ maas_auth_header }}" + Accept: application/json + return_content: true + status_code: [200] + register: _ipr_read_after + no_log: true + +- name: Filter ipranges down to this subnet id (post-delete) + set_fact: + _subnet_ranges_existing: >- + {{ (_ipr_read_after.json | default([])) + | selectattr('subnet.id','equalto', _subnet_id) + | list }} + +- include_tasks: ../_auth_header.yml + when: + - not _exact_exists + - ((_overlaps | default([])) | length == 0) or maas_overwrite_ipranges + no_log: true + +- name: Create iprange + uri: + url: "{{ _maas_api }}/ipranges/" + method: POST + headers: + Authorization: "{{ maas_auth_header }}" + Content-Type: application/x-www-form-urlencoded + Accept: application/json + body_format: form-urlencoded + body: + subnet: "{{ _subnet_id | string }}" + type: "{{ _ipr_type | default('reserved') }}" + start_ip: "{{ _ipr_start }}" + end_ip: "{{ _ipr_end }}" + status_code: [200, 201, 409] + 
return_content: true + use_netrc: false + register: _range_create_resp + when: + - not _exact_exists + - ((_overlaps | default([])) | length == 0) or maas_overwrite_ipranges diff --git a/roles/maas/tasks/networking/vlan_build_index.yml b/roles/maas/tasks/networking/vlan_build_index.yml new file mode 100644 index 00000000..1b5ef98f --- /dev/null +++ b/roles/maas/tasks/networking/vlan_build_index.yml @@ -0,0 +1,22 @@ +--- +# Build `_vlan_index` as: { "": { "": } } + +# Start clean +- set_fact: + _vlan_index: {} + +# Expect `_vlans_raw_by_fabric` to be a dict like: +# { "tucson-qe": [ {vid: 1300, id: 5011, ...}, ... ], ... } +- name: Normalize VLAN index with string vid keys + set_fact: + _vlan_index: | + {% set out = {} %} + {% for it in (_vlans_raw_by_fabric | default({}) | dict2items) %} + {% set fname = it.key %} + {% set vlist = it.value | default([]) %} + {% set _ = out.update({ fname: {} }) %} + {% for v in vlist %} + {% set _ = out[fname].update({ (v.vid | string): v }) %} + {% endfor %} + {% endfor %} + {{ out }} diff --git a/roles/maas/tasks/networking/vlan_create.yml b/roles/maas/tasks/networking/vlan_create.yml new file mode 100644 index 00000000..9bd49cd3 --- /dev/null +++ b/roles/maas/tasks/networking/vlan_create.yml @@ -0,0 +1,32 @@ +--- +# Expects: _maas_api, maas_api_key, pair, _fabric_by_name +# pair.0 = fabric obj; pair.1 = vlan obj + +- include_tasks: ../_auth_header.yml + no_log: true + +- set_fact: + _fid: "{{ _fabric_by_name[pair.0.fabric] }}" + _vlan_create_body: >- + {{ + {'vid': pair.1.vid} + | combine( (pair.1.name is defined) | ternary({'name': pair.1.name}, {}), recursive=True ) + | combine( (pair.1.description is defined) | ternary({'description': pair.1.description}, {}), recursive=True ) + | combine( (pair.1.mtu is defined) | ternary({'mtu': pair.1.mtu}, {}), recursive=True ) + | combine( (pair.1.space is defined) | ternary({'space': pair.1.space}, {}), recursive=True ) + }} + +# NOTE: dhcp_on is not created here; we set it in the 
separate "vlan_update" task because +# ipranges must be created first. +- uri: + url: "{{ _maas_api }}/fabrics/{{ _fid }}/vlans/" + method: POST + headers: + Authorization: "{{ maas_auth_header }}" + Content-Type: application/x-www-form-urlencoded + Accept: application/json + body_format: form-urlencoded + body: "{{ _vlan_create_body }}" + status_code: [200, 201, 409] + return_content: true + use_netrc: false diff --git a/roles/maas/tasks/networking/vlan_update.yml b/roles/maas/tasks/networking/vlan_update.yml new file mode 100644 index 00000000..b42cda2d --- /dev/null +++ b/roles/maas/tasks/networking/vlan_update.yml @@ -0,0 +1,95 @@ +--- +# Expects: _fname, _fabric_by_name, vlan, _vlan_index, _body, _maas_api, maas_auth_header +# (_fname and vlan are often passed from the caller; we normalize if pair=* is used) + +- name: Normalize inputs + set_fact: + _fname: "{{ _fname | default(pair.0.fabric) }}" + _fid: "{{ _fabric_by_name[pair.0.fabric] }}" + vlan: "{{ vlan | default(pair.1) }}" + +- name: Ensure VLAN present in index + assert: + that: + - _vlan_index[_fname] is defined + - _vlan_index[_fname][vlan.vid | string] is defined + fail_msg: >- + VLAN {{ vlan.vid }} not found in index for fabric {{ _fname }}. + Known vids: {{ _vlan_index.get(_fname, {}) | dict2items | map(attribute='key') | list }} + +- name: Clear any stale per-VLAN variables + set_fact: + _vlan_id: "{{ none }}" + _vobj: "{{ none }}" + _prc_candidate: "" + _primary_rack_controller: "{{ none }}" + +- name: Resolve VLAN object + set_fact: + _vobj: "{{ _vlan_index[_fname][vlan.vid | string] }}" + +- name: And ID + set_fact: + _vlan_id: "{{ _vobj.id | string }}" + +# Set the Primary Rack Controller to the VLAN-level defined one. Otherwise empty string.
+- name: Compute per-VLAN primary rack controller candidate + set_fact: + _prc_candidate: "{{ (vlan | default({})).get('primary_rack_controller') | default('', true) | string | trim }}" + +# Use the VLAN-level defined PRC discovered above or use the global one. +- name: Decide primary rack controller for this VLAN + set_fact: + _primary_rack_controller: "{{ _prc_candidate if (_prc_candidate | length) > 0 else (_global_primary_rack_controller | default(omit)) }}" + +- name: Build VLAN update body + vars: + _spaces_list: >- + {{ + (vlan.subnets | default([])) + | selectattr('space','defined') + | map(attribute='space') | list | unique + }} + _desired_space: "{{ _spaces_list[0] if (_spaces_list | length) == 1 else omit }}" + + _has_dynamic_for_vlan: >- + {{ + (vlan.subnets | default([])) + | selectattr('ip_ranges','defined') + | map(attribute='ip_ranges') | flatten + | selectattr('type','equalto','dynamic') + | list | length > 0 + }} + set_fact: + _vlan_update_body: >- + {{ + {'name': vlan.name} + | combine( (vlan.mtu is defined) | ternary({'mtu': vlan.mtu}, {}), recursive=True ) + | combine( (_desired_space is not none) | ternary({'space': _desired_space}, {}), recursive=True ) + | combine( + (vlan.dhcp_on | default(false) | bool and (_primary_rack_controller is defined)) + | ternary({'primary_rack': _primary_rack_controller}, {}), recursive=True + ) + | combine( + (vlan.dhcp_on | default(false) | bool and _has_dynamic_for_vlan) + | ternary({'dhcp_on': true}, {}), recursive=True + ) + }} + +- include_tasks: ../_auth_header.yml + no_log: true + +- name: Update VLAN properties + uri: + url: "{{ _maas_api }}/fabrics/{{ _fid }}/vlans/{{ vlan.vid }}/" + method: PUT + headers: + Authorization: "{{ maas_auth_header }}" + Content-Type: application/x-www-form-urlencoded + Accept: application/json + body_format: form-urlencoded + body: "{{ _vlan_update_body }}" + status_code: [200] + return_content: true + use_netrc: false + no_log: true diff --git 
a/roles/maas/tasks/networking_subnet.yml b/roles/maas/tasks/networking_subnet.yml new file mode 100644 index 00000000..8bcdf344 --- /dev/null +++ b/roles/maas/tasks/networking_subnet.yml @@ -0,0 +1,133 @@ +--- +# Expects: +# - _maas_api +# - maas_auth_header +# - _fabric_by_name +# - _vlan_index +# - trio (tuple: [fabric_obj, vlan_obj, subnet_obj]) + +- name: Unpack current triple + set_fact: + _fname: "{{ trio.0.fabric }}" + vlan: "{{ trio.1 }}" + subnet: "{{ trio.2 }}" + _vobj: "{{ _vlan_index[_fname][vlan.vid] }}" + _vid: "{{ _vobj.id }}" + +- name: Read existing subnets on VLAN + uri: + url: "{{ _maas_api }}/vlans/{{ _vid }}/subnets/" + method: GET + headers: { Authorization: "{{ maas_auth_header }}" } + return_content: true + register: _subnets_resp + +- name: Get existing subnet (by CIDR) if present + set_fact: + _existing_subnet: "{{ (_subnets_resp.json | default([])) | selectattr('cidr','equalto', subnet.cidr) | list | first | default(None) }}" + +- name: Build create body for subnet + set_fact: + _subnet_create_body: >- + {{ + {'cidr': subnet.cidr, 'vlan': _vid} + | combine( (subnet.gateway is defined) | ternary({'gateway_ip': subnet.gateway}, {}), recursive=True ) + | combine( (subnet.space is defined) | ternary({'space': subnet.space}, {}), recursive=True ) + | combine( (subnet.managed is defined) | ternary({'managed': subnet.managed|bool}, {}), recursive=True ) + }} + +- name: Create subnet if missing + when: _existing_subnet is none + uri: + url: "{{ _maas_api }}/subnets/" + method: POST + headers: { Authorization: "{{ maas_auth_header }}" } + body_format: form-urlencoded + body: "{{ _subnet_create_body }}" + status_code: [200, 201, 409] + return_content: true + +- name: Build update body for subnet + set_fact: + _subnet_update_body: >- + {{ + {} + | combine( (subnet.gateway is defined) | ternary({'gateway_ip': subnet.gateway}, {}), recursive=True ) + | combine( (subnet.space is defined) | ternary({'space': subnet.space}, {}), recursive=True ) + | 
combine( (subnet.managed is defined) | ternary({'managed': subnet.managed|bool}, {}), recursive=True ) + }} + +- name: Update subnet if exists + when: _existing_subnet is not none + uri: + url: "{{ _maas_api }}/subnets/{{ _existing_subnet.id }}/" + method: POST + headers: { Authorization: "{{ maas_auth_header }}" } + body_format: form-urlencoded + body: "{{ _subnet_update_body }}" + status_code: [200, 201] + return_content: true + +- name: Re-read subnets to get current subnet_id + uri: + url: "{{ _maas_api }}/vlans/{{ _vid }}/subnets/" + method: GET + headers: { Authorization: "{{ maas_auth_header }}" } + return_content: true + register: _subnets_after + +- name: Compute subnet id + set_fact: + _subnet_id: "{{ (_subnets_after.json | default([])) | selectattr('cidr','equalto', subnet.cidr) | map(attribute='id') | first }}" + +- name: Determine DNS servers for subnet (per-subnet or global) + set_fact: + _dns_list: "{{ subnet.dns_servers | default(maas_global_dns_servers | default([])) | list }}" + +- name: Set DNS servers on subnet when provided + when: _dns_list | length > 0 + uri: + url: "{{ _maas_api }}/subnets/{{ _subnet_id }}/" + method: POST + headers: { Authorization: "{{ maas_auth_header }}" } + body_format: form-urlencoded + body: "{{ {'dns_servers': _dns_list | join(' ')} }}" + status_code: [200, 201] + +- name: Ensure IP ranges on subnet (if any) + when: subnet.ip_ranges is defined + block: + - name: Read existing ranges + uri: + url: "{{ _maas_api }}/subnets/{{ _subnet_id }}/ipranges/" + method: GET + headers: { Authorization: "{{ maas_auth_header }}" } + return_content: true + register: _ranges_resp + + - name: Create/ensure each range (by type/start/end) + vars: + ipr_body: >- + {{ + {'type': ipr.type | default('reserved'), + 'start_ip': ipr.start_ip, + 'end_ip': ipr.end_ip} + }} + exists: >- + {{ + (_ranges_resp.json | default([])) + | selectattr('type','equalto', ipr.type | default('reserved')) + | selectattr('start_ip','equalto', ipr.start_ip) + | 
selectattr('end_ip','equalto', ipr.end_ip) + | list | first | default(None) + }} + when: exists is none + uri: + url: "{{ _maas_api }}/subnets/{{ _subnet_id }}/ipranges/" + method: POST + headers: { Authorization: "{{ maas_auth_header }}" } + body_format: form-urlencoded + body: "{{ ipr_body }}" + status_code: [200, 201, 409] + loop: "{{ subnet.ip_ranges }}" + loop_control: { loop_var: ipr } -- 2.47.3