From: Guillaume Abrioux
Date: Mon, 10 Dec 2018 14:46:32 +0000 (+0100)
Subject: introduce new role ceph-facts
X-Git-Tag: v4.0.0beta1~104
X-Git-Url: http://git.apps.os.sepia.ceph.com/?a=commitdiff_plain;h=0eb56e36f8ce52015aa6c343faccd589e5fd2c6c;p=ceph-ansible.git

introduce new role ceph-facts

Sometimes we play the whole `ceph-defaults` role just to access the
default values of some variables, which means we also play the
`facts.yml` part of the role even though it is not needed.
Splitting this role in two speeds up the playbook.

Closes: #3282
Signed-off-by: Guillaume Abrioux
---

diff --git a/infrastructure-playbooks/add-osd.yml b/infrastructure-playbooks/add-osd.yml
index fa5a2f460..393e6e6f7 100644
--- a/infrastructure-playbooks/add-osd.yml
+++ b/infrastructure-playbooks/add-osd.yml
@@ -45,6 +45,9 @@
     - import_role:
         name: ceph-defaults
 
+    - import_role:
+        name: ceph-facts
+
     - import_role:
         name: ceph-validate
@@ -68,6 +71,9 @@
     - import_role:
         name: ceph-defaults
 
+    - import_role:
+        name: ceph-facts
+
     - import_role:
         name: ceph-handler
diff --git a/infrastructure-playbooks/purge-docker-cluster.yml b/infrastructure-playbooks/purge-docker-cluster.yml
index ccc69ebf7..6b0969056 100644
--- a/infrastructure-playbooks/purge-docker-cluster.yml
+++ b/infrastructure-playbooks/purge-docker-cluster.yml
@@ -321,6 +321,10 @@
         name: ceph-defaults
         private: false
 
+    - import_role:
+        name: ceph-facts
+        private: false
+
     - name: get all the running osds
       shell: |
         systemctl list-units | grep 'loaded[[:space:]]\+active' | grep -oE "ceph-osd@([0-9]{1,2}|[a-z]+).service"
@@ -588,6 +592,10 @@
         name: ceph-defaults
         private: false
 
+    - import_role:
+        name: ceph-facts
+        private: false
+
     - name: show container list on all the nodes (should be empty)
       command: >
         {{ container_binary }} ps --filter='name=ceph' -a -q
diff --git a/infrastructure-playbooks/rolling_update.yml b/infrastructure-playbooks/rolling_update.yml
index ffda4d7e9..5547a8b81 100644
--- a/infrastructure-playbooks/rolling_update.yml
+++ b/infrastructure-playbooks/rolling_update.yml
@@ -120,6 +120,8 @@
     - import_role:
         name: ceph-defaults
+    - import_role:
+        name: ceph-facts
     - import_role:
         name: ceph-handler
     - import_role:
@@ -250,6 +252,8 @@
   tasks:
     - import_role:
         name: ceph-defaults
+    - import_role:
+        name: ceph-facts
 
     - name: non container - get current fsid
       command: "ceph --cluster {{ cluster }} fsid"
@@ -330,6 +334,8 @@
     - import_role:
         name: ceph-defaults
+    - import_role:
+        name: ceph-facts
     - import_role:
         name: ceph-handler
     - import_role:
@@ -395,6 +401,8 @@
     - import_role:
         name: ceph-defaults
+    - import_role:
+        name: ceph-facts
     - import_role:
         name: ceph-handler
     - import_role:
@@ -487,6 +495,8 @@
   tasks:
     - import_role:
         name: ceph-defaults
+    - import_role:
+        name: ceph-facts
 
     - name: set_fact docker_exec_cmd_osd
       set_fact:
@@ -538,6 +548,8 @@
     - import_role:
         name: ceph-defaults
+    - import_role:
+        name: ceph-facts
     - import_role:
         name: ceph-handler
     - import_role:
@@ -587,6 +599,8 @@
     - import_role:
         name: ceph-defaults
+    - import_role:
+        name: ceph-facts
     - import_role:
         name: ceph-handler
     - import_role:
@@ -644,6 +658,8 @@
     - import_role:
         name: ceph-defaults
+    - import_role:
+        name: ceph-facts
     - import_role:
         name: ceph-handler
     - import_role:
@@ -697,6 +713,8 @@
     - import_role:
         name: ceph-defaults
+    - import_role:
+        name: ceph-facts
     - import_role:
         name: ceph-handler
     - import_role:
@@ -753,6 +771,8 @@
     - import_role:
         name: ceph-defaults
+    - import_role:
+        name: ceph-facts
     - import_role:
         name: ceph-handler
     - import_role:
@@ -785,6 +805,8 @@
   tasks:
     - import_role:
         name: ceph-defaults
+    - import_role:
+        name: ceph-facts
     - import_role:
         name: ceph-handler
     - import_role:
diff --git a/infrastructure-playbooks/shrink-mon.yml b/infrastructure-playbooks/shrink-mon.yml
index 84d8d2270..88634525e 100644
--- a/infrastructure-playbooks/shrink-mon.yml
+++ b/infrastructure-playbooks/shrink-mon.yml
@@ -70,6 +70,9 @@
     - import_role:
         name: ceph-defaults
 
+    - import_role:
+        name: ceph-facts
+
     # post_tasks for preceding import -
     - name: pick a monitor different than the one we want to remove
       set_fact:
diff --git a/infrastructure-playbooks/shrink-osd.yml b/infrastructure-playbooks/shrink-osd.yml
index 40b0c0d66..b791a8830 100644
--- a/infrastructure-playbooks/shrink-osd.yml
+++ b/infrastructure-playbooks/shrink-osd.yml
@@ -59,6 +59,9 @@
     - import_role:
         name: ceph-defaults
 
+    - import_role:
+        name: ceph-facts
+
   post_tasks:
     - name: set_fact docker_exec_cmd build docker exec command (containerized)
       set_fact:
diff --git a/infrastructure-playbooks/switch-from-non-containerized-to-containerized-ceph-daemons.yml b/infrastructure-playbooks/switch-from-non-containerized-to-containerized-ceph-daemons.yml
index e702d8c35..8f931ddb7 100644
--- a/infrastructure-playbooks/switch-from-non-containerized-to-containerized-ceph-daemons.yml
+++ b/infrastructure-playbooks/switch-from-non-containerized-to-containerized-ceph-daemons.yml
@@ -114,10 +114,16 @@
   tasks:
     - import_role:
         name: ceph-defaults
+
+    - import_role:
+        name: ceph-facts
+
     - import_role:
         name: ceph-handler
+
     - import_role:
         name: ceph-container-common
+
     - import_role:
         name: ceph-mon
@@ -176,6 +182,9 @@
     - import_role:
         name: ceph-defaults
 
+    - import_role:
+        name: ceph-facts
+
     - import_role:
         name: ceph-handler
@@ -292,6 +301,9 @@
     - import_role:
         name: ceph-defaults
 
+    - import_role:
+        name: ceph-facts
+
     - import_role:
         name: ceph-handler
@@ -363,6 +375,9 @@
     - import_role:
         name: ceph-defaults
 
+    - import_role:
+        name: ceph-facts
+
     - import_role:
         name: ceph-handler
@@ -413,6 +428,9 @@
     - import_role:
         name: ceph-defaults
 
+    - import_role:
+        name: ceph-facts
+
     - import_role:
         name: ceph-handler
@@ -462,6 +480,9 @@
     - import_role:
         name: ceph-defaults
 
+    - import_role:
+        name: ceph-facts
+
     - import_role:
         name: ceph-handler
@@ -516,6 +537,9 @@
     - import_role:
         name: ceph-defaults
 
+    - import_role:
+        name: ceph-facts
+
     - import_role:
         name: ceph-handler
diff --git a/roles/ceph-defaults/tasks/facts.yml b/roles/ceph-defaults/tasks/facts.yml
deleted file mode 100644
index 08ef682b3..000000000
--- a/roles/ceph-defaults/tasks/facts.yml
+++ /dev/null
@@ -1,271 +0,0 @@
----
-- name: check if it is atomic host
-  stat:
-    path: /run/ostree-booted
-  register: stat_ostree
-
-- name: set_fact is_atomic
-  set_fact:
-    is_atomic: "{{ stat_ostree.stat.exists }}"
-
-- name: check if podman binary is present
-  stat:
-    path: /usr/bin/podman
-  register: podman_binary
-
-- name: set_fact is_podman
-  set_fact:
-    is_podman: "{{ podman_binary.stat.exists }}"
-
-- name: set_fact container_binary
-  set_fact:
-    container_binary: "{{ 'podman' if is_podman and ansible_distribution == 'Fedora' else 'docker' }}"
-  when: containerized_deployment
-
-- name: set_fact monitor_name ansible_hostname
-  set_fact:
-    monitor_name: "{{ ansible_hostname }}"
-  when:
-    - not mon_use_fqdn
-
-- name: set_fact monitor_name ansible_fqdn
-  set_fact:
-    monitor_name: "{{ ansible_fqdn }}"
-  when:
-    - mon_use_fqdn
-
-- name: set_fact docker_exec_cmd
-  set_fact:
-    docker_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
-  delegate_to: "{{ groups[mon_group_name][0] }}"
-  when:
-    - containerized_deployment
-    - groups.get(mon_group_name, []) | length > 0
-
-# this task shouldn't run in a rolling_update situation
-# because it blindly picks a mon, which may be down because
-# of the rolling update
-- name: is ceph running already?
-  command: "timeout 5 {{ docker_exec_cmd }} ceph --cluster {{ cluster }} -s -f json"
-  changed_when: false
-  failed_when: false
-  check_mode: no
-  register: ceph_current_status
-  run_once: true
-  delegate_to: "{{ groups[mon_group_name][0] }}"
-  when:
-    - not rolling_update
-    - groups.get(mon_group_name, []) | length > 0
-
-# We want this check to be run only on the first node
-- name: check if {{ fetch_directory }} directory exists
-  stat:
-    path: "{{ fetch_directory }}/monitor_keyring.conf"
-  delegate_to: localhost
-  become: false
-  register: monitor_keyring_conf
-  run_once: true
-
-# set this as a default when performing a rolling_update
-# so the rest of the tasks here will succeed
-- name: set_fact ceph_current_status rc 1
-  set_fact:
-    ceph_current_status:
-      rc: 1
-  when:
-    - rolling_update or groups.get(mon_group_name, []) | length == 0
-
-- name: create a local fetch directory if it does not exist
-  file:
-    path: "{{ fetch_directory }}"
-    state: directory
-  delegate_to: localhost
-  changed_when: false
-  become: false
-  when:
-    - (cephx or generate_fsid)
-
-- name: set_fact ceph_current_status (convert to json)
-  set_fact:
-    ceph_current_status: "{{ ceph_current_status.stdout | from_json }}"
-  when:
-    - not rolling_update
-    - ceph_current_status.rc == 0
-
-- name: set_fact fsid from ceph_current_status
-  set_fact:
-    fsid: "{{ ceph_current_status.fsid }}"
-  when:
-    - ceph_current_status.fsid is defined
-
-# Set ceph_release to ceph_stable by default
-- name: set_fact ceph_release ceph_stable_release
-  set_fact:
-    ceph_release: "{{ ceph_stable_release }}"
-
-- name: generate cluster fsid
-  shell: python -c 'import uuid; print(str(uuid.uuid4()))' | tee {{ fetch_directory }}/ceph_cluster_uuid.conf
-  args:
-    creates: "{{ fetch_directory }}/ceph_cluster_uuid.conf"
-  register: cluster_uuid
-  delegate_to: localhost
-  become: false
-  when:
-    - generate_fsid
-    - ceph_current_status.fsid is undefined
-
-- name: reuse cluster fsid when cluster is already running
-  shell: echo {{ fsid }} | tee {{ fetch_directory }}/ceph_cluster_uuid.conf
-  args:
-    creates: "{{ fetch_directory }}/ceph_cluster_uuid.conf"
-  delegate_to: localhost
-  become: false
-  when:
-    - ceph_current_status.fsid is defined
-
-- name: read cluster fsid if it already exists
-  command: cat {{ fetch_directory }}/ceph_cluster_uuid.conf
-  args:
-    removes: "{{ fetch_directory }}/ceph_cluster_uuid.conf"
-  delegate_to: localhost
-  changed_when: false
-  register: cluster_uuid
-  become: false
-  check_mode: no
-  when:
-    - generate_fsid
-
-- name: set_fact fsid
-  set_fact:
-    fsid: "{{ cluster_uuid.stdout }}"
-  when:
-    - generate_fsid
-
-- name: set_fact mds_name ansible_hostname
-  set_fact:
-    mds_name: "{{ ansible_hostname }}"
-  when:
-    - not mds_use_fqdn
-
-- name: set_fact mds_name ansible_fqdn
-  set_fact:
-    mds_name: "{{ ansible_fqdn }}"
-  when:
-    - mds_use_fqdn
-
-- name: set_fact rbd_client_directory_owner ceph
-  set_fact:
-    rbd_client_directory_owner: ceph
-  when:
-    - rbd_client_directory_owner is not defined
-      or not rbd_client_directory_owner
-
-- name: set_fact rbd_client_directory_group rbd_client_directory_group
-  set_fact:
-    rbd_client_directory_group: ceph
-  when:
-    - rbd_client_directory_group is not defined
-      or not rbd_client_directory_group
-
-- name: set_fact rbd_client_directory_mode 0770
-  set_fact:
-    rbd_client_directory_mode: "0770"
-  when:
-    - rbd_client_directory_mode is not defined
-      or not rbd_client_directory_mode
-
-- name: resolve device link(s)
-  command: readlink -f {{ item }}
-  changed_when: false
-  with_items: "{{ devices }}"
-  register: devices_prepare_canonicalize
-  when:
-    - devices is defined
-    - inventory_hostname in groups.get(osd_group_name, [])
-    - not osd_auto_discovery|default(False)
-    - osd_scenario|default('dummy') != 'lvm'
-
-- name: set_fact build devices from resolved symlinks
-  set_fact:
-    devices: "{{ devices | default([]) + [ item.stdout ] }}"
-  with_items: "{{ devices_prepare_canonicalize.results }}"
-  when:
-    - devices is defined
-    - inventory_hostname in groups.get(osd_group_name, [])
-    - not osd_auto_discovery|default(False)
-    - osd_scenario|default('dummy') != 'lvm'
-
-- name: set_fact build final devices list
-  set_fact:
-    devices: "{{ devices | reject('search','/dev/disk') | list | unique }}"
-  when:
-    - devices is defined
-    - inventory_hostname in groups.get(osd_group_name, [])
-    - not osd_auto_discovery|default(False)
-    - osd_scenario|default('dummy') != 'lvm'
-
-- name: set_fact ceph_uid for debian based system - non container
-  set_fact:
-    ceph_uid: 64045
-  when:
-    - not containerized_deployment
-    - ansible_os_family == 'Debian'
-
-- name: set_fact ceph_uid for red hat or suse based system - non container
-  set_fact:
-    ceph_uid: 167
-  when:
-    - not containerized_deployment
-    - ansible_os_family in ['RedHat', 'Suse']
-
-- name: set_fact ceph_uid for debian based system - container
-  set_fact:
-    ceph_uid: 64045
-  when:
-    - containerized_deployment
-    - ceph_docker_image_tag | string is search("ubuntu")
-
-- name: set_fact ceph_uid for red hat based system - container
-  set_fact:
-    ceph_uid: 167
-  when:
-    - containerized_deployment
-    - ceph_docker_image_tag | string is search("latest") or ceph_docker_image_tag | string is search("centos") or ceph_docker_image_tag | string is search("fedora")
-
-- name: set_fact ceph_uid for red hat
-  set_fact:
-    ceph_uid: 167
-  when:
-    - containerized_deployment
-    - ceph_docker_image is search("rhceph")
-
-- name: set_fact rgw_hostname
-  set_fact:
-    rgw_hostname: "{% set _value = ansible_hostname -%}
-      {% for key in (ceph_current_status['servicemap']['services']['rgw']['daemons'] | list) -%}
-      {% if key == ansible_fqdn -%}
-      {% set _value = key -%}
-      {% endif -%}
-      {% endfor -%}
-      {{ _value }}"
-  when:
-    - inventory_hostname in groups.get(rgw_group_name, []) or inventory_hostname in groups.get(nfs_group_name, [])
-    - ceph_current_status['servicemap'] is defined
-    - ceph_current_status['servicemap']['services'] is defined
-    - ceph_current_status['servicemap']['services']['rgw'] is defined
-
-- name: set_fact osd_pool_default_pg_num
-  set_fact:
-    osd_pool_default_pg_num: "{{ ceph_conf_overrides.get('global', {}).get('osd_pool_default_pg_num', ceph_osd_pool_default_pg_num) }}"
-
-- name: set_fact osd_pool_default_size
-  set_fact:
-    osd_pool_default_size: "{{ ceph_conf_overrides.get('global', {}).get('osd_pool_default_size', ceph_osd_pool_default_size) }}"
-
-- name: import_tasks set_monitor_address.yml
-  import_tasks: set_monitor_address.yml
-
-- name: import_tasks set_radosgw_address.yml
-  import_tasks: set_radosgw_address.yml
-  when:
-    - inventory_hostname in groups.get(rgw_group_name, [])
diff --git a/roles/ceph-defaults/tasks/main.yml b/roles/ceph-defaults/tasks/main.yml
index 37b7149d2..73b314ff7 100644
--- a/roles/ceph-defaults/tasks/main.yml
+++ b/roles/ceph-defaults/tasks/main.yml
@@ -1,3 +1 @@
----
-- name: include facts.yml
-  include_tasks: facts.yml
+---
\ No newline at end of file
diff --git a/roles/ceph-defaults/tasks/set_monitor_address.yml b/roles/ceph-defaults/tasks/set_monitor_address.yml
deleted file mode 100644
index 7ac15343a..000000000
--- a/roles/ceph-defaults/tasks/set_monitor_address.yml
+++ /dev/null
@@ -1,50 +0,0 @@
----
-- name: set_fact _monitor_address to monitor_address_block
-  set_fact:
-    _monitor_addresses: "{{ _monitor_addresses | default([]) + [{ 'name': item, 'addr': hostvars[item]['ansible_all_' + ip_version + '_addresses'] | ipaddr(hostvars[item]['monitor_address_block']) | first | ipwrap }] }}"
-  with_items:
-    - "{{ groups.get(mon_group_name, []) }}"
-  when:
-    - "item not in _monitor_addresses | default([]) | selectattr('name', 'defined') | map(attribute='name') | list"
-    - hostvars[item]['monitor_address_block'] is defined
-    - hostvars[item]['monitor_address_block'] != 'subnet'
-
-- name: set_fact _monitor_address to monitor_address
-  set_fact:
-    _monitor_addresses: "{{ _monitor_addresses | default([]) + [{ 'name': item, 'addr': hostvars[item]['monitor_address'] | ipwrap}] }}"
-  with_items:
-    - "{{ groups.get(mon_group_name, []) }}"
-  when:
-    - "item not in _monitor_addresses | default([]) | selectattr('name', 'defined') | map(attribute='name') | list"
-    - hostvars[item]['monitor_address'] is defined
-    - hostvars[item]['monitor_address'] != '0.0.0.0'
-
-- name: set_fact _monitor_address to monitor_interface - ipv4
-  set_fact:
-    _monitor_addresses: "{{ _monitor_addresses | default([]) + [{ 'name': item, 'addr': hostvars[item]['ansible_' + (hostvars[item]['monitor_interface']|replace('-', '_'))][ip_version]['address'] | ipwrap }] }}"
-  with_items:
-    - "{{ groups.get(mon_group_name, []) }}"
-  when:
-    - "item not in _monitor_addresses | default([]) | selectattr('name', 'defined') | map(attribute='name') | list"
-    - ip_version == 'ipv4'
-    - hostvars[item]['monitor_address_block'] | default('subnet') == 'subnet'
-    - hostvars[item]['monitor_address'] | default('0.0.0.0') == '0.0.0.0'
-    - hostvars[item]['monitor_interface'] | default('interface') != 'interface'
-
-- name: set_fact _monitor_address to monitor_interface - ipv6
-  set_fact:
-    _monitor_addresses: "{{ _monitor_addresses | default([]) + [{ 'name': item, 'addr': hostvars[item]['ansible_' + (hostvars[item]['monitor_interface']|replace('-', '_'))][ip_version][0]['address'] | ipwrap }] }}"
-  with_items:
-    - "{{ groups.get(mon_group_name, []) }}"
-  when:
-    - "item not in _monitor_addresses | default([]) | selectattr('name', 'defined') | map(attribute='name') | list"
-    - ip_version == 'ipv6'
-    - hostvars[item]['monitor_address_block'] | default('subnet') == 'subnet'
-    - hostvars[item]['monitor_address'] | default('0.0.0.0') == '0.0.0.0'
-    - hostvars[item]['monitor_interface'] | default('interface') != 'interface'
-
-- name: set_fact _current_monitor_address
-  set_fact:
-    _current_monitor_address: "{{ item.addr }}"
-  with_items: "{{ _monitor_addresses }}"
-  when: inventory_hostname == item.name
\ No newline at end of file
diff --git a/roles/ceph-defaults/tasks/set_radosgw_address.yml b/roles/ceph-defaults/tasks/set_radosgw_address.yml
deleted file mode 100644
index b0dcd0331..000000000
--- a/roles/ceph-defaults/tasks/set_radosgw_address.yml
+++ /dev/null
@@ -1,35 +0,0 @@
----
-- name: set_fact _radosgw_address to radosgw_address_block
-  set_fact:
-    _radosgw_address: "{{ hostvars[inventory_hostname]['ansible_all_' + ip_version + '_addresses'] | ipaddr(radosgw_address_block) | first | ipwrap }}"
-  when:
-    - radosgw_address_block is defined
-    - radosgw_address_block != 'subnet'
-
-- name: set_fact _radosgw_address to radosgw_address
-  set_fact:
-    _radosgw_address: "{{ radosgw_address | ipwrap }}"
-  when:
-    - radosgw_address is defined
-    - radosgw_address != '0.0.0.0'
-
-- block:
-    - name: set_fact _interface
-      set_fact:
-        _interface: "{{ 'ansible_' + (radosgw_interface | replace('-', '_')) }}"
-
-    - name: set_fact _radosgw_address to radosgw_interface - ipv4
-      set_fact:
-        _radosgw_address: "{{ hostvars[inventory_hostname][_interface][ip_version]['address'] }}"
-      when:
-        - ip_version == 'ipv4'
-
-    - name: set_fact _radosgw_address to radosgw_interface - ipv6
-      set_fact:
-        _radosgw_address: "{{ hostvars[inventory_hostname][_interface][ip_version][0]['address'] }}"
-      when:
-        - ip_version == 'ipv6'
-  when:
-    - radosgw_address_block == 'subnet'
-    - radosgw_address == '0.0.0.0'
-    - radosgw_interface != 'interface'
diff --git a/roles/ceph-facts/README.md b/roles/ceph-facts/README.md
new file mode 100644
index 000000000..592982dac
--- /dev/null
+++ b/roles/ceph-facts/README.md
@@ -0,0 +1,3 @@
+# Ansible role: ceph-facts
+
+Documentation is available at http://docs.ceph.com/ceph-ansible/.
diff --git a/roles/ceph-facts/defaults/main.yml b/roles/ceph-facts/defaults/main.yml
new file mode 100644
index 000000000..73b314ff7
--- /dev/null
+++ b/roles/ceph-facts/defaults/main.yml
@@ -0,0 +1 @@
+---
\ No newline at end of file
diff --git a/roles/ceph-facts/meta/main.yml b/roles/ceph-facts/meta/main.yml
new file mode 100644
index 000000000..b834c5308
--- /dev/null
+++ b/roles/ceph-facts/meta/main.yml
@@ -0,0 +1,17 @@
+---
+galaxy_info:
+  company: Red Hat
+  author: Guillaume Abrioux
+  description: Set some facts for ceph to be deployed
+  license: Apache
+  min_ansible_version: 2.7
+  platforms:
+    - name: Ubuntu
+      versions:
+        - xenial
+    - name: EL
+      versions:
+        - 7
+  galaxy_tags:
+    - system
+dependencies: []
diff --git a/roles/ceph-facts/tasks/facts.yml b/roles/ceph-facts/tasks/facts.yml
new file mode 100644
index 000000000..08ef682b3
--- /dev/null
+++ b/roles/ceph-facts/tasks/facts.yml
@@ -0,0 +1,271 @@
+---
+- name: check if it is atomic host
+  stat:
+    path: /run/ostree-booted
+  register: stat_ostree
+
+- name: set_fact is_atomic
+  set_fact:
+    is_atomic: "{{ stat_ostree.stat.exists }}"
+
+- name: check if podman binary is present
+  stat:
+    path: /usr/bin/podman
+  register: podman_binary
+
+- name: set_fact is_podman
+  set_fact:
+    is_podman: "{{ podman_binary.stat.exists }}"
+
+- name: set_fact container_binary
+  set_fact:
+    container_binary: "{{ 'podman' if is_podman and ansible_distribution == 'Fedora' else 'docker' }}"
+  when: containerized_deployment
+
+- name: set_fact monitor_name ansible_hostname
+  set_fact:
+    monitor_name: "{{ ansible_hostname }}"
+  when:
+    - not mon_use_fqdn
+
+- name: set_fact monitor_name ansible_fqdn
+  set_fact:
+    monitor_name: "{{ ansible_fqdn }}"
+  when:
+    - mon_use_fqdn
+
+- name: set_fact docker_exec_cmd
+  set_fact:
+    docker_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
+  delegate_to: "{{ groups[mon_group_name][0] }}"
+  when:
+    - containerized_deployment
+    - groups.get(mon_group_name, []) | length > 0
+
+# this task shouldn't run in a rolling_update situation
+# because it blindly picks a mon, which may be down because
+# of the rolling update
+- name: is ceph running already?
+ command: "timeout 5 {{ docker_exec_cmd }} ceph --cluster {{ cluster }} -s -f json" + changed_when: false + failed_when: false + check_mode: no + register: ceph_current_status + run_once: true + delegate_to: "{{ groups[mon_group_name][0] }}" + when: + - not rolling_update + - groups.get(mon_group_name, []) | length > 0 + +# We want this check to be run only on the first node +- name: check if {{ fetch_directory }} directory exists + stat: + path: "{{ fetch_directory }}/monitor_keyring.conf" + delegate_to: localhost + become: false + register: monitor_keyring_conf + run_once: true + +# set this as a default when performing a rolling_update +# so the rest of the tasks here will succeed +- name: set_fact ceph_current_status rc 1 + set_fact: + ceph_current_status: + rc: 1 + when: + - rolling_update or groups.get(mon_group_name, []) | length == 0 + +- name: create a local fetch directory if it does not exist + file: + path: "{{ fetch_directory }}" + state: directory + delegate_to: localhost + changed_when: false + become: false + when: + - (cephx or generate_fsid) + +- name: set_fact ceph_current_status (convert to json) + set_fact: + ceph_current_status: "{{ ceph_current_status.stdout | from_json }}" + when: + - not rolling_update + - ceph_current_status.rc == 0 + +- name: set_fact fsid from ceph_current_status + set_fact: + fsid: "{{ ceph_current_status.fsid }}" + when: + - ceph_current_status.fsid is defined + +# Set ceph_release to ceph_stable by default +- name: set_fact ceph_release ceph_stable_release + set_fact: + ceph_release: "{{ ceph_stable_release }}" + +- name: generate cluster fsid + shell: python -c 'import uuid; print(str(uuid.uuid4()))' | tee {{ fetch_directory }}/ceph_cluster_uuid.conf + args: + creates: "{{ fetch_directory }}/ceph_cluster_uuid.conf" + register: cluster_uuid + delegate_to: localhost + become: false + when: + - generate_fsid + - ceph_current_status.fsid is undefined + +- name: reuse cluster fsid when cluster is already running + shell: echo {{ fsid }} | tee {{ fetch_directory }}/ceph_cluster_uuid.conf + args: + creates: "{{ fetch_directory }}/ceph_cluster_uuid.conf" + delegate_to: localhost + become: false + when: + - ceph_current_status.fsid is defined + +- name: read cluster fsid if it already exists + command: cat {{ fetch_directory }}/ceph_cluster_uuid.conf + args: + removes: "{{ fetch_directory }}/ceph_cluster_uuid.conf" + delegate_to: localhost + changed_when: false + register: cluster_uuid + become: false + check_mode: no + when: + - generate_fsid + +- name: set_fact fsid + set_fact: + fsid: "{{ cluster_uuid.stdout }}" + when: + - generate_fsid + +- name: set_fact mds_name ansible_hostname + set_fact: + mds_name: "{{ ansible_hostname }}" + when: + - not mds_use_fqdn + +- name: set_fact mds_name ansible_fqdn + set_fact: + mds_name: "{{ ansible_fqdn }}" + when: + - mds_use_fqdn + +- name: set_fact rbd_client_directory_owner ceph + set_fact: + rbd_client_directory_owner: ceph + when: + - rbd_client_directory_owner is not defined + or not rbd_client_directory_owner + +- name: set_fact rbd_client_directory_group rbd_client_directory_group + set_fact: + rbd_client_directory_group: ceph + when: + - rbd_client_directory_group is not defined + or not rbd_client_directory_group + +- name: set_fact rbd_client_directory_mode 0770 + set_fact: + rbd_client_directory_mode: "0770" + when: + - rbd_client_directory_mode is not defined + or not rbd_client_directory_mode + +- name: resolve device link(s) + command: readlink -f {{ item }} + changed_when: false + with_items: 
"{{ devices }}" + register: devices_prepare_canonicalize + when: + - devices is defined + - inventory_hostname in groups.get(osd_group_name, []) + - not osd_auto_discovery|default(False) + - osd_scenario|default('dummy') != 'lvm' + +- name: set_fact build devices from resolved symlinks + set_fact: + devices: "{{ devices | default([]) + [ item.stdout ] }}" + with_items: "{{ devices_prepare_canonicalize.results }}" + when: + - devices is defined + - inventory_hostname in groups.get(osd_group_name, []) + - not osd_auto_discovery|default(False) + - osd_scenario|default('dummy') != 'lvm' + +- name: set_fact build final devices list + set_fact: + devices: "{{ devices | reject('search','/dev/disk') | list | unique }}" + when: + - devices is defined + - inventory_hostname in groups.get(osd_group_name, []) + - not osd_auto_discovery|default(False) + - osd_scenario|default('dummy') != 'lvm' + +- name: set_fact ceph_uid for debian based system - non container + set_fact: + ceph_uid: 64045 + when: + - not containerized_deployment + - ansible_os_family == 'Debian' + +- name: set_fact ceph_uid for red hat or suse based system - non container + set_fact: + ceph_uid: 167 + when: + - not containerized_deployment + - ansible_os_family in ['RedHat', 'Suse'] + +- name: set_fact ceph_uid for debian based system - container + set_fact: + ceph_uid: 64045 + when: + - containerized_deployment + - ceph_docker_image_tag | string is search("ubuntu") + +- name: set_fact ceph_uid for red hat based system - container + set_fact: + ceph_uid: 167 + when: + - containerized_deployment + - ceph_docker_image_tag | string is search("latest") or ceph_docker_image_tag | string is search("centos") or ceph_docker_image_tag | string is search("fedora") + +- name: set_fact ceph_uid for red hat + set_fact: + ceph_uid: 167 + when: + - containerized_deployment + - ceph_docker_image is search("rhceph") + +- name: set_fact rgw_hostname + set_fact: + rgw_hostname: "{% set _value = ansible_hostname -%} + {% for key in (ceph_current_status['servicemap']['services']['rgw']['daemons'] | list) -%} + {% if key == ansible_fqdn -%} + {% set _value = key -%} + {% endif -%} + {% endfor -%} + {{ _value }}" + when: + - inventory_hostname in groups.get(rgw_group_name, []) or inventory_hostname in groups.get(nfs_group_name, []) + - ceph_current_status['servicemap'] is defined + - ceph_current_status['servicemap']['services'] is defined + - ceph_current_status['servicemap']['services']['rgw'] is defined + +- name: set_fact osd_pool_default_pg_num + set_fact: + osd_pool_default_pg_num: "{{ ceph_conf_overrides.get('global', {}).get('osd_pool_default_pg_num', ceph_osd_pool_default_pg_num) }}" + +- name: set_fact osd_pool_default_size + set_fact: + osd_pool_default_size: "{{ ceph_conf_overrides.get('global', {}).get('osd_pool_default_size', ceph_osd_pool_default_size) }}" + +- name: import_tasks set_monitor_address.yml + import_tasks: set_monitor_address.yml + +- name: import_tasks set_radosgw_address.yml + import_tasks: set_radosgw_address.yml + when: + - inventory_hostname in groups.get(rgw_group_name, []) diff --git a/roles/ceph-facts/tasks/main.yml b/roles/ceph-facts/tasks/main.yml new file mode 100644 index 000000000..37b7149d2 --- /dev/null +++ b/roles/ceph-facts/tasks/main.yml @@ -0,0 +1,3 @@ +--- +- name: include facts.yml + include_tasks: facts.yml diff --git a/roles/ceph-facts/tasks/set_monitor_address.yml b/roles/ceph-facts/tasks/set_monitor_address.yml new file mode 100644 index 000000000..7ac15343a --- /dev/null +++ 
+++ b/roles/ceph-facts/tasks/set_monitor_address.yml
@@ -0,0 +1,50 @@
+---
+- name: set_fact _monitor_address to monitor_address_block
+  set_fact:
+    _monitor_addresses: "{{ _monitor_addresses | default([]) + [{ 'name': item, 'addr': hostvars[item]['ansible_all_' + ip_version + '_addresses'] | ipaddr(hostvars[item]['monitor_address_block']) | first | ipwrap }] }}"
+  with_items:
+    - "{{ groups.get(mon_group_name, []) }}"
+  when:
+    - "item not in _monitor_addresses | default([]) | selectattr('name', 'defined') | map(attribute='name') | list"
+    - hostvars[item]['monitor_address_block'] is defined
+    - hostvars[item]['monitor_address_block'] != 'subnet'
+
+- name: set_fact _monitor_address to monitor_address
+  set_fact:
+    _monitor_addresses: "{{ _monitor_addresses | default([]) + [{ 'name': item, 'addr': hostvars[item]['monitor_address'] | ipwrap}] }}"
+  with_items:
+    - "{{ groups.get(mon_group_name, []) }}"
+  when:
+    - "item not in _monitor_addresses | default([]) | selectattr('name', 'defined') | map(attribute='name') | list"
+    - hostvars[item]['monitor_address'] is defined
+    - hostvars[item]['monitor_address'] != '0.0.0.0'
+
+- name: set_fact _monitor_address to monitor_interface - ipv4
+  set_fact:
+    _monitor_addresses: "{{ _monitor_addresses | default([]) + [{ 'name': item, 'addr': hostvars[item]['ansible_' + (hostvars[item]['monitor_interface']|replace('-', '_'))][ip_version]['address'] | ipwrap }] }}"
+  with_items:
+    - "{{ groups.get(mon_group_name, []) }}"
+  when:
+    - "item not in _monitor_addresses | default([]) | selectattr('name', 'defined') | map(attribute='name') | list"
+    - ip_version == 'ipv4'
+    - hostvars[item]['monitor_address_block'] | default('subnet') == 'subnet'
+    - hostvars[item]['monitor_address'] | default('0.0.0.0') == '0.0.0.0'
+    - hostvars[item]['monitor_interface'] | default('interface') != 'interface'
+
+- name: set_fact _monitor_address to monitor_interface - ipv6
+  set_fact:
+    _monitor_addresses: "{{ _monitor_addresses | default([]) + [{ 'name': item, 'addr': hostvars[item]['ansible_' + (hostvars[item]['monitor_interface']|replace('-', '_'))][ip_version][0]['address'] | ipwrap }] }}"
+  with_items:
+    - "{{ groups.get(mon_group_name, []) }}"
+  when:
+    - "item not in _monitor_addresses | default([]) | selectattr('name', 'defined') | map(attribute='name') | list"
+    - ip_version == 'ipv6'
+    - hostvars[item]['monitor_address_block'] | default('subnet') == 'subnet'
+    - hostvars[item]['monitor_address'] | default('0.0.0.0') == '0.0.0.0'
+    - hostvars[item]['monitor_interface'] | default('interface') != 'interface'
+
+- name: set_fact _current_monitor_address
+  set_fact:
+    _current_monitor_address: "{{ item.addr }}"
+  with_items: "{{ _monitor_addresses }}"
+  when: inventory_hostname == item.name
\ No newline at end of file
diff --git a/roles/ceph-facts/tasks/set_radosgw_address.yml b/roles/ceph-facts/tasks/set_radosgw_address.yml
new file mode 100644
index 000000000..b0dcd0331
--- /dev/null
+++ b/roles/ceph-facts/tasks/set_radosgw_address.yml
@@ -0,0 +1,35 @@
+---
+- name: set_fact _radosgw_address to radosgw_address_block
+  set_fact:
+    _radosgw_address: "{{ hostvars[inventory_hostname]['ansible_all_' + ip_version + '_addresses'] | ipaddr(radosgw_address_block) | first | ipwrap }}"
+  when:
+    - radosgw_address_block is defined
+    - radosgw_address_block != 'subnet'
+
+- name: set_fact _radosgw_address to radosgw_address
+  set_fact:
+    _radosgw_address: "{{ radosgw_address | ipwrap }}"
+  when:
+    - radosgw_address is defined
+    - radosgw_address != '0.0.0.0'
+
+- block:
+    - name: set_fact _interface
+      set_fact:
+        _interface: "{{ 'ansible_' + (radosgw_interface | replace('-', '_')) }}"
+
+    - name: set_fact _radosgw_address to radosgw_interface - ipv4
+      set_fact:
+        _radosgw_address: "{{ hostvars[inventory_hostname][_interface][ip_version]['address'] }}"
+      when:
+        - ip_version == 'ipv4'
+
+    - name: set_fact _radosgw_address to radosgw_interface - ipv6
+      set_fact:
+        _radosgw_address: "{{ hostvars[inventory_hostname][_interface][ip_version][0]['address'] }}"
+      when:
+        - ip_version == 'ipv6'
+  when:
+    - radosgw_address_block == 'subnet'
+    - radosgw_address == '0.0.0.0'
+    - radosgw_interface != 'interface'
diff --git a/site-container.yml.sample b/site-container.yml.sample
index 3c4425ebf..1a9a321b3 100644
--- a/site-container.yml.sample
+++ b/site-container.yml.sample
@@ -106,6 +106,9 @@
     - import_role:
         name: ceph-defaults
       tags: ['ceph_update_config']
+    - import_role:
+        name: ceph-facts
+      tags: ['ceph_update_config']
     - import_role:
         name: ceph-handler
     - import_role:
@@ -144,6 +147,9 @@
     - import_role:
         name: ceph-defaults
       tags: ['ceph_update_config']
+    - import_role:
+        name: ceph-facts
+      tags: ['ceph_update_config']
     - import_role:
         name: ceph-handler
     - import_role:
@@ -181,6 +187,9 @@
     - import_role:
         name: ceph-defaults
       tags: ['ceph_update_config']
+    - import_role:
+        name: ceph-facts
+      tags: ['ceph_update_config']
     - import_role:
         name: ceph-handler
     - import_role:
@@ -217,6 +226,9 @@
     - import_role:
         name: ceph-defaults
       tags: ['ceph_update_config']
+    - import_role:
+        name: ceph-facts
+      tags: ['ceph_update_config']
     - import_role:
         name: ceph-handler
     - import_role:
@@ -253,6 +265,9 @@
     - import_role:
         name: ceph-defaults
       tags: ['ceph_update_config']
+    - import_role:
+        name: ceph-facts
+      tags: ['ceph_update_config']
     - import_role:
         name: ceph-handler
     - import_role:
@@ -289,6 +304,9 @@
     - import_role:
         name: ceph-defaults
       tags: ['ceph_update_config']
+    - import_role:
+        name: ceph-facts
+      tags: ['ceph_update_config']
     - import_role:
         name: ceph-handler
     - import_role:
@@ -325,6 +343,9 @@
     - import_role:
         name: ceph-defaults
       tags: ['ceph_update_config']
+    - import_role:
+        name: ceph-facts
+      tags: ['ceph_update_config']
     - import_role:
         name: ceph-handler
     - import_role:
@@ -361,6 +382,9 @@
     - import_role:
         name: ceph-defaults
       tags: ['ceph_update_config']
+    - import_role:
+        name: ceph-facts
+      tags: ['ceph_update_config']
     - import_role:
         name: ceph-handler
     - import_role:
@@ -401,6 +425,9 @@
     - import_role:
         name: ceph-defaults
       tags: ['ceph_update_config']
+    - import_role:
+        name: ceph-facts
+      tags: ['ceph_update_config']
     - import_role:
         name: ceph-handler
     - import_role:
@@ -425,6 +452,8 @@
   gather_facts: false
   become: True
   tasks:
+    - import_role:
+        name: ceph-defaults
     - name: check if podman binary is present
       stat:
         path: /usr/bin/podman
@@ -432,16 +461,15 @@
 
     - name: get ceph status from the first monitor
       command: >
-        {{ 'podman' if podman_binary.stat.exists and ansible_distribution == 'Fedora' else 'docker' }} exec ceph-mon-{{ hostvars[groups['mons'][0]]['ansible_hostname'] }} ceph --cluster {{ cluster | default ('ceph') }} -s
+        {{ 'podman' if podman_binary.stat.exists and ansible_distribution == 'Fedora' else 'docker' }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }} ceph --cluster {{ cluster }} -s
       register: ceph_status
       changed_when: false
-      delegate_to: "{{ groups['mons'][0] }}"
+      delegate_to: "{{ groups[mon_group_name][0] }}"
       run_once: true
-      ignore_errors: true # we skip the error if mon_group_name is different than 'mons'
 
-    - name: "show ceph status for cluster {{ cluster | default ('ceph') }}"
+    - name: "show ceph status for cluster {{ cluster }}"
       debug:
         msg: "{{ ceph_status.stdout_lines }}"
-      delegate_to: "{{ groups['mons'][0] }}"
+      delegate_to: "{{ groups[mon_group_name][0] }}"
       run_once: true
       when: not ceph_status.failed
diff --git a/site.yml.sample b/site.yml.sample
index b963f2df2..f594633d7 100644
--- a/site.yml.sample
+++ b/site.yml.sample
@@ -96,6 +96,9 @@
     - import_role:
         name: ceph-defaults
       tags: ['ceph_update_config']
+    - import_role:
+        name: ceph-facts
+      tags: ['ceph_update_config']
     - import_role:
         name: ceph-handler
     - import_role:
@@ -134,6 +137,9 @@
     - import_role:
         name: ceph-defaults
       tags: ['ceph_update_config']
+    - import_role:
+        name: ceph-facts
+      tags: ['ceph_update_config']
     - import_role:
         name: ceph-handler
     - import_role:
@@ -170,6 +176,9 @@
     - import_role:
         name: ceph-defaults
       tags: ['ceph_update_config']
+    - import_role:
+        name: ceph-facts
+      tags: ['ceph_update_config']
     - import_role:
         name: ceph-handler
     - import_role:
@@ -206,6 +215,9 @@
     - import_role:
         name: ceph-defaults
       tags: ['ceph_update_config']
+    - import_role:
+        name: ceph-facts
+      tags: ['ceph_update_config']
     - import_role:
         name: ceph-handler
     - import_role:
@@ -242,6 +254,9 @@
     - import_role:
         name: ceph-defaults
       tags: ['ceph_update_config']
+    - import_role:
+        name: ceph-facts
+      tags: ['ceph_update_config']
     - import_role:
         name: ceph-handler
     - import_role:
@@ -278,6 +293,9 @@
     - import_role:
         name: ceph-defaults
       tags: ['ceph_update_config']
+    - import_role:
+        name: ceph-facts
+      tags: ['ceph_update_config']
     - import_role:
         name: ceph-handler
     - import_role:
@@ -314,6 +332,9 @@
     - import_role:
         name: ceph-defaults
       tags: ['ceph_update_config']
+    - import_role:
+        name: ceph-facts
+      tags: ['ceph_update_config']
     - import_role:
         name: ceph-handler
     - import_role:
@@ -350,6 +371,9 @@
     - import_role:
         name: ceph-defaults
       tags: ['ceph_update_config']
+    - import_role:
+        name: ceph-facts
+      tags: ['ceph_update_config']
     - import_role:
         name: ceph-handler
     - import_role:
@@ -386,6 +410,9 @@
     - import_role:
         name: ceph-defaults
       tags: ['ceph_update_config']
+    - import_role:
+        name: ceph-facts
+      tags: ['ceph_update_config']
     - import_role:
         name: ceph-handler
     - import_role:
@@ -424,6 +451,9 @@
     - import_role:
         name: ceph-defaults
       tags: ['ceph_update_config']
+    - import_role:
+        name: ceph-facts
+      tags: ['ceph_update_config']
     - import_role:
         name: ceph-handler
     - import_role:
@@ -449,17 +479,18 @@
   gather_facts: false
   become: True
   tasks:
+    - import_role:
+        name: ceph-defaults
     - name: get ceph status from the first monitor
-      command: ceph --cluster {{ cluster | default ('ceph') }} -s
+      command: ceph --cluster {{ cluster }} -s
       register: ceph_status
      changed_when: false
-      delegate_to: "{{ groups['mons'][0] }}"
+      delegate_to: "{{ groups[mon_group_name][0] }}"
       run_once: true
-      ignore_errors: true # we skip the error if mon_group_name is different than 'mons'
 
-    - name: "show ceph status for cluster {{ cluster | default ('ceph') }}"
+    - name: "show ceph status for cluster {{ cluster }}"
       debug:
         msg: "{{ ceph_status.stdout_lines }}"
-      delegate_to: "{{ groups['mons'][0] }}"
+      delegate_to: "{{ groups[mon_group_name][0] }}"
       run_once: true
       when: not ceph_status.failed
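
To make the new pattern concrete, here is a minimal sketch of a play as it looks after this change. This is a hypothetical example, not part of the diff; the `mons` host group and the trailing `ceph-handler` import are only illustrative. The point is that `ceph-defaults` now only loads default variables, so any play that needs the facts previously set by `facts.yml` must import `ceph-facts` explicitly, right after `ceph-defaults`:

---
# Hypothetical play illustrating the ceph-defaults/ceph-facts split
# (not part of this commit).
- hosts: mons
  gather_facts: false
  become: true
  tasks:
    # Cheap: brings in default variables only, no fact-setting tasks.
    - import_role:
        name: ceph-defaults

    # Explicit opt-in to the fact-setting tasks that used to live in
    # roles/ceph-defaults/tasks/facts.yml (monitor_name, fsid, devices, ...).
    - import_role:
        name: ceph-facts

    # Roles that consume those facts come afterwards.
    - import_role:
        name: ceph-handler

A play that only needs the default variables can now stop after `ceph-defaults` and skip the fact gathering entirely, which is where the speedup comes from.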