- import_role:
name: ceph-defaults
+ - import_role:
+ name: ceph-facts
+
- import_role:
name: ceph-validate
- import_role:
name: ceph-defaults
+ - import_role:
+ name: ceph-facts
+
- import_role:
name: ceph-handler
name: ceph-defaults
private: false
+ - import_role:
+ name: ceph-facts
+ private: false
+
- name: get all the running osds
shell: |
systemctl list-units | grep 'loaded[[:space:]]\+active' | grep -oE "ceph-osd@([0-9]{1,2}|[a-z]+).service"
name: ceph-defaults
private: false
+ - import_role:
+ name: ceph-facts
+ private: false
+
- name: show container list on all the nodes (should be empty)
command: >
{{ container_binary }} ps --filter='name=ceph' -a -q
- import_role:
name: ceph-defaults
+ - import_role:
+ name: ceph-facts
- import_role:
name: ceph-handler
- import_role:
tasks:
- import_role:
name: ceph-defaults
+ - import_role:
+ name: ceph-facts
- name: non container - get current fsid
command: "ceph --cluster {{ cluster }} fsid"
- import_role:
name: ceph-defaults
+ - import_role:
+ name: ceph-facts
- import_role:
name: ceph-handler
- import_role:
- import_role:
name: ceph-defaults
+ - import_role:
+ name: ceph-facts
- import_role:
name: ceph-handler
- import_role:
tasks:
- import_role:
name: ceph-defaults
+ - import_role:
+ name: ceph-facts
- name: set_fact docker_exec_cmd_osd
set_fact:
- import_role:
name: ceph-defaults
+ - import_role:
+ name: ceph-facts
- import_role:
name: ceph-handler
- import_role:
- import_role:
name: ceph-defaults
+ - import_role:
+ name: ceph-facts
- import_role:
name: ceph-handler
- import_role:
- import_role:
name: ceph-defaults
+ - import_role:
+ name: ceph-facts
- import_role:
name: ceph-handler
- import_role:
- import_role:
name: ceph-defaults
+ - import_role:
+ name: ceph-facts
- import_role:
name: ceph-handler
- import_role:
- import_role:
name: ceph-defaults
+ - import_role:
+ name: ceph-facts
- import_role:
name: ceph-handler
- import_role:
tasks:
- import_role:
name: ceph-defaults
+ - import_role:
+ name: ceph-facts
- import_role:
name: ceph-handler
- import_role:
- import_role:
name: ceph-defaults
+ - import_role:
+ name: ceph-facts
+
# post_tasks for preceding import
- name: pick a monitor different than the one we want to remove
set_fact:
- import_role:
name: ceph-defaults
+ - import_role:
+ name: ceph-facts
+
post_tasks:
- name: set_fact docker_exec_cmd build docker exec command (containerized)
set_fact:
tasks:
- import_role:
name: ceph-defaults
+
+ - import_role:
+ name: ceph-facts
+
- import_role:
name: ceph-handler
+
- import_role:
name: ceph-container-common
+
- import_role:
name: ceph-mon
- import_role:
name: ceph-defaults
+ - import_role:
+ name: ceph-facts
+
- import_role:
name: ceph-handler
- import_role:
name: ceph-defaults
+ - import_role:
+ name: ceph-facts
+
- import_role:
name: ceph-handler
- import_role:
name: ceph-defaults
+ - import_role:
+ name: ceph-facts
+
- import_role:
name: ceph-handler
- import_role:
name: ceph-defaults
+ - import_role:
+ name: ceph-facts
+
- import_role:
name: ceph-handler
- import_role:
name: ceph-defaults
+ - import_role:
+ name: ceph-facts
+
- import_role:
name: ceph-handler
- import_role:
name: ceph-defaults
+ - import_role:
+ name: ceph-facts
+
- import_role:
name: ceph-handler
+++ /dev/null
----
-- name: check if it is atomic host
- stat:
- path: /run/ostree-booted
- register: stat_ostree
-
-- name: set_fact is_atomic
- set_fact:
- is_atomic: "{{ stat_ostree.stat.exists }}"
-
-- name: check if podman binary is present
- stat:
- path: /usr/bin/podman
- register: podman_binary
-
-- name: set_fact is_podman
- set_fact:
- is_podman: "{{ podman_binary.stat.exists }}"
-
-- name: set_fact container_binary
- set_fact:
- container_binary: "{{ 'podman' if is_podman and ansible_distribution == 'Fedora' else 'docker' }}"
- when: containerized_deployment
-
-- name: set_fact monitor_name ansible_hostname
- set_fact:
- monitor_name: "{{ ansible_hostname }}"
- when:
- - not mon_use_fqdn
-
-- name: set_fact monitor_name ansible_fqdn
- set_fact:
- monitor_name: "{{ ansible_fqdn }}"
- when:
- - mon_use_fqdn
-
-- name: set_fact docker_exec_cmd
- set_fact:
- docker_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
- delegate_to: "{{ groups[mon_group_name][0] }}"
- when:
- - containerized_deployment
- - groups.get(mon_group_name, []) | length > 0
-
-# this task shouldn't run in a rolling_update situation
-# because it blindly picks a mon, which may be down because
-# of the rolling update
-- name: is ceph running already?
- command: "timeout 5 {{ docker_exec_cmd }} ceph --cluster {{ cluster }} -s -f json"
- changed_when: false
- failed_when: false
- check_mode: no
- register: ceph_current_status
- run_once: true
- delegate_to: "{{ groups[mon_group_name][0] }}"
- when:
- - not rolling_update
- - groups.get(mon_group_name, []) | length > 0
-
-# We want this check to be run only on the first node
-- name: check if {{ fetch_directory }} directory exists
- stat:
- path: "{{ fetch_directory }}/monitor_keyring.conf"
- delegate_to: localhost
- become: false
- register: monitor_keyring_conf
- run_once: true
-
-# set this as a default when performing a rolling_update
-# so the rest of the tasks here will succeed
-- name: set_fact ceph_current_status rc 1
- set_fact:
- ceph_current_status:
- rc: 1
- when:
- - rolling_update or groups.get(mon_group_name, []) | length == 0
-
-- name: create a local fetch directory if it does not exist
- file:
- path: "{{ fetch_directory }}"
- state: directory
- delegate_to: localhost
- changed_when: false
- become: false
- when:
- - (cephx or generate_fsid)
-
-- name: set_fact ceph_current_status (convert to json)
- set_fact:
- ceph_current_status: "{{ ceph_current_status.stdout | from_json }}"
- when:
- - not rolling_update
- - ceph_current_status.rc == 0
-
-- name: set_fact fsid from ceph_current_status
- set_fact:
- fsid: "{{ ceph_current_status.fsid }}"
- when:
- - ceph_current_status.fsid is defined
-
-# Set ceph_release to ceph_stable by default
-- name: set_fact ceph_release ceph_stable_release
- set_fact:
- ceph_release: "{{ ceph_stable_release }}"
-
-- name: generate cluster fsid
- shell: python -c 'import uuid; print(str(uuid.uuid4()))' | tee {{ fetch_directory }}/ceph_cluster_uuid.conf
- args:
- creates: "{{ fetch_directory }}/ceph_cluster_uuid.conf"
- register: cluster_uuid
- delegate_to: localhost
- become: false
- when:
- - generate_fsid
- - ceph_current_status.fsid is undefined
-
-- name: reuse cluster fsid when cluster is already running
- shell: echo {{ fsid }} | tee {{ fetch_directory }}/ceph_cluster_uuid.conf
- args:
- creates: "{{ fetch_directory }}/ceph_cluster_uuid.conf"
- delegate_to: localhost
- become: false
- when:
- - ceph_current_status.fsid is defined
-
-- name: read cluster fsid if it already exists
- command: cat {{ fetch_directory }}/ceph_cluster_uuid.conf
- args:
- removes: "{{ fetch_directory }}/ceph_cluster_uuid.conf"
- delegate_to: localhost
- changed_when: false
- register: cluster_uuid
- become: false
- check_mode: no
- when:
- - generate_fsid
-
-- name: set_fact fsid
- set_fact:
- fsid: "{{ cluster_uuid.stdout }}"
- when:
- - generate_fsid
-
-- name: set_fact mds_name ansible_hostname
- set_fact:
- mds_name: "{{ ansible_hostname }}"
- when:
- - not mds_use_fqdn
-
-- name: set_fact mds_name ansible_fqdn
- set_fact:
- mds_name: "{{ ansible_fqdn }}"
- when:
- - mds_use_fqdn
-
-- name: set_fact rbd_client_directory_owner ceph
- set_fact:
- rbd_client_directory_owner: ceph
- when:
- - rbd_client_directory_owner is not defined
- or not rbd_client_directory_owner
-
-- name: set_fact rbd_client_directory_group rbd_client_directory_group
- set_fact:
- rbd_client_directory_group: ceph
- when:
- - rbd_client_directory_group is not defined
- or not rbd_client_directory_group
-
-- name: set_fact rbd_client_directory_mode 0770
- set_fact:
- rbd_client_directory_mode: "0770"
- when:
- - rbd_client_directory_mode is not defined
- or not rbd_client_directory_mode
-
-- name: resolve device link(s)
- command: readlink -f {{ item }}
- changed_when: false
- with_items: "{{ devices }}"
- register: devices_prepare_canonicalize
- when:
- - devices is defined
- - inventory_hostname in groups.get(osd_group_name, [])
- - not osd_auto_discovery|default(False)
- - osd_scenario|default('dummy') != 'lvm'
-
-- name: set_fact build devices from resolved symlinks
- set_fact:
- devices: "{{ devices | default([]) + [ item.stdout ] }}"
- with_items: "{{ devices_prepare_canonicalize.results }}"
- when:
- - devices is defined
- - inventory_hostname in groups.get(osd_group_name, [])
- - not osd_auto_discovery|default(False)
- - osd_scenario|default('dummy') != 'lvm'
-
-- name: set_fact build final devices list
- set_fact:
- devices: "{{ devices | reject('search','/dev/disk') | list | unique }}"
- when:
- - devices is defined
- - inventory_hostname in groups.get(osd_group_name, [])
- - not osd_auto_discovery|default(False)
- - osd_scenario|default('dummy') != 'lvm'
-
-- name: set_fact ceph_uid for debian based system - non container
- set_fact:
- ceph_uid: 64045
- when:
- - not containerized_deployment
- - ansible_os_family == 'Debian'
-
-- name: set_fact ceph_uid for red hat or suse based system - non container
- set_fact:
- ceph_uid: 167
- when:
- - not containerized_deployment
- - ansible_os_family in ['RedHat', 'Suse']
-
-- name: set_fact ceph_uid for debian based system - container
- set_fact:
- ceph_uid: 64045
- when:
- - containerized_deployment
- - ceph_docker_image_tag | string is search("ubuntu")
-
-- name: set_fact ceph_uid for red hat based system - container
- set_fact:
- ceph_uid: 167
- when:
- - containerized_deployment
- - ceph_docker_image_tag | string is search("latest") or ceph_docker_image_tag | string is search("centos") or ceph_docker_image_tag | string is search("fedora")
-
-- name: set_fact ceph_uid for red hat
- set_fact:
- ceph_uid: 167
- when:
- - containerized_deployment
- - ceph_docker_image is search("rhceph")
-
-- name: set_fact rgw_hostname
- set_fact:
- rgw_hostname: "{% set _value = ansible_hostname -%}
- {% for key in (ceph_current_status['servicemap']['services']['rgw']['daemons'] | list) -%}
- {% if key == ansible_fqdn -%}
- {% set _value = key -%}
- {% endif -%}
- {% endfor -%}
- {{ _value }}"
- when:
- - inventory_hostname in groups.get(rgw_group_name, []) or inventory_hostname in groups.get(nfs_group_name, [])
- - ceph_current_status['servicemap'] is defined
- - ceph_current_status['servicemap']['services'] is defined
- - ceph_current_status['servicemap']['services']['rgw'] is defined
-
-- name: set_fact osd_pool_default_pg_num
- set_fact:
- osd_pool_default_pg_num: "{{ ceph_conf_overrides.get('global', {}).get('osd_pool_default_pg_num', ceph_osd_pool_default_pg_num) }}"
-
-- name: set_fact osd_pool_default_size
- set_fact:
- osd_pool_default_size: "{{ ceph_conf_overrides.get('global', {}).get('osd_pool_default_size', ceph_osd_pool_default_size) }}"
-
-- name: import_tasks set_monitor_address.yml
- import_tasks: set_monitor_address.yml
-
-- name: import_tasks set_radosgw_address.yml
- import_tasks: set_radosgw_address.yml
- when:
- - inventory_hostname in groups.get(rgw_group_name, [])
----
-- name: include facts.yml
- include_tasks: facts.yml
+---
\ No newline at end of file
+++ /dev/null
----
-- name: set_fact _monitor_address to monitor_address_block
- set_fact:
- _monitor_addresses: "{{ _monitor_addresses | default([]) + [{ 'name': item, 'addr': hostvars[item]['ansible_all_' + ip_version + '_addresses'] | ipaddr(hostvars[item]['monitor_address_block']) | first | ipwrap }] }}"
- with_items:
- - "{{ groups.get(mon_group_name, []) }}"
- when:
- - "item not in _monitor_addresses | default([]) | selectattr('name', 'defined') | map(attribute='name') | list"
- - hostvars[item]['monitor_address_block'] is defined
- - hostvars[item]['monitor_address_block'] != 'subnet'
-
-- name: set_fact _monitor_address to monitor_address
- set_fact:
- _monitor_addresses: "{{ _monitor_addresses | default([]) + [{ 'name': item, 'addr': hostvars[item]['monitor_address'] | ipwrap}] }}"
- with_items:
- - "{{ groups.get(mon_group_name, []) }}"
- when:
- - "item not in _monitor_addresses | default([]) | selectattr('name', 'defined') | map(attribute='name') | list"
- - hostvars[item]['monitor_address'] is defined
- - hostvars[item]['monitor_address'] != '0.0.0.0'
-
-- name: set_fact _monitor_address to monitor_interface - ipv4
- set_fact:
- _monitor_addresses: "{{ _monitor_addresses | default([]) + [{ 'name': item, 'addr': hostvars[item]['ansible_' + (hostvars[item]['monitor_interface']|replace('-', '_'))][ip_version]['address'] | ipwrap }] }}"
- with_items:
- - "{{ groups.get(mon_group_name, []) }}"
- when:
- - "item not in _monitor_addresses | default([]) | selectattr('name', 'defined') | map(attribute='name') | list"
- - ip_version == 'ipv4'
- - hostvars[item]['monitor_address_block'] | default('subnet') == 'subnet'
- - hostvars[item]['monitor_address'] | default('0.0.0.0') == '0.0.0.0'
- - hostvars[item]['monitor_interface'] | default('interface') != 'interface'
-
-- name: set_fact _monitor_address to monitor_interface - ipv6
- set_fact:
- _monitor_addresses: "{{ _monitor_addresses | default([]) + [{ 'name': item, 'addr': hostvars[item]['ansible_' + (hostvars[item]['monitor_interface']|replace('-', '_'))][ip_version][0]['address'] | ipwrap }] }}"
- with_items:
- - "{{ groups.get(mon_group_name, []) }}"
- when:
- - "item not in _monitor_addresses | default([]) | selectattr('name', 'defined') | map(attribute='name') | list"
- - ip_version == 'ipv6'
- - hostvars[item]['monitor_address_block'] | default('subnet') == 'subnet'
- - hostvars[item]['monitor_address'] | default('0.0.0.0') == '0.0.0.0'
- - hostvars[item]['monitor_interface'] | default('interface') != 'interface'
-
-- name: set_fact _current_monitor_address
- set_fact:
- _current_monitor_address: "{{ item.addr }}"
- with_items: "{{ _monitor_addresses }}"
- when: inventory_hostname == item.name
\ No newline at end of file
+++ /dev/null
----
-- name: set_fact _radosgw_address to radosgw_address_block
- set_fact:
- _radosgw_address: "{{ hostvars[inventory_hostname]['ansible_all_' + ip_version + '_addresses'] | ipaddr(radosgw_address_block) | first | ipwrap }}"
- when:
- - radosgw_address_block is defined
- - radosgw_address_block != 'subnet'
-
-- name: set_fact _radosgw_address to radosgw_address
- set_fact:
- _radosgw_address: "{{ radosgw_address | ipwrap }}"
- when:
- - radosgw_address is defined
- - radosgw_address != '0.0.0.0'
-
-- block:
- - name: set_fact _interface
- set_fact:
- _interface: "{{ 'ansible_' + (radosgw_interface | replace('-', '_')) }}"
-
- - name: set_fact _radosgw_address to radosgw_interface - ipv4
- set_fact:
- _radosgw_address: "{{ hostvars[inventory_hostname][_interface][ip_version]['address'] }}"
- when:
- - ip_version == 'ipv4'
-
- - name: set_fact _radosgw_address to radosgw_interface - ipv6
- set_fact:
- _radosgw_address: "{{ hostvars[inventory_hostname][_interface][ip_version][0]['address'] }}"
- when:
- - ip_version == 'ipv6'
- when:
- - radosgw_address_block == 'subnet'
- - radosgw_address == '0.0.0.0'
- - radosgw_interface != 'interface'
--- /dev/null
+# Ansible role: ceph-facts
+
+Documentation is available at http://docs.ceph.com/ceph-ansible/.
--- /dev/null
+---
\ No newline at end of file
--- /dev/null
+---
+galaxy_info:
+ company: Red Hat
+ author: Guillaume Abrioux
+ description: Set some facts for ceph to be deployed
+ license: Apache
+ min_ansible_version: 2.7
+ platforms:
+ - name: Ubuntu
+ versions:
+ - xenial
+ - name: EL
+ versions:
+ - 7
+ galaxy_tags:
+ - system
+dependencies: []
--- /dev/null
+---
+- name: check if it is atomic host
+ stat:
+ path: /run/ostree-booted
+ register: stat_ostree
+
+- name: set_fact is_atomic
+ set_fact:
+ is_atomic: "{{ stat_ostree.stat.exists }}"
+
+- name: check if podman binary is present
+ stat:
+ path: /usr/bin/podman
+ register: podman_binary
+
+- name: set_fact is_podman
+ set_fact:
+ is_podman: "{{ podman_binary.stat.exists }}"
+
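+# podman is only used when the binary is present on a Fedora host; every other case falls back to docker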
+- name: set_fact container_binary
+ set_fact:
+ container_binary: "{{ 'podman' if is_podman and ansible_distribution == 'Fedora' else 'docker' }}"
+ when: containerized_deployment
+
+- name: set_fact monitor_name ansible_hostname
+ set_fact:
+ monitor_name: "{{ ansible_hostname }}"
+ when:
+ - not mon_use_fqdn
+
+- name: set_fact monitor_name ansible_fqdn
+ set_fact:
+ monitor_name: "{{ ansible_fqdn }}"
+ when:
+ - mon_use_fqdn
+
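+# build the command prefix used to run ceph commands inside the first monitor's container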
+- name: set_fact docker_exec_cmd
+ set_fact:
+ docker_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ when:
+ - containerized_deployment
+ - groups.get(mon_group_name, []) | length > 0
+
+# this task shouldn't run in a rolling_update situation
+# because it blindly picks a mon, which may be down because
+# of the rolling update
+- name: is ceph running already?
+ command: "timeout 5 {{ docker_exec_cmd }} ceph --cluster {{ cluster }} -s -f json"
+ changed_when: false
+ failed_when: false
+ check_mode: no
+ register: ceph_current_status
+ run_once: true
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ when:
+ - not rolling_update
+ - groups.get(mon_group_name, []) | length > 0
+
+# We want this check to be run only on the first node
+- name: check if {{ fetch_directory }}/monitor_keyring.conf already exists
+ stat:
+ path: "{{ fetch_directory }}/monitor_keyring.conf"
+ delegate_to: localhost
+ become: false
+ register: monitor_keyring_conf
+ run_once: true
+
+# set this as a default when performing a rolling_update
+# so the rest of the tasks here will succeed
+- name: set_fact ceph_current_status rc 1
+ set_fact:
+ ceph_current_status:
+ rc: 1
+ when:
+ - rolling_update or groups.get(mon_group_name, []) | length == 0
+
+- name: create a local fetch directory if it does not exist
+ file:
+ path: "{{ fetch_directory }}"
+ state: directory
+ delegate_to: localhost
+ changed_when: false
+ become: false
+ when:
+ - (cephx or generate_fsid)
+
+- name: set_fact ceph_current_status (convert to json)
+ set_fact:
+ ceph_current_status: "{{ ceph_current_status.stdout | from_json }}"
+ when:
+ - not rolling_update
+ - ceph_current_status.rc == 0
+
+- name: set_fact fsid from ceph_current_status
+ set_fact:
+ fsid: "{{ ceph_current_status.fsid }}"
+ when:
+ - ceph_current_status.fsid is defined
+
+# Set ceph_release to ceph_stable by default
+- name: set_fact ceph_release ceph_stable_release
+ set_fact:
+ ceph_release: "{{ ceph_stable_release }}"
+
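+# generate a brand new fsid on the deploy node only when no fsid was discovered from a running cluster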
+- name: generate cluster fsid
+ shell: python -c 'import uuid; print(str(uuid.uuid4()))' | tee {{ fetch_directory }}/ceph_cluster_uuid.conf
+ args:
+ creates: "{{ fetch_directory }}/ceph_cluster_uuid.conf"
+ register: cluster_uuid
+ delegate_to: localhost
+ become: false
+ when:
+ - generate_fsid
+ - ceph_current_status.fsid is undefined
+
+- name: reuse cluster fsid when cluster is already running
+ shell: echo {{ fsid }} | tee {{ fetch_directory }}/ceph_cluster_uuid.conf
+ args:
+ creates: "{{ fetch_directory }}/ceph_cluster_uuid.conf"
+ delegate_to: localhost
+ become: false
+ when:
+ - ceph_current_status.fsid is defined
+
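+# read the fsid back from the fetch directory so every host ends up with the same value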
+- name: read cluster fsid if it already exists
+ command: cat {{ fetch_directory }}/ceph_cluster_uuid.conf
+ args:
+ removes: "{{ fetch_directory }}/ceph_cluster_uuid.conf"
+ delegate_to: localhost
+ changed_when: false
+ register: cluster_uuid
+ become: false
+ check_mode: no
+ when:
+ - generate_fsid
+
+- name: set_fact fsid
+ set_fact:
+ fsid: "{{ cluster_uuid.stdout }}"
+ when:
+ - generate_fsid
+
+- name: set_fact mds_name ansible_hostname
+ set_fact:
+ mds_name: "{{ ansible_hostname }}"
+ when:
+ - not mds_use_fqdn
+
+- name: set_fact mds_name ansible_fqdn
+ set_fact:
+ mds_name: "{{ ansible_fqdn }}"
+ when:
+ - mds_use_fqdn
+
+- name: set_fact rbd_client_directory_owner ceph
+ set_fact:
+ rbd_client_directory_owner: ceph
+ when:
+ - rbd_client_directory_owner is not defined
+ or not rbd_client_directory_owner
+
+- name: set_fact rbd_client_directory_group ceph
+ set_fact:
+ rbd_client_directory_group: ceph
+ when:
+ - rbd_client_directory_group is not defined
+ or not rbd_client_directory_group
+
+- name: set_fact rbd_client_directory_mode 0770
+ set_fact:
+ rbd_client_directory_mode: "0770"
+ when:
+ - rbd_client_directory_mode is not defined
+ or not rbd_client_directory_mode
+
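+# canonicalize the devices list: resolve symlinks, then drop /dev/disk/by-* entries and duplicates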
+- name: resolve device link(s)
+ command: readlink -f {{ item }}
+ changed_when: false
+ with_items: "{{ devices }}"
+ register: devices_prepare_canonicalize
+ when:
+ - devices is defined
+ - inventory_hostname in groups.get(osd_group_name, [])
+ - not osd_auto_discovery|default(False)
+ - osd_scenario|default('dummy') != 'lvm'
+
+- name: set_fact build devices from resolved symlinks
+ set_fact:
+ devices: "{{ devices | default([]) + [ item.stdout ] }}"
+ with_items: "{{ devices_prepare_canonicalize.results }}"
+ when:
+ - devices is defined
+ - inventory_hostname in groups.get(osd_group_name, [])
+ - not osd_auto_discovery|default(False)
+ - osd_scenario|default('dummy') != 'lvm'
+
+- name: set_fact build final devices list
+ set_fact:
+ devices: "{{ devices | reject('search','/dev/disk') | list | unique }}"
+ when:
+ - devices is defined
+ - inventory_hostname in groups.get(osd_group_name, [])
+ - not osd_auto_discovery|default(False)
+ - osd_scenario|default('dummy') != 'lvm'
+
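+# ceph runs as uid 64045 on Debian/Ubuntu based systems and images, and as uid 167 on Red Hat or SUSE based ones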
+- name: set_fact ceph_uid for debian based system - non container
+ set_fact:
+ ceph_uid: 64045
+ when:
+ - not containerized_deployment
+ - ansible_os_family == 'Debian'
+
+- name: set_fact ceph_uid for red hat or suse based system - non container
+ set_fact:
+ ceph_uid: 167
+ when:
+ - not containerized_deployment
+ - ansible_os_family in ['RedHat', 'Suse']
+
+- name: set_fact ceph_uid for debian based system - container
+ set_fact:
+ ceph_uid: 64045
+ when:
+ - containerized_deployment
+ - ceph_docker_image_tag | string is search("ubuntu")
+
+- name: set_fact ceph_uid for red hat based system - container
+ set_fact:
+ ceph_uid: 167
+ when:
+ - containerized_deployment
+ - ceph_docker_image_tag | string is search("latest") or ceph_docker_image_tag | string is search("centos") or ceph_docker_image_tag | string is search("fedora")
+
+- name: set_fact ceph_uid for red hat
+ set_fact:
+ ceph_uid: 167
+ when:
+ - containerized_deployment
+ - ceph_docker_image is search("rhceph")
+
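+# default rgw_hostname to the short hostname; use the fqdn instead when that is what the rgw servicemap reports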
+- name: set_fact rgw_hostname
+ set_fact:
+ rgw_hostname: "{% set _value = ansible_hostname -%}
+ {% for key in (ceph_current_status['servicemap']['services']['rgw']['daemons'] | list) -%}
+ {% if key == ansible_fqdn -%}
+ {% set _value = key -%}
+ {% endif -%}
+ {% endfor -%}
+ {{ _value }}"
+ when:
+ - inventory_hostname in groups.get(rgw_group_name, []) or inventory_hostname in groups.get(nfs_group_name, [])
+ - ceph_current_status['servicemap'] is defined
+ - ceph_current_status['servicemap']['services'] is defined
+ - ceph_current_status['servicemap']['services']['rgw'] is defined
+
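+# honour values set in ceph_conf_overrides when present, otherwise fall back to the role defaults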
+- name: set_fact osd_pool_default_pg_num
+ set_fact:
+ osd_pool_default_pg_num: "{{ ceph_conf_overrides.get('global', {}).get('osd_pool_default_pg_num', ceph_osd_pool_default_pg_num) }}"
+
+- name: set_fact osd_pool_default_size
+ set_fact:
+ osd_pool_default_size: "{{ ceph_conf_overrides.get('global', {}).get('osd_pool_default_size', ceph_osd_pool_default_size) }}"
+
+- name: import_tasks set_monitor_address.yml
+ import_tasks: set_monitor_address.yml
+
+- name: import_tasks set_radosgw_address.yml
+ import_tasks: set_radosgw_address.yml
+ when:
+ - inventory_hostname in groups.get(rgw_group_name, [])
--- /dev/null
+---
+- name: include facts.yml
+ include_tasks: facts.yml
--- /dev/null
+---
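+# resolve each monitor's address, in order of precedence: monitor_address_block, monitor_address, then monitor_interface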
+- name: set_fact _monitor_address to monitor_address_block
+ set_fact:
+ _monitor_addresses: "{{ _monitor_addresses | default([]) + [{ 'name': item, 'addr': hostvars[item]['ansible_all_' + ip_version + '_addresses'] | ipaddr(hostvars[item]['monitor_address_block']) | first | ipwrap }] }}"
+ with_items:
+ - "{{ groups.get(mon_group_name, []) }}"
+ when:
+ - "item not in _monitor_addresses | default([]) | selectattr('name', 'defined') | map(attribute='name') | list"
+ - hostvars[item]['monitor_address_block'] is defined
+ - hostvars[item]['monitor_address_block'] != 'subnet'
+
+- name: set_fact _monitor_address to monitor_address
+ set_fact:
+ _monitor_addresses: "{{ _monitor_addresses | default([]) + [{ 'name': item, 'addr': hostvars[item]['monitor_address'] | ipwrap}] }}"
+ with_items:
+ - "{{ groups.get(mon_group_name, []) }}"
+ when:
+ - "item not in _monitor_addresses | default([]) | selectattr('name', 'defined') | map(attribute='name') | list"
+ - hostvars[item]['monitor_address'] is defined
+ - hostvars[item]['monitor_address'] != '0.0.0.0'
+
+- name: set_fact _monitor_address to monitor_interface - ipv4
+ set_fact:
+ _monitor_addresses: "{{ _monitor_addresses | default([]) + [{ 'name': item, 'addr': hostvars[item]['ansible_' + (hostvars[item]['monitor_interface']|replace('-', '_'))][ip_version]['address'] | ipwrap }] }}"
+ with_items:
+ - "{{ groups.get(mon_group_name, []) }}"
+ when:
+ - "item not in _monitor_addresses | default([]) | selectattr('name', 'defined') | map(attribute='name') | list"
+ - ip_version == 'ipv4'
+ - hostvars[item]['monitor_address_block'] | default('subnet') == 'subnet'
+ - hostvars[item]['monitor_address'] | default('0.0.0.0') == '0.0.0.0'
+ - hostvars[item]['monitor_interface'] | default('interface') != 'interface'
+
+- name: set_fact _monitor_address to monitor_interface - ipv6
+ set_fact:
+ _monitor_addresses: "{{ _monitor_addresses | default([]) + [{ 'name': item, 'addr': hostvars[item]['ansible_' + (hostvars[item]['monitor_interface']|replace('-', '_'))][ip_version][0]['address'] | ipwrap }] }}"
+ with_items:
+ - "{{ groups.get(mon_group_name, []) }}"
+ when:
+ - "item not in _monitor_addresses | default([]) | selectattr('name', 'defined') | map(attribute='name') | list"
+ - ip_version == 'ipv6'
+ - hostvars[item]['monitor_address_block'] | default('subnet') == 'subnet'
+ - hostvars[item]['monitor_address'] | default('0.0.0.0') == '0.0.0.0'
+ - hostvars[item]['monitor_interface'] | default('interface') != 'interface'
+
+- name: set_fact _current_monitor_address
+ set_fact:
+ _current_monitor_address: "{{ item.addr }}"
+ with_items: "{{ _monitor_addresses }}"
+ when: inventory_hostname == item.name
\ No newline at end of file
--- /dev/null
+---
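+# pick the radosgw bind address from radosgw_address_block, radosgw_address or radosgw_interface, whichever is configured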
+- name: set_fact _radosgw_address to radosgw_address_block
+ set_fact:
+ _radosgw_address: "{{ hostvars[inventory_hostname]['ansible_all_' + ip_version + '_addresses'] | ipaddr(radosgw_address_block) | first | ipwrap }}"
+ when:
+ - radosgw_address_block is defined
+ - radosgw_address_block != 'subnet'
+
+- name: set_fact _radosgw_address to radosgw_address
+ set_fact:
+ _radosgw_address: "{{ radosgw_address | ipwrap }}"
+ when:
+ - radosgw_address is defined
+ - radosgw_address != '0.0.0.0'
+
+- block:
+ - name: set_fact _interface
+ set_fact:
+ _interface: "{{ 'ansible_' + (radosgw_interface | replace('-', '_')) }}"
+
+ - name: set_fact _radosgw_address to radosgw_interface - ipv4
+ set_fact:
+ _radosgw_address: "{{ hostvars[inventory_hostname][_interface][ip_version]['address'] }}"
+ when:
+ - ip_version == 'ipv4'
+
+ - name: set_fact _radosgw_address to radosgw_interface - ipv6
+ set_fact:
+ _radosgw_address: "{{ hostvars[inventory_hostname][_interface][ip_version][0]['address'] }}"
+ when:
+ - ip_version == 'ipv6'
+ when:
+ - radosgw_address_block == 'subnet'
+ - radosgw_address == '0.0.0.0'
+ - radosgw_interface != 'interface'
- import_role:
name: ceph-defaults
tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-facts
+ tags: ['ceph_update_config']
- import_role:
name: ceph-handler
- import_role:
- import_role:
name: ceph-defaults
tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-facts
+ tags: ['ceph_update_config']
- import_role:
name: ceph-handler
- import_role:
- import_role:
name: ceph-defaults
tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-facts
+ tags: ['ceph_update_config']
- import_role:
name: ceph-handler
- import_role:
- import_role:
name: ceph-defaults
tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-facts
+ tags: ['ceph_update_config']
- import_role:
name: ceph-handler
- import_role:
- import_role:
name: ceph-defaults
tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-facts
+ tags: ['ceph_update_config']
- import_role:
name: ceph-handler
- import_role:
- import_role:
name: ceph-defaults
tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-facts
+ tags: ['ceph_update_config']
- import_role:
name: ceph-handler
- import_role:
- import_role:
name: ceph-defaults
tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-facts
+ tags: ['ceph_update_config']
- import_role:
name: ceph-handler
- import_role:
- import_role:
name: ceph-defaults
tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-facts
+ tags: ['ceph_update_config']
- import_role:
name: ceph-handler
- import_role:
- import_role:
name: ceph-defaults
tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-facts
+ tags: ['ceph_update_config']
- import_role:
name: ceph-handler
- import_role:
gather_facts: false
become: True
tasks:
+ - import_role:
+ name: ceph-defaults
- name: check if podman binary is present
stat:
path: /usr/bin/podman
- name: get ceph status from the first monitor
command: >
- {{ 'podman' if podman_binary.stat.exists and ansible_distribution == 'Fedora' else 'docker' }} exec ceph-mon-{{ hostvars[groups['mons'][0]]['ansible_hostname'] }} ceph --cluster {{ cluster | default ('ceph') }} -s
+ {{ 'podman' if podman_binary.stat.exists and ansible_distribution == 'Fedora' else 'docker' }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }} ceph --cluster {{ cluster }} -s
register: ceph_status
changed_when: false
- delegate_to: "{{ groups['mons'][0] }}"
+ delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true
- ignore_errors: true # we skip the error if mon_group_name is different than 'mons'
- - name: "show ceph status for cluster {{ cluster | default ('ceph') }}"
+ - name: "show ceph status for cluster {{ cluster }}"
debug:
msg: "{{ ceph_status.stdout_lines }}"
- delegate_to: "{{ groups['mons'][0] }}"
+ delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true
when: not ceph_status.failed
- import_role:
name: ceph-defaults
tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-facts
+ tags: ['ceph_update_config']
- import_role:
name: ceph-handler
- import_role:
- import_role:
name: ceph-defaults
tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-facts
+ tags: ['ceph_update_config']
- import_role:
name: ceph-handler
- import_role:
- import_role:
name: ceph-defaults
tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-facts
+ tags: ['ceph_update_config']
- import_role:
name: ceph-handler
- import_role:
- import_role:
name: ceph-defaults
tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-facts
+ tags: ['ceph_update_config']
- import_role:
name: ceph-handler
- import_role:
- import_role:
name: ceph-defaults
tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-facts
+ tags: ['ceph_update_config']
- import_role:
name: ceph-handler
- import_role:
- import_role:
name: ceph-defaults
tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-facts
+ tags: ['ceph_update_config']
- import_role:
name: ceph-handler
- import_role:
- import_role:
name: ceph-defaults
tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-facts
+ tags: ['ceph_update_config']
- import_role:
name: ceph-handler
- import_role:
- import_role:
name: ceph-defaults
tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-facts
+ tags: ['ceph_update_config']
- import_role:
name: ceph-handler
- import_role:
- import_role:
name: ceph-defaults
tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-facts
+ tags: ['ceph_update_config']
- import_role:
name: ceph-handler
- import_role:
- import_role:
name: ceph-defaults
tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-facts
+ tags: ['ceph_update_config']
- import_role:
name: ceph-handler
- import_role:
gather_facts: false
become: True
tasks:
+ - import_role:
+ name: ceph-defaults
- name: get ceph status from the first monitor
- command: ceph --cluster {{ cluster | default ('ceph') }} -s
+ command: ceph --cluster {{ cluster }} -s
register: ceph_status
changed_when: false
- delegate_to: "{{ groups['mons'][0] }}"
+ delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true
- ignore_errors: true # we skip the error if mon_group_name is different than 'mons'
- - name: "show ceph status for cluster {{ cluster | default ('ceph') }}"
+ - name: "show ceph status for cluster {{ cluster }}"
debug:
msg: "{{ ceph_status.stdout_lines }}"
- delegate_to: "{{ groups['mons'][0] }}"
+ delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true
when: not ceph_status.failed