git.apps.os.sepia.ceph.com Git - ceph-ansible.git/commitdiff
introduce new role ceph-facts
author Guillaume Abrioux <gabrioux@redhat.com>
Mon, 10 Dec 2018 14:46:32 +0000 (15:46 +0100)
committer Sébastien Han <seb@redhat.com>
Wed, 12 Dec 2018 10:18:01 +0000 (11:18 +0100)
Sometimes we play the whole `ceph-defaults` role just to access the
default values of some variables. This means we also play the `facts.yml`
part of the role when it is not desired. Splitting this role will speed up
the playbook.

Closes: #3282
Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>
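
In practice the change is mechanical: wherever a play imported ceph-defaults
and actually needs the derived facts, an import of the new ceph-facts role is
added right after it. A minimal sketch of the resulting pattern (illustrative
only, not part of the commit; host group and role names as used elsewhere in
this repository):

- hosts: mons
  become: true
  tasks:
    # ceph-defaults now only provides default variable values;
    # it no longer gathers or derives any facts.
    - import_role:
        name: ceph-defaults

    # ceph-facts runs the former ceph-defaults facts.yml
    # (is_atomic, container_binary, monitor addresses, fsid, ...).
    - import_role:
        name: ceph-facts

    - import_role:
        name: ceph-mon

Plays that only need the default values can now import ceph-defaults alone and
skip fact collection entirely, which is where the speedup comes from.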
19 files changed:
infrastructure-playbooks/add-osd.yml
infrastructure-playbooks/purge-docker-cluster.yml
infrastructure-playbooks/rolling_update.yml
infrastructure-playbooks/shrink-mon.yml
infrastructure-playbooks/shrink-osd.yml
infrastructure-playbooks/switch-from-non-containerized-to-containerized-ceph-daemons.yml
roles/ceph-defaults/tasks/facts.yml [deleted file]
roles/ceph-defaults/tasks/main.yml
roles/ceph-defaults/tasks/set_monitor_address.yml [deleted file]
roles/ceph-defaults/tasks/set_radosgw_address.yml [deleted file]
roles/ceph-facts/README.md [new file with mode: 0644]
roles/ceph-facts/defaults/main.yml [new file with mode: 0644]
roles/ceph-facts/meta/main.yml [new file with mode: 0644]
roles/ceph-facts/tasks/facts.yml [new file with mode: 0644]
roles/ceph-facts/tasks/main.yml [new file with mode: 0644]
roles/ceph-facts/tasks/set_monitor_address.yml [new file with mode: 0644]
roles/ceph-facts/tasks/set_radosgw_address.yml [new file with mode: 0644]
site-container.yml.sample
site.yml.sample

diff --git a/infrastructure-playbooks/add-osd.yml b/infrastructure-playbooks/add-osd.yml
index fa5a2f460ead5798f1c3962c4c925e5a3b5e79bd..393e6e6f786ef4ad45c1c64cd04f2927f002f4d1 100644 (file)
@@ -45,6 +45,9 @@
     - import_role:
         name: ceph-defaults
 
+    - import_role:
+        name: ceph-facts
+
     - import_role:
         name: ceph-validate
 
@@ -68,6 +71,9 @@
     - import_role:
         name: ceph-defaults
 
+    - import_role:
+        name: ceph-facts
+
     - import_role:
         name: ceph-handler
 
diff --git a/infrastructure-playbooks/purge-docker-cluster.yml b/infrastructure-playbooks/purge-docker-cluster.yml
index ccc69ebf759ef34eed34085d61e15542dede0e6b..6b0969056eccf89b6da419b179ee39d0c4e917cb 100644 (file)
       name: ceph-defaults
       private: false
 
+  - import_role:
+      name: ceph-facts
+      private: false
+
   - name: get all the running osds
     shell: |
       systemctl list-units | grep 'loaded[[:space:]]\+active' | grep -oE "ceph-osd@([0-9]{1,2}|[a-z]+).service"
       name: ceph-defaults
       private: false
 
+  - import_role:
+      name: ceph-facts
+      private: false
+
   - name: show container list on all the nodes (should be empty)
     command: >
       {{ container_binary }} ps --filter='name=ceph' -a -q
diff --git a/infrastructure-playbooks/rolling_update.yml b/infrastructure-playbooks/rolling_update.yml
index ffda4d7e9dd3a87e9e9d6bf645e8e2eb5da1355f..5547a8b81f1a6abce63f426ed6ffa683f0cc3fe2 100644 (file)
 
     - import_role:
         name: ceph-defaults
+    - import_role:
+        name: ceph-facts
     - import_role:
         name: ceph-handler
     - import_role:
   tasks:
     - import_role:
         name: ceph-defaults
+    - import_role:
+        name: ceph-facts
 
     - name: non container - get current fsid
       command: "ceph --cluster {{ cluster }} fsid"
 
     - import_role:
         name: ceph-defaults
+    - import_role:
+        name: ceph-facts
     - import_role:
         name: ceph-handler
     - import_role:
 
     - import_role:
         name: ceph-defaults
+    - import_role:
+        name: ceph-facts
     - import_role:
         name: ceph-handler
     - import_role:
   tasks:
     - import_role:
         name: ceph-defaults
+    - import_role:
+        name: ceph-facts
 
     - name: set_fact docker_exec_cmd_osd
       set_fact:
 
     - import_role:
         name: ceph-defaults
+    - import_role:
+        name: ceph-facts
     - import_role:
         name: ceph-handler
     - import_role:
 
     - import_role:
         name: ceph-defaults
+    - import_role:
+        name: ceph-facts
     - import_role:
         name: ceph-handler
     - import_role:
 
     - import_role:
         name: ceph-defaults
+    - import_role:
+        name: ceph-facts
     - import_role:
         name: ceph-handler
     - import_role:
 
     - import_role:
         name: ceph-defaults
+    - import_role:
+        name: ceph-facts
     - import_role:
         name: ceph-handler
     - import_role:
 
     - import_role:
         name: ceph-defaults
+    - import_role:
+        name: ceph-facts
     - import_role:
         name: ceph-handler
     - import_role:
   tasks:
     - import_role:
         name: ceph-defaults
+    - import_role:
+        name: ceph-facts
     - import_role:
         name: ceph-handler
     - import_role:
diff --git a/infrastructure-playbooks/shrink-mon.yml b/infrastructure-playbooks/shrink-mon.yml
index 84d8d2270524b4a2c34c8db13385d1fee9b83e8b..88634525e4180ef7dfc75c7eb580d0e2dc64e6c4 100644 (file)
@@ -70,6 +70,9 @@
     - import_role:
         name: ceph-defaults
 
+    - import_role:
+        name: ceph-facts
+
     # post_tasks for preceding import
     - name: pick a monitor different than the one we want to remove
       set_fact:
diff --git a/infrastructure-playbooks/shrink-osd.yml b/infrastructure-playbooks/shrink-osd.yml
index 40b0c0d66715474f2c63125a4f5e4dc06e8f7c9a..b791a8830a959250fc71e7e55ce1aac929ee460e 100644 (file)
@@ -59,6 +59,9 @@
     - import_role:
         name: ceph-defaults
 
+    - import_role:
+        name: ceph-facts
+
   post_tasks:
     - name: set_fact docker_exec_cmd build docker exec command (containerized)
       set_fact:
diff --git a/infrastructure-playbooks/switch-from-non-containerized-to-containerized-ceph-daemons.yml b/infrastructure-playbooks/switch-from-non-containerized-to-containerized-ceph-daemons.yml
index e702d8c35e49eeb72284b945fb54ff8376e43ff1..8f931ddb742841f4304b86e88400f04764a391c7 100644 (file)
   tasks:
     - import_role:
         name: ceph-defaults
+
+    - import_role:
+        name: ceph-facts
+
     - import_role:
         name: ceph-handler
+
     - import_role:
         name: ceph-container-common
+
     - import_role:
         name: ceph-mon
 
     - import_role:
         name: ceph-defaults
 
+    - import_role:
+        name: ceph-facts
+
     - import_role:
         name: ceph-handler
 
     - import_role:
         name: ceph-defaults
 
+    - import_role:
+        name: ceph-facts
+
     - import_role:
         name: ceph-handler
 
     - import_role:
         name: ceph-defaults
 
+    - import_role:
+        name: ceph-facts
+
     - import_role:
         name: ceph-handler
 
     - import_role:
         name: ceph-defaults
 
+    - import_role:
+        name: ceph-facts
+
     - import_role:
         name: ceph-handler
 
     - import_role:
         name: ceph-defaults
 
+    - import_role:
+        name: ceph-facts
+
     - import_role:
         name: ceph-handler
 
     - import_role:
         name: ceph-defaults
 
+    - import_role:
+        name: ceph-facts
+
     - import_role:
         name: ceph-handler
 
diff --git a/roles/ceph-defaults/tasks/facts.yml b/roles/ceph-defaults/tasks/facts.yml
deleted file mode 100644 (file)
index 08ef682..0000000
+++ /dev/null
@@ -1,271 +0,0 @@
----
-- name: check if it is atomic host
-  stat:
-    path: /run/ostree-booted
-  register: stat_ostree
-
-- name: set_fact is_atomic
-  set_fact:
-    is_atomic: "{{ stat_ostree.stat.exists }}"
-
-- name: check if podman binary is present
-  stat:
-    path: /usr/bin/podman
-  register: podman_binary
-
-- name: set_fact is_podman
-  set_fact:
-    is_podman: "{{ podman_binary.stat.exists }}"
-
-- name: set_fact container_binary
-  set_fact:
-    container_binary: "{{ 'podman' if is_podman and ansible_distribution == 'Fedora' else 'docker' }}"
-  when: containerized_deployment
-
-- name: set_fact monitor_name ansible_hostname
-  set_fact:
-    monitor_name: "{{ ansible_hostname }}"
-  when:
-    - not mon_use_fqdn
-
-- name: set_fact monitor_name ansible_fqdn
-  set_fact:
-    monitor_name: "{{ ansible_fqdn }}"
-  when:
-    - mon_use_fqdn
-
-- name: set_fact docker_exec_cmd
-  set_fact:
-    docker_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
-  delegate_to: "{{ groups[mon_group_name][0] }}"
-  when:
-    - containerized_deployment
-    - groups.get(mon_group_name, []) | length > 0
-
-# this task shouldn't run in a rolling_update situation
-# because it blindly picks a mon, which may be down because
-# of the rolling update
-- name: is ceph running already?
-  command: "timeout 5 {{ docker_exec_cmd }} ceph --cluster {{ cluster }} -s -f json"
-  changed_when: false
-  failed_when: false
-  check_mode: no
-  register: ceph_current_status
-  run_once: true
-  delegate_to: "{{ groups[mon_group_name][0] }}"
-  when:
-    - not rolling_update
-    - groups.get(mon_group_name, []) | length > 0
-
-# We want this check to be run only on the first node
-- name: check if {{ fetch_directory }} directory exists
-  stat:
-    path: "{{ fetch_directory }}/monitor_keyring.conf"
-  delegate_to: localhost
-  become: false
-  register: monitor_keyring_conf
-  run_once: true
-
-# set this as a default when performing a rolling_update
-# so the rest of the tasks here will succeed
-- name: set_fact ceph_current_status rc 1
-  set_fact:
-    ceph_current_status:
-      rc: 1
-  when:
-    - rolling_update or groups.get(mon_group_name, []) | length == 0
-
-- name: create a local fetch directory if it does not exist
-  file:
-    path: "{{ fetch_directory }}"
-    state: directory
-  delegate_to: localhost
-  changed_when: false
-  become: false
-  when:
-    - (cephx or generate_fsid)
-
-- name: set_fact ceph_current_status (convert to json)
-  set_fact:
-    ceph_current_status: "{{ ceph_current_status.stdout | from_json }}"
-  when:
-    - not rolling_update
-    - ceph_current_status.rc == 0
-
-- name: set_fact fsid from ceph_current_status
-  set_fact:
-    fsid: "{{ ceph_current_status.fsid }}"
-  when:
-    - ceph_current_status.fsid is defined
-
-# Set ceph_release to ceph_stable by default
-- name: set_fact ceph_release ceph_stable_release
-  set_fact:
-    ceph_release: "{{ ceph_stable_release }}"
-
-- name: generate cluster fsid
-  shell: python -c 'import uuid; print(str(uuid.uuid4()))' | tee {{ fetch_directory }}/ceph_cluster_uuid.conf
-  args:
-    creates: "{{ fetch_directory }}/ceph_cluster_uuid.conf"
-  register: cluster_uuid
-  delegate_to: localhost
-  become: false
-  when:
-    - generate_fsid
-    - ceph_current_status.fsid is undefined
-
-- name: reuse cluster fsid when cluster is already running
-  shell: echo {{ fsid }} | tee {{ fetch_directory }}/ceph_cluster_uuid.conf
-  args:
-    creates: "{{ fetch_directory }}/ceph_cluster_uuid.conf"
-  delegate_to: localhost
-  become: false
-  when:
-    - ceph_current_status.fsid is defined
-
-- name: read cluster fsid if it already exists
-  command: cat {{ fetch_directory }}/ceph_cluster_uuid.conf
-  args:
-    removes: "{{ fetch_directory }}/ceph_cluster_uuid.conf"
-  delegate_to: localhost
-  changed_when: false
-  register: cluster_uuid
-  become: false
-  check_mode: no
-  when:
-    - generate_fsid
-
-- name: set_fact fsid
-  set_fact:
-    fsid: "{{ cluster_uuid.stdout }}"
-  when:
-    - generate_fsid
-
-- name: set_fact mds_name ansible_hostname
-  set_fact:
-    mds_name: "{{ ansible_hostname }}"
-  when:
-    - not mds_use_fqdn
-
-- name: set_fact mds_name ansible_fqdn
-  set_fact:
-    mds_name: "{{ ansible_fqdn }}"
-  when:
-    - mds_use_fqdn
-
-- name: set_fact rbd_client_directory_owner ceph
-  set_fact:
-    rbd_client_directory_owner: ceph
-  when:
-    - rbd_client_directory_owner is not defined
-      or not rbd_client_directory_owner
-
-- name: set_fact rbd_client_directory_group rbd_client_directory_group
-  set_fact:
-    rbd_client_directory_group: ceph
-  when:
-    - rbd_client_directory_group is not defined
-      or not rbd_client_directory_group
-
-- name: set_fact rbd_client_directory_mode 0770
-  set_fact:
-    rbd_client_directory_mode: "0770"
-  when:
-    - rbd_client_directory_mode is not defined
-      or not rbd_client_directory_mode
-
-- name: resolve device link(s)
-  command: readlink -f {{ item }}
-  changed_when: false
-  with_items: "{{ devices }}"
-  register: devices_prepare_canonicalize
-  when:
-    - devices is defined
-    - inventory_hostname in groups.get(osd_group_name, [])
-    - not osd_auto_discovery|default(False)
-    - osd_scenario|default('dummy') != 'lvm'
-
-- name: set_fact build devices from resolved symlinks
-  set_fact:
-    devices: "{{ devices | default([]) + [ item.stdout ] }}"
-  with_items: "{{ devices_prepare_canonicalize.results }}"
-  when:
-    - devices is defined
-    - inventory_hostname in groups.get(osd_group_name, [])
-    - not osd_auto_discovery|default(False)
-    - osd_scenario|default('dummy') != 'lvm'
-
-- name: set_fact build final devices list
-  set_fact:
-    devices: "{{ devices | reject('search','/dev/disk') | list | unique }}"
-  when:
-    - devices is defined
-    - inventory_hostname in groups.get(osd_group_name, [])
-    - not osd_auto_discovery|default(False)
-    - osd_scenario|default('dummy') != 'lvm'
-
-- name: set_fact ceph_uid for debian based system - non container
-  set_fact:
-    ceph_uid: 64045
-  when:
-    - not containerized_deployment
-    - ansible_os_family == 'Debian'
-
-- name: set_fact ceph_uid for red hat or suse based system - non container
-  set_fact:
-    ceph_uid: 167
-  when:
-    - not containerized_deployment
-    - ansible_os_family in ['RedHat', 'Suse']
-
-- name: set_fact ceph_uid for debian based system - container
-  set_fact:
-    ceph_uid: 64045
-  when:
-    - containerized_deployment
-    - ceph_docker_image_tag | string is search("ubuntu")
-
-- name: set_fact ceph_uid for red hat based system - container
-  set_fact:
-    ceph_uid: 167
-  when:
-    - containerized_deployment
-    - ceph_docker_image_tag | string is search("latest") or ceph_docker_image_tag | string is search("centos") or ceph_docker_image_tag | string is search("fedora")
-
-- name: set_fact ceph_uid for red hat
-  set_fact:
-    ceph_uid: 167
-  when:
-    - containerized_deployment
-    - ceph_docker_image is search("rhceph")
-
-- name: set_fact rgw_hostname
-  set_fact:
-    rgw_hostname: "{% set _value = ansible_hostname -%}
-    {% for key in (ceph_current_status['servicemap']['services']['rgw']['daemons'] | list) -%}
-    {% if key == ansible_fqdn -%}
-    {% set _value = key -%}
-    {% endif -%}
-    {% endfor -%}
-    {{ _value }}"
-  when:
-    - inventory_hostname in groups.get(rgw_group_name, []) or inventory_hostname in groups.get(nfs_group_name, [])
-    - ceph_current_status['servicemap'] is defined
-    - ceph_current_status['servicemap']['services'] is defined
-    - ceph_current_status['servicemap']['services']['rgw'] is defined
-
-- name: set_fact osd_pool_default_pg_num
-  set_fact:
-    osd_pool_default_pg_num: "{{ ceph_conf_overrides.get('global', {}).get('osd_pool_default_pg_num', ceph_osd_pool_default_pg_num) }}"
-
-- name: set_fact osd_pool_default_size
-  set_fact:
-    osd_pool_default_size: "{{ ceph_conf_overrides.get('global', {}).get('osd_pool_default_size', ceph_osd_pool_default_size) }}"
-
-- name: import_tasks set_monitor_address.yml
-  import_tasks: set_monitor_address.yml
-
-- name: import_tasks set_radosgw_address.yml
-  import_tasks: set_radosgw_address.yml
-  when:
-    - inventory_hostname in groups.get(rgw_group_name, [])
diff --git a/roles/ceph-defaults/tasks/main.yml b/roles/ceph-defaults/tasks/main.yml
index 37b7149d23a9494788e5b806e7c5aee67b0ef024..73b314ff7c704c18889cf90fdc024716c634adb6 100644 (file)
@@ -1,3 +1 @@
----
-- name: include facts.yml
-  include_tasks: facts.yml
+---
\ No newline at end of file
diff --git a/roles/ceph-defaults/tasks/set_monitor_address.yml b/roles/ceph-defaults/tasks/set_monitor_address.yml
deleted file mode 100644 (file)
index 7ac1534..0000000
+++ /dev/null
@@ -1,50 +0,0 @@
----
-- name: set_fact _monitor_address to monitor_address_block
-  set_fact:
-    _monitor_addresses: "{{ _monitor_addresses | default([]) + [{ 'name': item, 'addr': hostvars[item]['ansible_all_' + ip_version + '_addresses'] | ipaddr(hostvars[item]['monitor_address_block']) | first | ipwrap }] }}"
-  with_items:
-    - "{{ groups.get(mon_group_name, []) }}"
-  when:
-    - "item not in _monitor_addresses | default([]) | selectattr('name', 'defined') |  map(attribute='name') | list"
-    - hostvars[item]['monitor_address_block'] is defined
-    - hostvars[item]['monitor_address_block'] != 'subnet'
-
-- name: set_fact _monitor_address to monitor_address
-  set_fact:
-    _monitor_addresses: "{{ _monitor_addresses | default([]) + [{ 'name': item, 'addr': hostvars[item]['monitor_address'] | ipwrap}] }}"
-  with_items:
-    - "{{ groups.get(mon_group_name, []) }}"
-  when:
-    - "item not in _monitor_addresses | default([]) | selectattr('name', 'defined') | map(attribute='name') | list"
-    - hostvars[item]['monitor_address'] is defined
-    - hostvars[item]['monitor_address'] != '0.0.0.0'
-
-- name: set_fact _monitor_address to monitor_interface - ipv4
-  set_fact:
-    _monitor_addresses: "{{ _monitor_addresses | default([]) + [{ 'name': item, 'addr': hostvars[item]['ansible_' + (hostvars[item]['monitor_interface']|replace('-', '_'))][ip_version]['address'] | ipwrap }]  }}"
-  with_items:
-    - "{{ groups.get(mon_group_name, []) }}"
-  when:
-    - "item not in _monitor_addresses | default([]) | selectattr('name', 'defined') | map(attribute='name') | list"
-    - ip_version == 'ipv4'
-    - hostvars[item]['monitor_address_block'] | default('subnet')  == 'subnet'
-    - hostvars[item]['monitor_address'] | default('0.0.0.0') == '0.0.0.0'
-    - hostvars[item]['monitor_interface'] | default('interface') != 'interface'
-
-- name: set_fact _monitor_address to monitor_interface - ipv6
-  set_fact:
-    _monitor_addresses: "{{ _monitor_addresses | default([]) + [{ 'name': item, 'addr': hostvars[item]['ansible_' + (hostvars[item]['monitor_interface']|replace('-', '_'))][ip_version][0]['address'] | ipwrap }] }}"
-  with_items:
-    - "{{ groups.get(mon_group_name, []) }}"
-  when:
-    - "item not in _monitor_addresses | default([]) | selectattr('name', 'defined') | map(attribute='name') | list"
-    - ip_version == 'ipv6'
-    - hostvars[item]['monitor_address_block'] | default('subnet')  == 'subnet'
-    - hostvars[item]['monitor_address'] | default('0.0.0.0') == '0.0.0.0'
-    - hostvars[item]['monitor_interface'] | default('interface') != 'interface'
-
-- name: set_fact _current_monitor_address
-  set_fact:
-    _current_monitor_address: "{{ item.addr }}"
-  with_items: "{{ _monitor_addresses }}"
-  when: inventory_hostname == item.name
\ No newline at end of file
diff --git a/roles/ceph-defaults/tasks/set_radosgw_address.yml b/roles/ceph-defaults/tasks/set_radosgw_address.yml
deleted file mode 100644 (file)
index b0dcd03..0000000
+++ /dev/null
@@ -1,35 +0,0 @@
----
-- name: set_fact _radosgw_address to radosgw_address_block
-  set_fact:
-    _radosgw_address: "{{ hostvars[inventory_hostname]['ansible_all_' + ip_version + '_addresses'] | ipaddr(radosgw_address_block) | first | ipwrap }}"
-  when:
-    - radosgw_address_block is defined
-    - radosgw_address_block != 'subnet'
-
-- name: set_fact _radosgw_address to radosgw_address
-  set_fact:
-    _radosgw_address: "{{ radosgw_address | ipwrap }}"
-  when:
-    - radosgw_address is defined
-    - radosgw_address != '0.0.0.0'
-
-- block:
-  - name: set_fact _interface
-    set_fact:
-      _interface: "{{ 'ansible_' + (radosgw_interface | replace('-', '_')) }}"
-
-  - name: set_fact _radosgw_address to radosgw_interface - ipv4
-    set_fact:
-      _radosgw_address: "{{ hostvars[inventory_hostname][_interface][ip_version]['address'] }}"
-    when:
-      - ip_version == 'ipv4'
-
-  - name: set_fact _radosgw_address to radosgw_interface - ipv6
-    set_fact:
-      _radosgw_address: "{{ hostvars[inventory_hostname][_interface][ip_version][0]['address'] }}"
-    when:
-      - ip_version == 'ipv6'
-  when:
-    - radosgw_address_block == 'subnet'
-    - radosgw_address == '0.0.0.0'
-    - radosgw_interface != 'interface'
diff --git a/roles/ceph-facts/README.md b/roles/ceph-facts/README.md
new file mode 100644 (file)
index 0000000..592982d
--- /dev/null
@@ -0,0 +1,3 @@
+# Ansible role: ceph-facts
+
+Documentation is available at http://docs.ceph.com/ceph-ansible/.
diff --git a/roles/ceph-facts/defaults/main.yml b/roles/ceph-facts/defaults/main.yml
new file mode 100644 (file)
index 0000000..73b314f
--- /dev/null
@@ -0,0 +1 @@
+---
\ No newline at end of file
diff --git a/roles/ceph-facts/meta/main.yml b/roles/ceph-facts/meta/main.yml
new file mode 100644 (file)
index 0000000..b834c53
--- /dev/null
@@ -0,0 +1,17 @@
+---
+galaxy_info:
+  company: Red Hat
+  author: Guillaume Abrioux
+  description: Set some facts for ceph to be deployed
+  license: Apache
+  min_ansible_version: 2.7
+  platforms:
+    - name: Ubuntu
+      versions:
+        - xenial
+    - name: EL
+      versions:
+        - 7
+  galaxy_tags:
+    - system
+dependencies: []
diff --git a/roles/ceph-facts/tasks/facts.yml b/roles/ceph-facts/tasks/facts.yml
new file mode 100644 (file)
index 0000000..08ef682
--- /dev/null
@@ -0,0 +1,271 @@
+---
+- name: check if it is atomic host
+  stat:
+    path: /run/ostree-booted
+  register: stat_ostree
+
+- name: set_fact is_atomic
+  set_fact:
+    is_atomic: "{{ stat_ostree.stat.exists }}"
+
+- name: check if podman binary is present
+  stat:
+    path: /usr/bin/podman
+  register: podman_binary
+
+- name: set_fact is_podman
+  set_fact:
+    is_podman: "{{ podman_binary.stat.exists }}"
+
+- name: set_fact container_binary
+  set_fact:
+    container_binary: "{{ 'podman' if is_podman and ansible_distribution == 'Fedora' else 'docker' }}"
+  when: containerized_deployment
+
+- name: set_fact monitor_name ansible_hostname
+  set_fact:
+    monitor_name: "{{ ansible_hostname }}"
+  when:
+    - not mon_use_fqdn
+
+- name: set_fact monitor_name ansible_fqdn
+  set_fact:
+    monitor_name: "{{ ansible_fqdn }}"
+  when:
+    - mon_use_fqdn
+
+- name: set_fact docker_exec_cmd
+  set_fact:
+    docker_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
+  delegate_to: "{{ groups[mon_group_name][0] }}"
+  when:
+    - containerized_deployment
+    - groups.get(mon_group_name, []) | length > 0
+
+# this task shouldn't run in a rolling_update situation
+# because it blindly picks a mon, which may be down because
+# of the rolling update
+- name: is ceph running already?
+  command: "timeout 5 {{ docker_exec_cmd }} ceph --cluster {{ cluster }} -s -f json"
+  changed_when: false
+  failed_when: false
+  check_mode: no
+  register: ceph_current_status
+  run_once: true
+  delegate_to: "{{ groups[mon_group_name][0] }}"
+  when:
+    - not rolling_update
+    - groups.get(mon_group_name, []) | length > 0
+
+# We want this check to be run only on the first node
+- name: check if {{ fetch_directory }} directory exists
+  stat:
+    path: "{{ fetch_directory }}/monitor_keyring.conf"
+  delegate_to: localhost
+  become: false
+  register: monitor_keyring_conf
+  run_once: true
+
+# set this as a default when performing a rolling_update
+# so the rest of the tasks here will succeed
+- name: set_fact ceph_current_status rc 1
+  set_fact:
+    ceph_current_status:
+      rc: 1
+  when:
+    - rolling_update or groups.get(mon_group_name, []) | length == 0
+
+- name: create a local fetch directory if it does not exist
+  file:
+    path: "{{ fetch_directory }}"
+    state: directory
+  delegate_to: localhost
+  changed_when: false
+  become: false
+  when:
+    - (cephx or generate_fsid)
+
+- name: set_fact ceph_current_status (convert to json)
+  set_fact:
+    ceph_current_status: "{{ ceph_current_status.stdout | from_json }}"
+  when:
+    - not rolling_update
+    - ceph_current_status.rc == 0
+
+- name: set_fact fsid from ceph_current_status
+  set_fact:
+    fsid: "{{ ceph_current_status.fsid }}"
+  when:
+    - ceph_current_status.fsid is defined
+
+# Set ceph_release to ceph_stable by default
+- name: set_fact ceph_release ceph_stable_release
+  set_fact:
+    ceph_release: "{{ ceph_stable_release }}"
+
+- name: generate cluster fsid
+  shell: python -c 'import uuid; print(str(uuid.uuid4()))' | tee {{ fetch_directory }}/ceph_cluster_uuid.conf
+  args:
+    creates: "{{ fetch_directory }}/ceph_cluster_uuid.conf"
+  register: cluster_uuid
+  delegate_to: localhost
+  become: false
+  when:
+    - generate_fsid
+    - ceph_current_status.fsid is undefined
+
+- name: reuse cluster fsid when cluster is already running
+  shell: echo {{ fsid }} | tee {{ fetch_directory }}/ceph_cluster_uuid.conf
+  args:
+    creates: "{{ fetch_directory }}/ceph_cluster_uuid.conf"
+  delegate_to: localhost
+  become: false
+  when:
+    - ceph_current_status.fsid is defined
+
+- name: read cluster fsid if it already exists
+  command: cat {{ fetch_directory }}/ceph_cluster_uuid.conf
+  args:
+    removes: "{{ fetch_directory }}/ceph_cluster_uuid.conf"
+  delegate_to: localhost
+  changed_when: false
+  register: cluster_uuid
+  become: false
+  check_mode: no
+  when:
+    - generate_fsid
+
+- name: set_fact fsid
+  set_fact:
+    fsid: "{{ cluster_uuid.stdout }}"
+  when:
+    - generate_fsid
+
+- name: set_fact mds_name ansible_hostname
+  set_fact:
+    mds_name: "{{ ansible_hostname }}"
+  when:
+    - not mds_use_fqdn
+
+- name: set_fact mds_name ansible_fqdn
+  set_fact:
+    mds_name: "{{ ansible_fqdn }}"
+  when:
+    - mds_use_fqdn
+
+- name: set_fact rbd_client_directory_owner ceph
+  set_fact:
+    rbd_client_directory_owner: ceph
+  when:
+    - rbd_client_directory_owner is not defined
+      or not rbd_client_directory_owner
+
+- name: set_fact rbd_client_directory_group rbd_client_directory_group
+  set_fact:
+    rbd_client_directory_group: ceph
+  when:
+    - rbd_client_directory_group is not defined
+      or not rbd_client_directory_group
+
+- name: set_fact rbd_client_directory_mode 0770
+  set_fact:
+    rbd_client_directory_mode: "0770"
+  when:
+    - rbd_client_directory_mode is not defined
+      or not rbd_client_directory_mode
+
+- name: resolve device link(s)
+  command: readlink -f {{ item }}
+  changed_when: false
+  with_items: "{{ devices }}"
+  register: devices_prepare_canonicalize
+  when:
+    - devices is defined
+    - inventory_hostname in groups.get(osd_group_name, [])
+    - not osd_auto_discovery|default(False)
+    - osd_scenario|default('dummy') != 'lvm'
+
+- name: set_fact build devices from resolved symlinks
+  set_fact:
+    devices: "{{ devices | default([]) + [ item.stdout ] }}"
+  with_items: "{{ devices_prepare_canonicalize.results }}"
+  when:
+    - devices is defined
+    - inventory_hostname in groups.get(osd_group_name, [])
+    - not osd_auto_discovery|default(False)
+    - osd_scenario|default('dummy') != 'lvm'
+
+- name: set_fact build final devices list
+  set_fact:
+    devices: "{{ devices | reject('search','/dev/disk') | list | unique }}"
+  when:
+    - devices is defined
+    - inventory_hostname in groups.get(osd_group_name, [])
+    - not osd_auto_discovery|default(False)
+    - osd_scenario|default('dummy') != 'lvm'
+
+- name: set_fact ceph_uid for debian based system - non container
+  set_fact:
+    ceph_uid: 64045
+  when:
+    - not containerized_deployment
+    - ansible_os_family == 'Debian'
+
+- name: set_fact ceph_uid for red hat or suse based system - non container
+  set_fact:
+    ceph_uid: 167
+  when:
+    - not containerized_deployment
+    - ansible_os_family in ['RedHat', 'Suse']
+
+- name: set_fact ceph_uid for debian based system - container
+  set_fact:
+    ceph_uid: 64045
+  when:
+    - containerized_deployment
+    - ceph_docker_image_tag | string is search("ubuntu")
+
+- name: set_fact ceph_uid for red hat based system - container
+  set_fact:
+    ceph_uid: 167
+  when:
+    - containerized_deployment
+    - ceph_docker_image_tag | string is search("latest") or ceph_docker_image_tag | string is search("centos") or ceph_docker_image_tag | string is search("fedora")
+
+- name: set_fact ceph_uid for red hat
+  set_fact:
+    ceph_uid: 167
+  when:
+    - containerized_deployment
+    - ceph_docker_image is search("rhceph")
+
+- name: set_fact rgw_hostname
+  set_fact:
+    rgw_hostname: "{% set _value = ansible_hostname -%}
+    {% for key in (ceph_current_status['servicemap']['services']['rgw']['daemons'] | list) -%}
+    {% if key == ansible_fqdn -%}
+    {% set _value = key -%}
+    {% endif -%}
+    {% endfor -%}
+    {{ _value }}"
+  when:
+    - inventory_hostname in groups.get(rgw_group_name, []) or inventory_hostname in groups.get(nfs_group_name, [])
+    - ceph_current_status['servicemap'] is defined
+    - ceph_current_status['servicemap']['services'] is defined
+    - ceph_current_status['servicemap']['services']['rgw'] is defined
+
+- name: set_fact osd_pool_default_pg_num
+  set_fact:
+    osd_pool_default_pg_num: "{{ ceph_conf_overrides.get('global', {}).get('osd_pool_default_pg_num', ceph_osd_pool_default_pg_num) }}"
+
+- name: set_fact osd_pool_default_size
+  set_fact:
+    osd_pool_default_size: "{{ ceph_conf_overrides.get('global', {}).get('osd_pool_default_size', ceph_osd_pool_default_size) }}"
+
+- name: import_tasks set_monitor_address.yml
+  import_tasks: set_monitor_address.yml
+
+- name: import_tasks set_radosgw_address.yml
+  import_tasks: set_radosgw_address.yml
+  when:
+    - inventory_hostname in groups.get(rgw_group_name, [])
diff --git a/roles/ceph-facts/tasks/main.yml b/roles/ceph-facts/tasks/main.yml
new file mode 100644 (file)
index 0000000..37b7149
--- /dev/null
@@ -0,0 +1,3 @@
+---
+- name: include facts.yml
+  include_tasks: facts.yml
diff --git a/roles/ceph-facts/tasks/set_monitor_address.yml b/roles/ceph-facts/tasks/set_monitor_address.yml
new file mode 100644 (file)
index 0000000..7ac1534
--- /dev/null
@@ -0,0 +1,50 @@
+---
+- name: set_fact _monitor_address to monitor_address_block
+  set_fact:
+    _monitor_addresses: "{{ _monitor_addresses | default([]) + [{ 'name': item, 'addr': hostvars[item]['ansible_all_' + ip_version + '_addresses'] | ipaddr(hostvars[item]['monitor_address_block']) | first | ipwrap }] }}"
+  with_items:
+    - "{{ groups.get(mon_group_name, []) }}"
+  when:
+    - "item not in _monitor_addresses | default([]) | selectattr('name', 'defined') |  map(attribute='name') | list"
+    - hostvars[item]['monitor_address_block'] is defined
+    - hostvars[item]['monitor_address_block'] != 'subnet'
+
+- name: set_fact _monitor_address to monitor_address
+  set_fact:
+    _monitor_addresses: "{{ _monitor_addresses | default([]) + [{ 'name': item, 'addr': hostvars[item]['monitor_address'] | ipwrap}] }}"
+  with_items:
+    - "{{ groups.get(mon_group_name, []) }}"
+  when:
+    - "item not in _monitor_addresses | default([]) | selectattr('name', 'defined') | map(attribute='name') | list"
+    - hostvars[item]['monitor_address'] is defined
+    - hostvars[item]['monitor_address'] != '0.0.0.0'
+
+- name: set_fact _monitor_address to monitor_interface - ipv4
+  set_fact:
+    _monitor_addresses: "{{ _monitor_addresses | default([]) + [{ 'name': item, 'addr': hostvars[item]['ansible_' + (hostvars[item]['monitor_interface']|replace('-', '_'))][ip_version]['address'] | ipwrap }]  }}"
+  with_items:
+    - "{{ groups.get(mon_group_name, []) }}"
+  when:
+    - "item not in _monitor_addresses | default([]) | selectattr('name', 'defined') | map(attribute='name') | list"
+    - ip_version == 'ipv4'
+    - hostvars[item]['monitor_address_block'] | default('subnet')  == 'subnet'
+    - hostvars[item]['monitor_address'] | default('0.0.0.0') == '0.0.0.0'
+    - hostvars[item]['monitor_interface'] | default('interface') != 'interface'
+
+- name: set_fact _monitor_address to monitor_interface - ipv6
+  set_fact:
+    _monitor_addresses: "{{ _monitor_addresses | default([]) + [{ 'name': item, 'addr': hostvars[item]['ansible_' + (hostvars[item]['monitor_interface']|replace('-', '_'))][ip_version][0]['address'] | ipwrap }] }}"
+  with_items:
+    - "{{ groups.get(mon_group_name, []) }}"
+  when:
+    - "item not in _monitor_addresses | default([]) | selectattr('name', 'defined') | map(attribute='name') | list"
+    - ip_version == 'ipv6'
+    - hostvars[item]['monitor_address_block'] | default('subnet')  == 'subnet'
+    - hostvars[item]['monitor_address'] | default('0.0.0.0') == '0.0.0.0'
+    - hostvars[item]['monitor_interface'] | default('interface') != 'interface'
+
+- name: set_fact _current_monitor_address
+  set_fact:
+    _current_monitor_address: "{{ item.addr }}"
+  with_items: "{{ _monitor_addresses }}"
+  when: inventory_hostname == item.name
\ No newline at end of file
diff --git a/roles/ceph-facts/tasks/set_radosgw_address.yml b/roles/ceph-facts/tasks/set_radosgw_address.yml
new file mode 100644 (file)
index 0000000..b0dcd03
--- /dev/null
@@ -0,0 +1,35 @@
+---
+- name: set_fact _radosgw_address to radosgw_address_block
+  set_fact:
+    _radosgw_address: "{{ hostvars[inventory_hostname]['ansible_all_' + ip_version + '_addresses'] | ipaddr(radosgw_address_block) | first | ipwrap }}"
+  when:
+    - radosgw_address_block is defined
+    - radosgw_address_block != 'subnet'
+
+- name: set_fact _radosgw_address to radosgw_address
+  set_fact:
+    _radosgw_address: "{{ radosgw_address | ipwrap }}"
+  when:
+    - radosgw_address is defined
+    - radosgw_address != '0.0.0.0'
+
+- block:
+  - name: set_fact _interface
+    set_fact:
+      _interface: "{{ 'ansible_' + (radosgw_interface | replace('-', '_')) }}"
+
+  - name: set_fact _radosgw_address to radosgw_interface - ipv4
+    set_fact:
+      _radosgw_address: "{{ hostvars[inventory_hostname][_interface][ip_version]['address'] }}"
+    when:
+      - ip_version == 'ipv4'
+
+  - name: set_fact _radosgw_address to radosgw_interface - ipv6
+    set_fact:
+      _radosgw_address: "{{ hostvars[inventory_hostname][_interface][ip_version][0]['address'] }}"
+    when:
+      - ip_version == 'ipv6'
+  when:
+    - radosgw_address_block == 'subnet'
+    - radosgw_address == '0.0.0.0'
+    - radosgw_interface != 'interface'
diff --git a/site-container.yml.sample b/site-container.yml.sample
index 3c4425ebf3e90a00531d79c014fceebc9cbe4bbe..1a9a321b372045cc8359c043ef3a6da2e90406a6 100644 (file)
     - import_role:
         name: ceph-defaults
       tags: ['ceph_update_config']
+    - import_role:
+        name: ceph-facts
+      tags: ['ceph_update_config']
     - import_role:
         name: ceph-handler
     - import_role:
     - import_role:
         name: ceph-defaults
       tags: ['ceph_update_config']
+    - import_role:
+        name: ceph-facts
+      tags: ['ceph_update_config']
     - import_role:
         name: ceph-handler
     - import_role:
     - import_role:
         name: ceph-defaults
       tags: ['ceph_update_config']
+    - import_role:
+        name: ceph-facts
+      tags: ['ceph_update_config']
     - import_role:
         name: ceph-handler
     - import_role:
     - import_role:
         name: ceph-defaults
       tags: ['ceph_update_config']
+    - import_role:
+        name: ceph-facts
+      tags: ['ceph_update_config']
     - import_role:
         name: ceph-handler
     - import_role:
     - import_role:
         name: ceph-defaults
       tags: ['ceph_update_config']
+    - import_role:
+        name: ceph-facts
+      tags: ['ceph_update_config']
     - import_role:
         name: ceph-handler
     - import_role:
     - import_role:
         name: ceph-defaults
       tags: ['ceph_update_config']
+    - import_role:
+        name: ceph-facts
+      tags: ['ceph_update_config']
     - import_role:
         name: ceph-handler
     - import_role:
     - import_role:
         name: ceph-defaults
       tags: ['ceph_update_config']
+    - import_role:
+        name: ceph-facts
+      tags: ['ceph_update_config']
     - import_role:
         name: ceph-handler
     - import_role:
     - import_role:
         name: ceph-defaults
       tags: ['ceph_update_config']
+    - import_role:
+        name: ceph-facts
+      tags: ['ceph_update_config']
     - import_role:
         name: ceph-handler
     - import_role:
     - import_role:
         name: ceph-defaults
       tags: ['ceph_update_config']
+    - import_role:
+        name: ceph-facts
+      tags: ['ceph_update_config']
     - import_role:
         name: ceph-handler
     - import_role:
   gather_facts: false
   become: True
   tasks:
+    - import_role:
+        name: ceph-defaults
     - name: check if podman binary is present
       stat:
         path: /usr/bin/podman
 
     - name: get ceph status from the first monitor
       command: >
-        {{ 'podman' if podman_binary.stat.exists and ansible_distribution == 'Fedora' else 'docker' }} exec ceph-mon-{{ hostvars[groups['mons'][0]]['ansible_hostname'] }} ceph --cluster {{ cluster | default ('ceph') }} -s
+        {{ 'podman' if podman_binary.stat.exists and ansible_distribution == 'Fedora' else 'docker' }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }} ceph --cluster {{ cluster }} -s
       register: ceph_status
       changed_when: false
-      delegate_to: "{{ groups['mons'][0] }}"
+      delegate_to: "{{ groups[mon_group_name][0] }}"
       run_once: true
-      ignore_errors: true # we skip the error if mon_group_name is different than 'mons'
 
-    - name: "show ceph status for cluster {{ cluster | default ('ceph') }}"
+    - name: "show ceph status for cluster {{ cluster }}"
       debug:
         msg: "{{ ceph_status.stdout_lines }}"
-      delegate_to: "{{ groups['mons'][0] }}"
+      delegate_to: "{{ groups[mon_group_name][0] }}"
       run_once: true
       when: not ceph_status.failed
diff --git a/site.yml.sample b/site.yml.sample
index b963f2df2a04740a96d16d8dea09c9c817dc3784..f594633d7c759ac08b9c26908575dd5fd5806659 100644 (file)
@@ -96,6 +96,9 @@
     - import_role:
         name: ceph-defaults
       tags: ['ceph_update_config']
+    - import_role:
+        name: ceph-facts
+      tags: ['ceph_update_config']
     - import_role:
         name: ceph-handler
     - import_role:
     - import_role:
         name: ceph-defaults
       tags: ['ceph_update_config']
+    - import_role:
+        name: ceph-facts
+      tags: ['ceph_update_config']
     - import_role:
         name: ceph-handler
     - import_role:
     - import_role:
         name: ceph-defaults
       tags: ['ceph_update_config']
+    - import_role:
+        name: ceph-facts
+      tags: ['ceph_update_config']
     - import_role:
         name: ceph-handler
     - import_role:
     - import_role:
         name: ceph-defaults
       tags: ['ceph_update_config']
+    - import_role:
+        name: ceph-facts
+      tags: ['ceph_update_config']
     - import_role:
         name: ceph-handler
     - import_role:
     - import_role:
         name: ceph-defaults
       tags: ['ceph_update_config']
+    - import_role:
+        name: ceph-facts
+      tags: ['ceph_update_config']
     - import_role:
         name: ceph-handler
     - import_role:
     - import_role:
         name: ceph-defaults
       tags: ['ceph_update_config']
+    - import_role:
+        name: ceph-facts
+      tags: ['ceph_update_config']
     - import_role:
         name: ceph-handler
     - import_role:
     - import_role:
         name: ceph-defaults
       tags: ['ceph_update_config']
+    - import_role:
+        name: ceph-facts
+      tags: ['ceph_update_config']
     - import_role:
         name: ceph-handler
     - import_role:
     - import_role:
         name: ceph-defaults
       tags: ['ceph_update_config']
+    - import_role:
+        name: ceph-facts
+      tags: ['ceph_update_config']
     - import_role:
         name: ceph-handler
     - import_role:
     - import_role:
         name: ceph-defaults
       tags: ['ceph_update_config']
+    - import_role:
+        name: ceph-facts
+      tags: ['ceph_update_config']
     - import_role:
         name: ceph-handler
     - import_role:
     - import_role:
         name: ceph-defaults
       tags: ['ceph_update_config']
+    - import_role:
+        name: ceph-facts
+      tags: ['ceph_update_config']
     - import_role:
         name: ceph-handler
     - import_role:
   gather_facts: false
   become: True
   tasks:
+    - import_role:
+        name: ceph-defaults
     - name: get ceph status from the first monitor
-      command: ceph --cluster {{ cluster | default ('ceph') }} -s
+      command: ceph --cluster {{ cluster }} -s
       register: ceph_status
       changed_when: false
-      delegate_to: "{{ groups['mons'][0] }}"
+      delegate_to: "{{ groups[mon_group_name][0] }}"
       run_once: true
-      ignore_errors: true # we skip the error if mon_group_name is different than 'mons'
 
-    - name: "show ceph status for cluster {{ cluster | default ('ceph') }}"
+    - name: "show ceph status for cluster {{ cluster }}"
       debug:
         msg: "{{ ceph_status.stdout_lines }}"
-      delegate_to: "{{ groups['mons'][0] }}"
+      delegate_to: "{{ groups[mon_group_name][0] }}"
       run_once: true
       when: not ceph_status.failed
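
Note that in the site playbook samples the new ceph-facts import carries the
same ceph_update_config tag as ceph-defaults, presumably so that a
config-only run still computes the derived facts (monitor addresses, fsid,
and so on) that templating ceph.conf depends on, e.g.
`ansible-playbook -i hosts site.yml --tags ceph_update_config`
(invocation shown as an assumed standard usage, not taken from the commit).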