git.apps.os.sepia.ceph.com Git - ceph-ansible.git/commitdiff
introduce new role ceph-facts
author Guillaume Abrioux <gabrioux@redhat.com>
Mon, 10 Dec 2018 14:46:32 +0000 (15:46 +0100)
committer Sébastien Han <seb@redhat.com>
Mon, 7 Jan 2019 08:14:10 +0000 (09:14 +0100)
Sometimes we play the whole `ceph-defaults` role just to access the
default values of some variables. This means we also play the `facts.yml`
part of that role even when it is not needed. Splitting the role will
speed up the playbook.

Closes: #3282
Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>
(cherry picked from commit 0eb56e36f8ce52015aa6c343faccd589e5fd2c6c)
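
For illustration only (not part of this commit): a minimal sketch of what a consuming play looks like after the split. `ceph-defaults` keeps providing default variables, and fact gathering is now opted into explicitly by listing the new `ceph-facts` role, mirroring the hunks below. The host group and daemon role shown here are illustrative.

    - hosts: osds
      gather_facts: false
      become: true
      roles:
        - ceph-defaults   # default variables only; facts.yml no longer lives here
        - ceph-facts      # the fact-setting tasks moved into this new role
        - ceph-osd        # illustrative daemon role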

15 files changed:
infrastructure-playbooks/add-osd.yml
infrastructure-playbooks/purge-docker-cluster.yml
infrastructure-playbooks/rolling_update.yml
infrastructure-playbooks/shrink-mon.yml
infrastructure-playbooks/shrink-osd.yml
infrastructure-playbooks/switch-from-non-containerized-to-containerized-ceph-daemons.yml
roles/ceph-defaults/tasks/facts.yml [deleted file]
roles/ceph-defaults/tasks/main.yml
roles/ceph-facts/README.md [new file with mode: 0644]
roles/ceph-facts/defaults/main.yml [new file with mode: 0644]
roles/ceph-facts/meta/main.yml [new file with mode: 0644]
roles/ceph-facts/tasks/facts.yml [new file with mode: 0644]
roles/ceph-facts/tasks/main.yml [new file with mode: 0644]
site-docker.yml.sample
site.yml.sample

index edf15ebc55faa16a38529bfe460b83772fba21cc..cb02a74c034647cc5cc6bd18dcbf18c299b9d4d6 100644 (file)
@@ -44,6 +44,7 @@
   roles:
     - ceph-defaults
     - ceph-validate
+    - ceph-facts
 
 - hosts: osds
   gather_facts: False
@@ -67,6 +68,7 @@
     - role: ceph-infra
     - role: ceph-docker-common
       when: containerized_deployment | bool
+    - role: ceph-facts
     - role: ceph-common
       when: not containerized_deployment | bool
     - role: ceph-config
index 7d7ae55995b5f0ef212dde243e4432a3c0b6f7c1..37764d09994c39f9112b0704d1a37dcf3d374f8a 100644 (file)
   gather_facts: true
   become: true
 
-  tasks:
-
-  - import_role:
-      name: ceph-defaults
-      private: false
-
-  - name: gather monitors facts
-    setup:
-    delegate_to: "{{ item }}"
-    delegate_facts: True
-    with_items: "{{ groups.get(mon_group_name | default('mons')) }}"
-
-  - import_role:
-      name: ceph-facts
-      private: false
-
-  - name: get all the running osds
-    shell: |
-      systemctl list-units | grep 'loaded[[:space:]]\+active' | grep -oE "ceph-osd@([0-9]{1,2}|[a-z]+).service"
-    register: osd_units
-    ignore_errors: true
-
-  - name: disable ceph osd service
-    service:
-      name: "{{ item }}"
-      state: stopped
-      enabled: no
-    with_items: "{{ osd_units.stdout_lines }}"
-
-  - name: remove osd mountpoint tree
-    file:
-      path: /var/lib/ceph/osd/
-      state: absent
-    register: remove_osd_mountpoints
-    ignore_errors: true
-
-  - name: for ceph-disk based deployment
-    block:
-      - name: get prepare container
-        command: "docker ps -a -q --filter='name=ceph-osd-prepare'"
-        register: prepare_containers
-        ignore_errors: true
-
-      - name: remove ceph osd prepare container
-        command: "docker rm -f {{ item }}"
-        with_items: "{{ prepare_containers.stdout_lines }}"
-        ignore_errors: true
-
-      # NOTE(leseb): hope someone will find a more elegant way one day...
-      - name: see if encrypted partitions are present
-        shell: |
-          blkid -t TYPE=crypto_LUKS -s PARTLABEL -s PARTUUID | grep "ceph.*." | grep -o PARTUUID.* | cut -d '"' -f 2
-        register: encrypted_ceph_partuuid
-
-      - name: get ceph data partitions
-        command: |
-          blkid -o device -t PARTLABEL="ceph data"
-        failed_when: false
-        register: ceph_data_partition_to_erase_path
-
-      - name: get ceph lockbox partitions
-        command: |
-          blkid -o device -t PARTLABEL="ceph lockbox"
-        failed_when: false
-        register: ceph_lockbox_partition_to_erase_path
-
-      - name: get ceph block partitions
-        command: |
-          blkid -o device -t PARTLABEL="ceph block"
-        failed_when: false
-        register: ceph_block_partition_to_erase_path
-
-      - name: get ceph journal partitions
-        command: |
-          blkid -o device -t PARTLABEL="ceph journal"
-        failed_when: false
-        register: ceph_journal_partition_to_erase_path
-
-      - name: get ceph db partitions
-        command: |
-          blkid -o device -t PARTLABEL="ceph block.db"
-        failed_when: false
-        register: ceph_db_partition_to_erase_path
-
-      - name: get ceph wal partitions
-        command: |
-          blkid -o device -t PARTLABEL="ceph block.wal"
-        failed_when: false
-        register: ceph_wal_partition_to_erase_path
-
-      - name: set_fact combined_devices_list
-        set_fact:
-          combined_devices_list: "{{ ceph_data_partition_to_erase_path.get('stdout_lines', []) +
-                                    ceph_lockbox_partition_to_erase_path.get('stdout_lines', []) +
-                                    ceph_block_partition_to_erase_path.get('stdout_lines', []) +
-                                    ceph_journal_partition_to_erase_path.get('stdout_lines', []) +
-                                    ceph_db_partition_to_erase_path.get('stdout_lines', []) +
-                                    ceph_wal_partition_to_erase_path.get('stdout_lines', []) }}"
-
-      - name: resolve parent device
-        command: lsblk --nodeps -no pkname "{{ item }}"
-        register: tmp_resolved_parent_device
-        with_items:
-          - "{{ combined_devices_list }}"
-
-      - name: set_fact resolved_parent_device
-        set_fact:
-          resolved_parent_device: "{{ tmp_resolved_parent_device.results | map(attribute='stdout') | list | unique }}"
-
-      - name: zap ceph osd disks
-        shell: |
-          docker run --rm \
-          --privileged=true \
-          --name ceph-osd-zap-{{ ansible_hostname }}-{{ item }} \
-          -v /dev/:/dev/ \
-          -e OSD_DEVICE=/dev/{{ item }} \
-          {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} \
-          zap_device
-        with_items:
-          - "{{ resolved_parent_device }}"
-
-      - name: wait until the zap containers die
-        shell: |
-          docker ps | grep -sq ceph-osd-zap-{{ ansible_hostname }}
-        register: zap_alive
-        failed_when: false
-        until: zap_alive.rc != 0
-        retries: 5
-        delay: 10
-
-      - name: remove ceph osd zap disk container
-        docker_container:
-          image: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
-          name: "ceph-osd-zap-{{ ansible_hostname }}-{{ item }}"
-          state: absent
-        with_items:
-          - "{{ resolved_parent_device }}"
-
-      - name: remove ceph osd service
-        file:
-          path: /etc/systemd/system/ceph-osd@.service
-          state: absent
-    when:
-      - osd_scenario != "lvm"
-
-  - name: for ceph-volume based deployments
-    block:
-      - name: zap and destroy osds created by ceph-volume with lvm_volumes
-        ceph_volume:
-          data: "{{ item.data }}"
-          data_vg: "{{ item.data_vg|default(omit) }}"
-          journal: "{{ item.journal|default(omit) }}"
-          journal_vg: "{{ item.journal_vg|default(omit) }}"
-          db: "{{ item.db|default(omit) }}"
-          db_vg: "{{ item.db_vg|default(omit) }}"
-          wal: "{{ item.wal|default(omit) }}"
-          wal_vg: "{{ item.wal_vg|default(omit) }}"
-          action: "zap"
-        environment:
-          CEPH_VOLUME_DEBUG: 1
-          CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
-          CEPH_CONTAINER_BINARY: "docker"
-        with_items: "{{ lvm_volumes }}"
-
-      - name: zap and destroy osds created by ceph-volume with devices
-        ceph_volume:
-          data: "{{ item }}"
-          action: "zap"
-        environment:
-          CEPH_VOLUME_DEBUG: 1
-          CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
-          CEPH_CONTAINER_BINARY: "docker"
-        with_items: "{{ devices | default([]) }}"
-    when:
-      - osd_scenario == "lvm"
-
-  - name: remove ceph osd image
-    docker_image:
-      state: absent
-      repository: "{{ ceph_docker_registry }}"
-      name: "{{ ceph_docker_image }}"
-      tag: "{{ ceph_docker_image_tag }}"
-      force: yes
-    tags:
-      remove_img
-    ignore_errors: true
-
-  - name: include vars from group_vars/osds.yml
-    include_vars:
-      file: "{{ item }}"
-    with_first_found:
-      - files:
-        - "{{ playbook_dir }}/group_vars/osds"
-        - "{{ playbook_dir }}/group_vars/osds.yml"
-        skip: true
-
-  - name: find all osd_disk_prepare logs
-    find:
-      paths: "{{ ceph_osd_docker_run_script_path | default('/usr/share') }}"
-      pattern: "ceph-osd-prepare-*.log"
-    register: osd_disk_prepare_logs
-
-  - name: ensure all osd_disk_prepare logs are removed
-    file:
-      path: "{{ item.path }}"
-      state: absent
-    with_items:
-      - "{{ osd_disk_prepare_logs.files }}"
+  # This is a trick so we can access 'ceph-defaults' default variables in 'ceph-facts'
+  roles:
+    - ceph-defaults
+
+  post_tasks:
+    - name: gather monitors facts
+      setup:
+      delegate_to: "{{ item }}"
+      delegate_facts: True
+      with_items: "{{ groups.get(mon_group_name | default('mons')) }}"
+
+    - import_role:
+        name: ceph-facts
+        private: false
+
+    - name: get all the running osds
+      shell: |
+        systemctl list-units | grep 'loaded[[:space:]]\+active' | grep -oE "ceph-osd@([0-9]{1,2}|[a-z]+).service"
+      register: osd_units
+      ignore_errors: true
+
+    - name: disable ceph osd service
+      service:
+        name: "{{ item }}"
+        state: stopped
+        enabled: no
+      with_items: "{{ osd_units.stdout_lines }}"
+
+    - name: remove osd mountpoint tree
+      file:
+        path: /var/lib/ceph/osd/
+        state: absent
+      register: remove_osd_mountpoints
+      ignore_errors: true
+
+    - name: for ceph-disk based deployment
+      block:
+        - name: get prepare container
+          command: "docker ps -a -q --filter='name=ceph-osd-prepare'"
+          register: prepare_containers
+          ignore_errors: true
+
+        - name: remove ceph osd prepare container
+          command: "docker rm -f {{ item }}"
+          with_items: "{{ prepare_containers.stdout_lines }}"
+          ignore_errors: true
+
+        # NOTE(leseb): hope someone will find a more elegant way one day...
+        - name: see if encrypted partitions are present
+          shell: |
+            blkid -t TYPE=crypto_LUKS -s PARTLABEL -s PARTUUID | grep "ceph.*." | grep -o PARTUUID.* | cut -d '"' -f 2
+          register: encrypted_ceph_partuuid
+
+        - name: get ceph data partitions
+          command: |
+            blkid -o device -t PARTLABEL="ceph data"
+          failed_when: false
+          register: ceph_data_partition_to_erase_path
+
+        - name: get ceph lockbox partitions
+          command: |
+            blkid -o device -t PARTLABEL="ceph lockbox"
+          failed_when: false
+          register: ceph_lockbox_partition_to_erase_path
+
+        - name: get ceph block partitions
+          command: |
+            blkid -o device -t PARTLABEL="ceph block"
+          failed_when: false
+          register: ceph_block_partition_to_erase_path
+
+        - name: get ceph journal partitions
+          command: |
+            blkid -o device -t PARTLABEL="ceph journal"
+          failed_when: false
+          register: ceph_journal_partition_to_erase_path
+
+        - name: get ceph db partitions
+          command: |
+            blkid -o device -t PARTLABEL="ceph block.db"
+          failed_when: false
+          register: ceph_db_partition_to_erase_path
+
+        - name: get ceph wal partitions
+          command: |
+            blkid -o device -t PARTLABEL="ceph block.wal"
+          failed_when: false
+          register: ceph_wal_partition_to_erase_path
+
+        - name: set_fact combined_devices_list
+          set_fact:
+            combined_devices_list: "{{ ceph_data_partition_to_erase_path.get('stdout_lines', []) +
+                                      ceph_lockbox_partition_to_erase_path.get('stdout_lines', []) +
+                                      ceph_block_partition_to_erase_path.get('stdout_lines', []) +
+                                      ceph_journal_partition_to_erase_path.get('stdout_lines', []) +
+                                      ceph_db_partition_to_erase_path.get('stdout_lines', []) +
+                                      ceph_wal_partition_to_erase_path.get('stdout_lines', []) }}"
+
+        - name: resolve parent device
+          command: lsblk --nodeps -no pkname "{{ item }}"
+          register: tmp_resolved_parent_device
+          with_items:
+            - "{{ combined_devices_list }}"
+
+        - name: set_fact resolved_parent_device
+          set_fact:
+            resolved_parent_device: "{{ tmp_resolved_parent_device.results | map(attribute='stdout') | list | unique }}"
+
+        - name: zap ceph osd disks
+          shell: |
+            docker run --rm \
+            --privileged=true \
+            --name ceph-osd-zap-{{ ansible_hostname }}-{{ item }} \
+            -v /dev/:/dev/ \
+            -e OSD_DEVICE=/dev/{{ item }} \
+            {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} \
+            zap_device
+          with_items:
+            - "{{ resolved_parent_device }}"
+
+        - name: wait until the zap containers die
+          shell: |
+            docker ps | grep -sq ceph-osd-zap-{{ ansible_hostname }}
+          register: zap_alive
+          failed_when: false
+          until: zap_alive.rc != 0
+          retries: 5
+          delay: 10
+
+        - name: remove ceph osd zap disk container
+          docker_container:
+            image: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
+            name: "ceph-osd-zap-{{ ansible_hostname }}-{{ item }}"
+            state: absent
+          with_items:
+            - "{{ resolved_parent_device }}"
+
+        - name: remove ceph osd service
+          file:
+            path: /etc/systemd/system/ceph-osd@.service
+            state: absent
+      when:
+        - osd_scenario != "lvm"
+
+    - name: for ceph-volume based deployments
+      block:
+        - name: zap and destroy osds created by ceph-volume with lvm_volumes
+          ceph_volume:
+            data: "{{ item.data }}"
+            data_vg: "{{ item.data_vg|default(omit) }}"
+            journal: "{{ item.journal|default(omit) }}"
+            journal_vg: "{{ item.journal_vg|default(omit) }}"
+            db: "{{ item.db|default(omit) }}"
+            db_vg: "{{ item.db_vg|default(omit) }}"
+            wal: "{{ item.wal|default(omit) }}"
+            wal_vg: "{{ item.wal_vg|default(omit) }}"
+            action: "zap"
+          environment:
+            CEPH_VOLUME_DEBUG: 1
+            CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
+            CEPH_CONTAINER_BINARY: "docker"
+          with_items: "{{ lvm_volumes }}"
+
+        - name: zap and destroy osds created by ceph-volume with devices
+          ceph_volume:
+            data: "{{ item }}"
+            action: "zap"
+          environment:
+            CEPH_VOLUME_DEBUG: 1
+            CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
+            CEPH_CONTAINER_BINARY: "docker"
+          with_items: "{{ devices | default([]) }}"
+      when:
+        - osd_scenario == "lvm"
+
+    - name: remove ceph osd image
+      docker_image:
+        state: absent
+        repository: "{{ ceph_docker_registry }}"
+        name: "{{ ceph_docker_image }}"
+        tag: "{{ ceph_docker_image_tag }}"
+        force: yes
+      tags:
+        remove_img
+      ignore_errors: true
+
+    - name: include vars from group_vars/osds.yml
+      include_vars:
+        file: "{{ item }}"
+      with_first_found:
+        - files:
+          - "{{ playbook_dir }}/group_vars/osds"
+          - "{{ playbook_dir }}/group_vars/osds.yml"
+          skip: true
+
+    - name: find all osd_disk_prepare logs
+      find:
+        paths: "{{ ceph_osd_docker_run_script_path | default('/usr/share') }}"
+        pattern: "ceph-osd-prepare-*.log"
+      register: osd_disk_prepare_logs
+
+    - name: ensure all osd_disk_prepare logs are removed
+      file:
+        path: "{{ item.path }}"
+        state: absent
+      with_items:
+        - "{{ osd_disk_prepare_logs.files }}"
 
 - name: purge ceph mon cluster
 
   gather_facts: true
   become: true
 
-  tasks:
-
-  - import_role:
-      name: ceph-defaults
-      private: false
+    # This is a trick so we can access 'ceph-defaults' default variables in 'ceph-facts'
+  roles:
+    - ceph-defaults
 
+  post_tasks:
   - import_role:
       name: ceph-facts
       private: false
index 0b3ef526770f88a84e9b24c1f8030fab6a240cd8..1d08ce11c4537fa93e96f7b909fdf91300e03067 100644 (file)
 
   roles:
     - ceph-defaults
+    - ceph-facts
     - ceph-handler
     - { role: ceph-common, when: not containerized_deployment }
     - { role: ceph-docker-common, when: containerized_deployment }
 
   roles:
     - ceph-defaults
+    - ceph-facts
     - ceph-handler
     - { role: ceph-common, when: not containerized_deployment }
     - { role: ceph-docker-common, when: containerized_deployment }
 
   roles:
     - ceph-defaults
+    - ceph-facts
     - ceph-handler
     - { role: ceph-common, when: not containerized_deployment }
     - { role: ceph-docker-common, when: containerized_deployment }
 
   roles:
     - ceph-defaults
+    - ceph-facts
 
   tasks:
     - name: set_fact docker_exec_cmd_osd
 
   roles:
     - ceph-defaults
+    - ceph-facts
     - ceph-handler
     - { role: ceph-common, when: not containerized_deployment }
     - { role: ceph-docker-common, when: containerized_deployment }
 
   roles:
     - ceph-defaults
+    - ceph-facts
     - ceph-handler
     - { role: ceph-common, when: not containerized_deployment }
     - { role: ceph-docker-common, when: containerized_deployment }
 
   roles:
     - ceph-defaults
+    - ceph-facts
     - ceph-handler
     - { role: ceph-common, when: not containerized_deployment }
     - { role: ceph-docker-common, when: containerized_deployment }
 
   roles:
     - ceph-defaults
+    - ceph-facts
     - ceph-handler
     - { role: ceph-common, when: not containerized_deployment }
     - { role: ceph-docker-common, when: containerized_deployment }
 
   roles:
     - ceph-defaults
+    - ceph-facts
     - ceph-handler
     - { role: ceph-common, when: not containerized_deployment }
     - { role: ceph-docker-common, when: containerized_deployment }
 
   roles:
     - ceph-defaults
+    - ceph-facts
     - ceph-handler
     - { role: ceph-common, when: not containerized_deployment }
     - { role: ceph-docker-common, when: containerized_deployment }
index 8ead8da7644d0bab8cbe013bf8eba738761670ce..8446d3bd1c48a33d788ae38d8171bac443e9b5af 100644 (file)
@@ -72,6 +72,7 @@
 
   roles:
     - ceph-defaults
+    - ceph-facts
 
   post_tasks:
     - name: pick a monitor different than the one we want to remove
index 50e97a422ad2c013d34cb7a75c8664e6669ae8ce..c1003abafa61501286e6709f7c8ee1b6fc48de26 100644 (file)
@@ -57,6 +57,7 @@
 
   roles:
     - ceph-defaults
+    - ceph-facts
 
   post_tasks:
 
index 5a954c8d0e1c5e556767ccfb78f6df65f6456d90..1267396243a6048f50ca8cbc9e98667245e70650 100644 (file)
 
   roles:
     - ceph-defaults
+    - ceph-facts
     - ceph-handler
     - ceph-docker-common
     - ceph-mon
 
   roles:
     - ceph-defaults
+    - ceph-facts
     - ceph-handler
     - ceph-docker-common
     - ceph-mgr
 
   roles:
     - ceph-defaults
+    - ceph-facts
     - ceph-handler
     - ceph-docker-common
     - ceph-osd
 
   roles:
     - ceph-defaults
+    - ceph-facts
     - ceph-handler
     - ceph-docker-common
     - ceph-mds
 
   roles:
     - ceph-defaults
+    - ceph-facts
     - ceph-handler
     - ceph-docker-common
     - ceph-rgw
 
   roles:
     - ceph-defaults
+    - ceph-facts
     - ceph-handler
     - ceph-docker-common
     - ceph-rbd-mirror
 
   roles:
     - ceph-defaults
+    - ceph-facts
     - ceph-handler
     - ceph-docker-common
     - ceph-nfs
diff --git a/roles/ceph-defaults/tasks/facts.yml b/roles/ceph-defaults/tasks/facts.yml
deleted file mode 100644 (file)
index 0eb1d3a..0000000
+++ /dev/null
@@ -1,249 +0,0 @@
----
-- name: check if it is atomic host
-  stat:
-    path: /run/ostree-booted
-  register: stat_ostree
-
-- name: set_fact is_atomic
-  set_fact:
-    is_atomic: "{{ stat_ostree.stat.exists }}"
-
-- name: set_fact monitor_name ansible_hostname
-  set_fact:
-    monitor_name: "{{ ansible_hostname }}"
-  when:
-    - not mon_use_fqdn
-
-- name: set_fact monitor_name ansible_fqdn
-  set_fact:
-    monitor_name: "{{ ansible_fqdn }}"
-  when:
-    - mon_use_fqdn
-
-- name: set_fact docker_exec_cmd
-  set_fact:
-    docker_exec_cmd: "docker exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
-  delegate_to: "{{ groups[mon_group_name][0] }}"
-  when:
-    - containerized_deployment
-    - groups.get(mon_group_name, []) | length > 0
-
-# this task shouldn't run in a rolling_update situation
-# because it blindly picks a mon, which may be down because
-# of the rolling update
-- name: is ceph running already?
-  command: "timeout 5 {{ docker_exec_cmd }} ceph --cluster {{ cluster }} -s -f json"
-  changed_when: false
-  failed_when: false
-  check_mode: no
-  register: ceph_current_status
-  run_once: true
-  delegate_to: "{{ groups[mon_group_name][0] }}"
-  when:
-    - not rolling_update
-    - groups.get(mon_group_name, []) | length > 0
-
-# We want this check to be run only on the first node
-- name: check if {{ fetch_directory }} directory exists
-  local_action:
-    module: stat
-    path: "{{ fetch_directory }}/monitor_keyring.conf"
-  become: false
-  register: monitor_keyring_conf
-  run_once: true
-
-# set this as a default when performing a rolling_update
-# so the rest of the tasks here will succeed
-- name: set_fact ceph_current_status rc 1
-  set_fact:
-    ceph_current_status:
-      rc: 1
-  when:
-    - rolling_update or groups.get(mon_group_name, []) | length == 0
-
-- name: create a local fetch directory if it does not exist
-  local_action:
-    module: file
-    path: "{{ fetch_directory }}"
-    state: directory
-  changed_when: false
-  become: false
-  when:
-    - (cephx or generate_fsid)
-
-- name: set_fact ceph_current_status (convert to json)
-  set_fact:
-    ceph_current_status: "{{ ceph_current_status.stdout | from_json }}"
-  when:
-    - not rolling_update
-    - ceph_current_status.rc == 0
-
-- name: set_fact fsid from ceph_current_status
-  set_fact:
-    fsid: "{{ ceph_current_status.fsid }}"
-  when:
-    - ceph_current_status.fsid is defined
-
-# Set ceph_release to ceph_stable by default
-- name: set_fact ceph_release ceph_stable_release
-  set_fact:
-    ceph_release: "{{ ceph_stable_release }}"
-
-- name: generate cluster fsid
-  local_action:
-    module: shell
-      python -c 'import uuid; print(str(uuid.uuid4()))' | tee {{ fetch_directory }}/ceph_cluster_uuid.conf
-    creates: "{{ fetch_directory }}/ceph_cluster_uuid.conf"
-  register: cluster_uuid
-  become: false
-  when:
-    - generate_fsid
-    - ceph_current_status.fsid is undefined
-
-- name: reuse cluster fsid when cluster is already running
-  local_action:
-    module: shell
-      echo {{ fsid }} | tee {{ fetch_directory }}/ceph_cluster_uuid.conf
-    creates: "{{ fetch_directory }}/ceph_cluster_uuid.conf"
-  become: false
-  when:
-    - ceph_current_status.fsid is defined
-
-- name: read cluster fsid if it already exists
-  local_action:
-    module: command
-      cat {{ fetch_directory }}/ceph_cluster_uuid.conf
-    removes: "{{ fetch_directory }}/ceph_cluster_uuid.conf"
-  changed_when: false
-  register: cluster_uuid
-  become: false
-  check_mode: no
-  when:
-    - generate_fsid
-
-- name: set_fact fsid
-  set_fact:
-    fsid: "{{ cluster_uuid.stdout }}"
-  when:
-    - generate_fsid
-
-- name: set_fact mds_name ansible_hostname
-  set_fact:
-    mds_name: "{{ ansible_hostname }}"
-  when:
-    - not mds_use_fqdn
-
-- name: set_fact mds_name ansible_fqdn
-  set_fact:
-    mds_name: "{{ ansible_fqdn }}"
-  when:
-    - mds_use_fqdn
-
-- name: set_fact rbd_client_directory_owner ceph
-  set_fact:
-    rbd_client_directory_owner: ceph
-  when:
-    - rbd_client_directory_owner is not defined
-      or not rbd_client_directory_owner
-
-- name: set_fact rbd_client_directory_group rbd_client_directory_group
-  set_fact:
-    rbd_client_directory_group: ceph
-  when:
-    - rbd_client_directory_group is not defined
-      or not rbd_client_directory_group
-
-- name: set_fact rbd_client_directory_mode 0770
-  set_fact:
-    rbd_client_directory_mode: "0770"
-  when:
-    - rbd_client_directory_mode is not defined
-      or not rbd_client_directory_mode
-
-- name: resolve device link(s)
-  command: readlink -f {{ item }}
-  changed_when: false
-  with_items: "{{ devices }}"
-  register: devices_prepare_canonicalize
-  when:
-    - devices is defined
-    - inventory_hostname in groups.get(osd_group_name, [])
-    - not osd_auto_discovery|default(False)
-    - osd_scenario|default('dummy') != 'lvm'
-
-- name: set_fact build devices from resolved symlinks
-  set_fact:
-    devices: "{{ devices | default([]) + [ item.stdout ] }}"
-  with_items: "{{ devices_prepare_canonicalize.results }}"
-  when:
-    - devices is defined
-    - inventory_hostname in groups.get(osd_group_name, [])
-    - not osd_auto_discovery|default(False)
-    - osd_scenario|default('dummy') != 'lvm'
-
-- name: set_fact build final devices list
-  set_fact:
-    devices: "{{ devices | reject('search','/dev/disk') | list | unique }}"
-  when:
-    - devices is defined
-    - inventory_hostname in groups.get(osd_group_name, [])
-    - not osd_auto_discovery|default(False)
-    - osd_scenario|default('dummy') != 'lvm'
-
-- name: set_fact ceph_uid for debian based system - non container
-  set_fact:
-    ceph_uid: 64045
-  when:
-    - not containerized_deployment
-    - ansible_os_family == 'Debian'
-
-- name: set_fact ceph_uid for red hat or suse based system - non container
-  set_fact:
-    ceph_uid: 167
-  when:
-    - not containerized_deployment
-    - ansible_os_family in ['RedHat', 'Suse']
-
-- name: set_fact ceph_uid for debian based system - container
-  set_fact:
-    ceph_uid: 64045
-  when:
-    - containerized_deployment
-    - ceph_docker_image_tag | string is search("ubuntu")
-
-- name: set_fact ceph_uid for red hat based system - container
-  set_fact:
-    ceph_uid: 167
-  when:
-    - containerized_deployment
-    - ceph_docker_image_tag | string is search("latest") or ceph_docker_image_tag | string is search("centos") or ceph_docker_image_tag | string is search("fedora")
-
-- name: set_fact ceph_uid for red hat
-  set_fact:
-    ceph_uid: 167
-  when:
-    - containerized_deployment
-    - ceph_docker_image is search("rhceph")
-
-- name: set_fact rgw_hostname
-  set_fact:
-    rgw_hostname: "{% set _value = ansible_hostname -%}
-    {% for key in ceph_current_status['servicemap']['services']['rgw']['daemons'].keys() -%}
-    {% if key == ansible_fqdn -%}
-    {% set _value = key -%}
-    {% endif -%}
-    {% endfor -%}
-    {{ _value }}"
-  when:
-    - inventory_hostname in groups.get(rgw_group_name, []) or inventory_hostname in groups.get(nfs_group_name, [])
-    - ceph_current_status['servicemap'] is defined
-    - ceph_current_status['servicemap']['services'] is defined
-    - ceph_current_status['servicemap']['services']['rgw'] is defined
-
-- name: set_fact osd_pool_default_pg_num
-  set_fact:
-    osd_pool_default_pg_num: "{{ ceph_conf_overrides.get('global', {}).get('osd_pool_default_pg_num', ceph_osd_pool_default_pg_num) }}"
-
-- name: set_fact osd_pool_default_size
-  set_fact:
-    osd_pool_default_size: "{{ ceph_conf_overrides.get('global', {}).get('osd_pool_default_size', ceph_osd_pool_default_size) }}"
index 37b7149d23a9494788e5b806e7c5aee67b0ef024..73b314ff7c704c18889cf90fdc024716c634adb6 100644 (file)
@@ -1,3 +1 @@
----
-- name: include facts.yml
-  include_tasks: facts.yml
+---
\ No newline at end of file
diff --git a/roles/ceph-facts/README.md b/roles/ceph-facts/README.md
new file mode 100644 (file)
index 0000000..592982d
--- /dev/null
@@ -0,0 +1,3 @@
+# Ansible role: ceph-facts
+
+Documentation is available at http://docs.ceph.com/ceph-ansible/.
diff --git a/roles/ceph-facts/defaults/main.yml b/roles/ceph-facts/defaults/main.yml
new file mode 100644 (file)
index 0000000..73b314f
--- /dev/null
@@ -0,0 +1 @@
+---
\ No newline at end of file
diff --git a/roles/ceph-facts/meta/main.yml b/roles/ceph-facts/meta/main.yml
new file mode 100644 (file)
index 0000000..b834c53
--- /dev/null
@@ -0,0 +1,17 @@
+---
+galaxy_info:
+  company: Red Hat
+  author: Guillaume Abrioux
+  description: Set some facts for ceph to be deployed
+  license: Apache
+  min_ansible_version: 2.7
+  platforms:
+    - name: Ubuntu
+      versions:
+        - xenial
+    - name: EL
+      versions:
+        - 7
+  galaxy_tags:
+    - system
+dependencies: []
diff --git a/roles/ceph-facts/tasks/facts.yml b/roles/ceph-facts/tasks/facts.yml
new file mode 100644 (file)
index 0000000..0eb1d3a
--- /dev/null
@@ -0,0 +1,249 @@
+---
+- name: check if it is atomic host
+  stat:
+    path: /run/ostree-booted
+  register: stat_ostree
+
+- name: set_fact is_atomic
+  set_fact:
+    is_atomic: "{{ stat_ostree.stat.exists }}"
+
+- name: set_fact monitor_name ansible_hostname
+  set_fact:
+    monitor_name: "{{ ansible_hostname }}"
+  when:
+    - not mon_use_fqdn
+
+- name: set_fact monitor_name ansible_fqdn
+  set_fact:
+    monitor_name: "{{ ansible_fqdn }}"
+  when:
+    - mon_use_fqdn
+
+- name: set_fact docker_exec_cmd
+  set_fact:
+    docker_exec_cmd: "docker exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
+  delegate_to: "{{ groups[mon_group_name][0] }}"
+  when:
+    - containerized_deployment
+    - groups.get(mon_group_name, []) | length > 0
+
+# this task shouldn't run in a rolling_update situation
+# because it blindly picks a mon, which may be down because
+# of the rolling update
+- name: is ceph running already?
+  command: "timeout 5 {{ docker_exec_cmd }} ceph --cluster {{ cluster }} -s -f json"
+  changed_when: false
+  failed_when: false
+  check_mode: no
+  register: ceph_current_status
+  run_once: true
+  delegate_to: "{{ groups[mon_group_name][0] }}"
+  when:
+    - not rolling_update
+    - groups.get(mon_group_name, []) | length > 0
+
+# We want this check to be run only on the first node
+- name: check if {{ fetch_directory }} directory exists
+  local_action:
+    module: stat
+    path: "{{ fetch_directory }}/monitor_keyring.conf"
+  become: false
+  register: monitor_keyring_conf
+  run_once: true
+
+# set this as a default when performing a rolling_update
+# so the rest of the tasks here will succeed
+- name: set_fact ceph_current_status rc 1
+  set_fact:
+    ceph_current_status:
+      rc: 1
+  when:
+    - rolling_update or groups.get(mon_group_name, []) | length == 0
+
+- name: create a local fetch directory if it does not exist
+  local_action:
+    module: file
+    path: "{{ fetch_directory }}"
+    state: directory
+  changed_when: false
+  become: false
+  when:
+    - (cephx or generate_fsid)
+
+- name: set_fact ceph_current_status (convert to json)
+  set_fact:
+    ceph_current_status: "{{ ceph_current_status.stdout | from_json }}"
+  when:
+    - not rolling_update
+    - ceph_current_status.rc == 0
+
+- name: set_fact fsid from ceph_current_status
+  set_fact:
+    fsid: "{{ ceph_current_status.fsid }}"
+  when:
+    - ceph_current_status.fsid is defined
+
+# Set ceph_release to ceph_stable by default
+- name: set_fact ceph_release ceph_stable_release
+  set_fact:
+    ceph_release: "{{ ceph_stable_release }}"
+
+- name: generate cluster fsid
+  local_action:
+    module: shell
+      python -c 'import uuid; print(str(uuid.uuid4()))' | tee {{ fetch_directory }}/ceph_cluster_uuid.conf
+    creates: "{{ fetch_directory }}/ceph_cluster_uuid.conf"
+  register: cluster_uuid
+  become: false
+  when:
+    - generate_fsid
+    - ceph_current_status.fsid is undefined
+
+- name: reuse cluster fsid when cluster is already running
+  local_action:
+    module: shell
+      echo {{ fsid }} | tee {{ fetch_directory }}/ceph_cluster_uuid.conf
+    creates: "{{ fetch_directory }}/ceph_cluster_uuid.conf"
+  become: false
+  when:
+    - ceph_current_status.fsid is defined
+
+- name: read cluster fsid if it already exists
+  local_action:
+    module: command
+      cat {{ fetch_directory }}/ceph_cluster_uuid.conf
+    removes: "{{ fetch_directory }}/ceph_cluster_uuid.conf"
+  changed_when: false
+  register: cluster_uuid
+  become: false
+  check_mode: no
+  when:
+    - generate_fsid
+
+- name: set_fact fsid
+  set_fact:
+    fsid: "{{ cluster_uuid.stdout }}"
+  when:
+    - generate_fsid
+
+- name: set_fact mds_name ansible_hostname
+  set_fact:
+    mds_name: "{{ ansible_hostname }}"
+  when:
+    - not mds_use_fqdn
+
+- name: set_fact mds_name ansible_fqdn
+  set_fact:
+    mds_name: "{{ ansible_fqdn }}"
+  when:
+    - mds_use_fqdn
+
+- name: set_fact rbd_client_directory_owner ceph
+  set_fact:
+    rbd_client_directory_owner: ceph
+  when:
+    - rbd_client_directory_owner is not defined
+      or not rbd_client_directory_owner
+
+- name: set_fact rbd_client_directory_group rbd_client_directory_group
+  set_fact:
+    rbd_client_directory_group: ceph
+  when:
+    - rbd_client_directory_group is not defined
+      or not rbd_client_directory_group
+
+- name: set_fact rbd_client_directory_mode 0770
+  set_fact:
+    rbd_client_directory_mode: "0770"
+  when:
+    - rbd_client_directory_mode is not defined
+      or not rbd_client_directory_mode
+
+- name: resolve device link(s)
+  command: readlink -f {{ item }}
+  changed_when: false
+  with_items: "{{ devices }}"
+  register: devices_prepare_canonicalize
+  when:
+    - devices is defined
+    - inventory_hostname in groups.get(osd_group_name, [])
+    - not osd_auto_discovery|default(False)
+    - osd_scenario|default('dummy') != 'lvm'
+
+- name: set_fact build devices from resolved symlinks
+  set_fact:
+    devices: "{{ devices | default([]) + [ item.stdout ] }}"
+  with_items: "{{ devices_prepare_canonicalize.results }}"
+  when:
+    - devices is defined
+    - inventory_hostname in groups.get(osd_group_name, [])
+    - not osd_auto_discovery|default(False)
+    - osd_scenario|default('dummy') != 'lvm'
+
+- name: set_fact build final devices list
+  set_fact:
+    devices: "{{ devices | reject('search','/dev/disk') | list | unique }}"
+  when:
+    - devices is defined
+    - inventory_hostname in groups.get(osd_group_name, [])
+    - not osd_auto_discovery|default(False)
+    - osd_scenario|default('dummy') != 'lvm'
+
+- name: set_fact ceph_uid for debian based system - non container
+  set_fact:
+    ceph_uid: 64045
+  when:
+    - not containerized_deployment
+    - ansible_os_family == 'Debian'
+
+- name: set_fact ceph_uid for red hat or suse based system - non container
+  set_fact:
+    ceph_uid: 167
+  when:
+    - not containerized_deployment
+    - ansible_os_family in ['RedHat', 'Suse']
+
+- name: set_fact ceph_uid for debian based system - container
+  set_fact:
+    ceph_uid: 64045
+  when:
+    - containerized_deployment
+    - ceph_docker_image_tag | string is search("ubuntu")
+
+- name: set_fact ceph_uid for red hat based system - container
+  set_fact:
+    ceph_uid: 167
+  when:
+    - containerized_deployment
+    - ceph_docker_image_tag | string is search("latest") or ceph_docker_image_tag | string is search("centos") or ceph_docker_image_tag | string is search("fedora")
+
+- name: set_fact ceph_uid for red hat
+  set_fact:
+    ceph_uid: 167
+  when:
+    - containerized_deployment
+    - ceph_docker_image is search("rhceph")
+
+- name: set_fact rgw_hostname
+  set_fact:
+    rgw_hostname: "{% set _value = ansible_hostname -%}
+    {% for key in ceph_current_status['servicemap']['services']['rgw']['daemons'].keys() -%}
+    {% if key == ansible_fqdn -%}
+    {% set _value = key -%}
+    {% endif -%}
+    {% endfor -%}
+    {{ _value }}"
+  when:
+    - inventory_hostname in groups.get(rgw_group_name, []) or inventory_hostname in groups.get(nfs_group_name, [])
+    - ceph_current_status['servicemap'] is defined
+    - ceph_current_status['servicemap']['services'] is defined
+    - ceph_current_status['servicemap']['services']['rgw'] is defined
+
+- name: set_fact osd_pool_default_pg_num
+  set_fact:
+    osd_pool_default_pg_num: "{{ ceph_conf_overrides.get('global', {}).get('osd_pool_default_pg_num', ceph_osd_pool_default_pg_num) }}"
+
+- name: set_fact osd_pool_default_size
+  set_fact:
+    osd_pool_default_size: "{{ ceph_conf_overrides.get('global', {}).get('osd_pool_default_size', ceph_osd_pool_default_size) }}"
diff --git a/roles/ceph-facts/tasks/main.yml b/roles/ceph-facts/tasks/main.yml
new file mode 100644 (file)
index 0000000..37b7149
--- /dev/null
@@ -0,0 +1,3 @@
+---
+- name: include facts.yml
+  include_tasks: facts.yml
index 8b9f74054d4e58b9958956a24e7d4808216f15be..82099535fd314e506902218d0583f61f44513ef9 100644 (file)
@@ -53,6 +53,8 @@
   roles:
     - role: ceph-defaults
       tags: [with_pkg, fetch_container_image]
+    - role: ceph-facts
+      tags: [with_pkg, fetch_container_image]
     - role: ceph-validate
     - role: ceph-infra
     - role: ceph-handler
@@ -89,6 +91,8 @@
   roles:
     - role: ceph-defaults
       tags: ['ceph_update_config']
+    - role: ceph-facts
+      tags: ['ceph_update_config']
     - role: ceph-handler
     - role: ceph-docker-common
     - role: ceph-config
   roles:
     - role: ceph-defaults
       tags: ['ceph_update_config']
+    - role: ceph-facts
+      tags: ['ceph_update_config']
     - role: ceph-handler
     - role: ceph-docker-common
     - role: ceph-config
   roles:
     - role: ceph-defaults
       tags: ['ceph_update_config']
+    - role: ceph-facts
+      tags: ['ceph_update_config']
     - role: ceph-handler
     - role: ceph-docker-common
     - role: ceph-config
   roles:
     - role: ceph-defaults
       tags: ['ceph_update_config']
+    - role: ceph-facts
+      tags: ['ceph_update_config']
     - role: ceph-handler
     - role: ceph-docker-common
     - role: ceph-config
   roles:
     - role: ceph-defaults
       tags: ['ceph_update_config']
+    - role: ceph-facts
+      tags: ['ceph_update_config']
     - role: ceph-handler
     - role: ceph-docker-common
     - role: ceph-config
   roles:
     - role: ceph-defaults
       tags: ['ceph_update_config']
+    - role: ceph-facts
+      tags: ['ceph_update_config']
     - role: ceph-handler
     - role: ceph-docker-common
     - role: ceph-config
   roles:
     - role: ceph-defaults
       tags: ['ceph_update_config']
+    - role: ceph-facts
+      tags: ['ceph_update_config']
     - role: ceph-handler
     - role: ceph-docker-common
     - role: ceph-config
   roles:
     - role: ceph-defaults
       tags: ['ceph_update_config']
+    - role: ceph-facts
+      tags: ['ceph_update_config']
     - role: ceph-handler
     - role: ceph-docker-common
     - role: ceph-config
   roles:
     - role: ceph-defaults
       tags: ['ceph_update_config']
+    - role: ceph-facts
+      tags: ['ceph_update_config']
     - role: ceph-handler
     - role: ceph-docker-common
       when:
             start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
   roles:
     - { role: ceph-defaults, tags: ['ceph_update_config'] }
+    - { role: ceph-facts, tags: ['ceph_update_config'] }
     - role: ceph-handler
     - ceph-docker-common
     - { role: ceph-config, tags: ['ceph_update_config'] }
   any_errors_fatal: true
   gather_facts: false
   become: True
+  roles:
+    - ceph-defaults
   tasks:
     - name: get ceph status from the first monitor
-      command: docker exec ceph-mon-{{ hostvars[groups['mons'][0]]['ansible_hostname'] }} ceph --cluster {{ cluster | default ('ceph') }} -s
+      command: docker exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }} ceph --cluster {{ cluster }} -s
       register: ceph_status
       changed_when: false
-      delegate_to: "{{ groups['mons'][0] }}"
+      delegate_to: "{{ groups[mon_group_name][0] }}"
       run_once: true
       ignore_errors: true # we skip the error if mon_group_name is different than 'mons'
 
-    - name: "show ceph status for cluster {{ cluster | default ('ceph') }}"
+    - name: "show ceph status for cluster {{ cluster }}"
       debug:
         msg: "{{ ceph_status.stdout_lines }}"
-      delegate_to: "{{ groups['mons'][0] }}"
+      delegate_to: "{{ groups[mon_group_name][0] }}"
       run_once: true
       when: not ceph_status.failed
\ No newline at end of file
index de25ff8e3ce9c52046d8873908f2f4305931ebb0..e3e33b64644548b9c455a53659238ee0d4c9a195 100644 (file)
@@ -92,6 +92,8 @@
   roles:
     - role: ceph-defaults
       tags: ['ceph_update_config']
+    - role: ceph-facts
+      tags: ['ceph_update_config']
     - role: ceph-handler
     - role: ceph-common
     - role: ceph-config
   roles:
     - role: ceph-defaults
       tags: ['ceph_update_config']
+    - role: ceph-facts
+      tags: ['ceph_update_config']
     - role: ceph-handler
     - role: ceph-common
     - role: ceph-config
   roles:
     - role: ceph-defaults
       tags: ['ceph_update_config']
+    - role: ceph-facts
+      tags: ['ceph_update_config']
     - role: ceph-handler
     - role: ceph-common
     - role: ceph-config
   roles:
     - role: ceph-defaults
       tags: ['ceph_update_config']
+    - role: ceph-facts
+      tags: ['ceph_update_config']
     - role: ceph-handler
     - role: ceph-common
     - role: ceph-config
   roles:
     - role: ceph-defaults
       tags: ['ceph_update_config']
+    - role: ceph-facts
+      tags: ['ceph_update_config']
     - role: ceph-handler
     - role: ceph-common
     - role: ceph-config
   roles:
     - role: ceph-defaults
       tags: ['ceph_update_config']
+    - role: ceph-facts
+      tags: ['ceph_update_config']
     - role: ceph-handler
     - role: ceph-common
     - role: ceph-config
   roles:
     - role: ceph-defaults
       tags: ['ceph_update_config']
+    - role: ceph-facts
+      tags: ['ceph_update_config']
     - role: ceph-handler
     - role: ceph-common
     - role: ceph-config
   roles:
     - role: ceph-defaults
       tags: ['ceph_update_config']
+    - role: ceph-facts
+      tags: ['ceph_update_config']
     - role: ceph-handler
     - role: ceph-common
     - role: ceph-config
   roles:
     - role: ceph-defaults
       tags: ['ceph_update_config']
+    - role: ceph-facts
+      tags: ['ceph_update_config']
     - role: ceph-handler
     - role: ceph-common
     - role: ceph-config
   roles:
     - role: ceph-defaults
       tags: ['ceph_update_config']
+    - role: ceph-facts
+      tags: ['ceph_update_config']
     - role: ceph-handler
     - role: ceph-common
     - role: ceph-config
   gather_facts: false
   become: True
   any_errors_fatal: true
-  tasks:
+  roles:
+    - role: ceph-defaults
+  post_tasks:
     - name: get ceph status from the first monitor
-      command: ceph --cluster {{ cluster | default ('ceph') }} -s
+      command: ceph --cluster {{ cluster }} -s
       register: ceph_status
       changed_when: false
-      delegate_to: "{{ groups['mons'][0] }}"
+      delegate_to: "{{ groups[mon_group_name][0] }}"
       run_once: true
-      ignore_errors: true # we skip the error if mon_group_name is different than 'mons'
 
-    - name: "show ceph status for cluster {{ cluster | default ('ceph') }}"
+    - name: "show ceph status for cluster {{ cluster }}"
       debug:
         msg: "{{ ceph_status.stdout_lines }}"
-      delegate_to: "{{ groups['mons'][0] }}"
+      delegate_to: "{{ groups[mon_group_name][0] }}"
       run_once: true
       when: not ceph_status.failed
\ No newline at end of file