git-server-git.apps.pok.os.sepia.ceph.com Git - ceph-ansible.git/commitdiff
shrink-osd: support fqdn in inventory
authorGuillaume Abrioux <gabrioux@redhat.com>
Mon, 9 Dec 2019 14:52:26 +0000 (15:52 +0100)
committerGuillaume Abrioux <gabrioux@redhat.com>
Thu, 9 Jan 2020 08:24:22 +0000 (09:24 +0100)
When using fqdn in inventory, that playbook fails because some tasks
use the result of `ceph osd tree` (which returns shortnames) to
retrieve data from hostvars[].

Closes: https://bugzilla.redhat.com/show_bug.cgi?id=1779021
Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>
(cherry picked from commit 6d9ca6b05b52694dec53ce61fdc16bb83c93979d)

infrastructure-playbooks/shrink-osd-ceph-disk.yml
infrastructure-playbooks/shrink-osd.yml

index 8059e2e8e320256b690d29d7d960fa4d6103e6e3..8bf1dba3f75831e07cccb26ad970f090d2b63817 100644 (file)
         - "{{ osd_to_kill_disks.results }}"
         - "{{ osd_hosts }}"
 
+    - name: set_fact _osd_hosts
+      set_fact:
+        _osd_hosts: "{{ _osd_hosts | default([]) + [hostvars[item.0]['inventory_hostname']] }}"
+      with_nested:
+        - "{{ groups.get(osd_group_name) }}"
+        - "{{ osd_hosts }}"
+      when: hostvars[item.0]['ansible_hostname'] == item.1
+
     - name: zap ceph osd disks
       shell: |
         docker run --rm \
       delegate_to: "{{ item.1 }}"
       with_together:
         - "{{ resolved_parent_device.results }}"
-        - "{{ osd_hosts }}"
+        - "{{ _osd_hosts }}"
       when:
         - containerized_deployment
 
       delegate_to: "{{ item.1 }}"
       with_together:
         - "{{ resolved_parent_device.results }}"
-        - "{{ osd_hosts }}"
+        - "{{ _osd_hosts }}"
       when:
         - containerized_deployment
 
           fi
         done
       with_together:
-        - "{{ osd_hosts }}"
+        - "{{ _osd_hosts }}"
         - "{{ osd_to_kill_disks_dedicated.results }}"
       delegate_to: "{{ item.0 }}"
       when:
       run_once: true
       with_together:
         - "{{ osd_to_kill.split(',') }}"
-        - "{{ osd_hosts }}"
+        - "{{ _osd_hosts }}"
       delegate_to: "{{ item.1 }}"
       when:
         - not containerized_deployment
       run_once: true
       with_together:
         - "{{ osd_to_kill.split(',') }}"
-        - "{{ osd_hosts }}"
+        - "{{ _osd_hosts }}"
       delegate_to: "{{ item.1 }}"
       when:
         - not containerized_deployment
       delegate_to: "{{ item.1 }}"
       with_together:
         - "{{ resolved_parent_device.results }}"
-        - "{{ osd_hosts }}"
+        - "{{ _osd_hosts }}"
 
     - name: remove osd(s) from crush_map when ceph-disk destroy fail
       command: "{{ docker_exec_cmd }} ceph --cluster {{ cluster }} osd crush remove osd.{{ item }}"
             fi
           done
       with_together:
-        - "{{ osd_hosts }}"
+        - "{{ _osd_hosts }}"
         - "{{ osd_to_kill_disks_dedicated_non_container.results }}"
       delegate_to: "{{ item.0 }}"
       when:
         state: absent
       with_together:
         - "{{ osd_to_kill.split(',') }}"
-        - "{{ osd_hosts }}"
+        - "{{ _osd_hosts }}"
       delegate_to: "{{ item.1 }}"
 
     - name: show ceph health
index e5d8e8d5be638fb4b6b864eb6ce056ceca1a92d0..3cac706dfc02a3d93e08c74c39b34078741b1933 100644 (file)
         osd_hosts: "{{ osd_hosts | default([]) + [ [ (item.stdout | from_json).crush_location.host, (item.stdout | from_json).osd_fsid ] ] }}"
       with_items: "{{ find_osd_hosts.results }}"
 
+    - name: set_fact _osd_hosts
+      set_fact:
+        _osd_hosts: "{{ _osd_hosts | default([]) + [ [ item.0, item.2 ] ] }}"
+      with_nested:
+        - "{{ groups.get(osd_group_name) }}"
+        - "{{ osd_hosts }}"
+      when: hostvars[item.0]['ansible_hostname'] == item.1
+
     - name: mark osd(s) out of the cluster
       command: "{{ docker_exec_cmd }} ceph --cluster {{ cluster }} osd out {{ item }}"
       run_once: true
         name: ceph-osd@{{ item.0 }}
         state: stopped
         enabled: no
-      loop: "{{ osd_to_kill.split(',')|zip(osd_hosts)|list }}"
+      loop: "{{ osd_to_kill.split(',')|zip(_osd_hosts)|list }}"
       delegate_to: "{{ item.1.0 }}"
 
     - name: zap osd devices
         CEPH_VOLUME_DEBUG: 1
         CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment else None }}"
       delegate_to: "{{ item.0 }}"
-      loop: "{{ osd_hosts }}"
+      loop: "{{ _osd_hosts }}"
 
     - name: purge osd(s) from the cluster
       command: "{{ docker_exec_cmd }} ceph --cluster {{ cluster }} osd purge {{ item }} --yes-i-really-mean-it"