git.apps.os.sepia.ceph.com Git - ceph-ansible.git/commitdiff
shrink_osd: use cv zap by fsid to remove parts/lvs
author    Noah Watkins <noahwatkins@gmail.com>
          Thu, 17 Jan 2019 23:08:19 +0000 (15:08 -0800)
committer mergify[bot] <mergify[bot]@users.noreply.github.com>
          Wed, 6 Feb 2019 00:37:11 +0000 (00:37 +0000)
Fixes:
  https://bugzilla.redhat.com/show_bug.cgi?id=1569413
  https://bugzilla.redhat.com/show_bug.cgi?id=1572933

Note: rebased

Signed-off-by: Noah Watkins <noahwatkins@gmail.com>
(cherry picked from commit 9a43674d2e91ef46917cabe49651c46b630e5ace)
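
Here "cv" is ceph-volume: instead of enumerating device paths per host, the play now asks ceph-volume to find and destroy every partition and logical volume belonging to a given OSD fsid. As a rough manual equivalent, a standalone task might look like the sketch below; osd_fsid_to_zap and target_host are illustrative variable names, not part of the playbook, and --destroy is written out explicitly even though the ceph_volume module is expected to handle that itself.

    - name: zap everything belonging to one OSD by fsid (illustrative sketch only)
      command: "ceph-volume lvm zap --destroy --osd-fsid {{ osd_fsid_to_zap }}"
      delegate_to: "{{ target_host }}"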

infrastructure-playbooks/shrink-osd.yml

index e0135284b7cbd1ecb78d77dcbf3eeca6a59bc29b..89d0bc082f35dbe5084adbe0c86c08adb2b06b3d 100644
 
     - name: set_fact osd_hosts
       set_fact:
-        osd_hosts: "{{ osd_hosts | default([]) + [ (item.stdout | from_json).crush_location.host ] }}"
+        osd_hosts: "{{ osd_hosts | default([]) + [ [ (item.stdout | from_json).crush_location.host, (item.stdout | from_json).osd_fsid ] ] }}"
       with_items: "{{ find_osd_hosts.results }}"
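
With this change osd_hosts is no longer a flat list of host names; each element is a two-item list pairing the CRUSH host with the OSD's fsid, one pair per OSD being removed. A hypothetical example of the resulting fact (host names and fsid values are made up):

    osd_hosts:
      - [ "osd-node-1", "aaaaaaaa-1111-2222-3333-444444444444" ]
      - [ "osd-node-2", "bbbbbbbb-1111-2222-3333-444444444444" ]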
 
-    - name: find lvm osd volumes on each host
-      ceph_volume:
-        action: "list"
-      environment:
-        CEPH_VOLUME_DEBUG: 1
-        CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment else None }}"
-      with_items: "{{ osd_hosts }}"
-      delegate_to: "{{ item }}"
-      register: osd_volumes
-
-    - name: filter osd volumes to kill by osd - non container
-      set_fact:
-        osd_volumes_to_kill_non_container: "{{ osd_volumes_to_kill_non_container | default([]) + [ (item.1.stdout|from_json)[item.0] ] }}"
-      with_together:
-        - "{{ osd_to_kill.split(',') }}"
-        - "{{ osd_volumes.results }}"
-
-    - name: generate (host / volume) pairs to zap - non container
-      set_fact:
-        osd_host_volumes_to_kill_non_container: "{%- set _val = namespace(devs=[]) -%}
-        {%- for host in osd_hosts -%}
-        {%- for dev in osd_volumes_to_kill_non_container[loop.index-1] -%}
-        {%- set _val.devs = _val.devs + [{\"host\": host, \"path\": dev.path}] -%}
-        {%- endfor -%}
-        {%- endfor -%}
-        {{ _val.devs }}"
-
     - name: mark osd(s) out of the cluster
       command: "{{ docker_exec_cmd }} ceph --cluster {{ cluster }} osd out {{ item }}"
       run_once: true
         name: ceph-osd@{{ item.0 }}
         state: stopped
         enabled: no
-      with_together:
-        - "{{ osd_to_kill.split(',') }}"
-        - "{{ osd_hosts }}"
-      delegate_to: "{{ item.1 }}"
+      loop: "{{ osd_to_kill.split(',')|zip(osd_hosts)|list }}"
+      delegate_to: "{{ item.1.0 }}"
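
The loop above zips the OSD ids with the (host, fsid) pairs, so each item is a nested list: item.0 is the OSD id used in the systemd unit name and item.1.0 is the host the task is delegated to. A hypothetical illustration of the loop items (ids, hosts and fsids invented, reusing the earlier example):

    # with osd_to_kill = "0,3" and osd_hosts as sketched above, the loop yields:
    - [ "0", [ "osd-node-1", "aaaaaaaa-1111-2222-3333-444444444444" ] ]
    - [ "3", [ "osd-node-2", "bbbbbbbb-1111-2222-3333-444444444444" ] ]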
 
     - name: zap osd devices
       ceph_volume:
         action: "zap"
-        data: "{{ item.path }}"
+        osd_fsid: "{{ item.1 }}"
       environment:
         CEPH_VOLUME_DEBUG: 1
         CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment else None }}"
-      delegate_to: "{{ item.host }}"
-      with_items: "{{ osd_host_volumes_to_kill_non_container }}"
+        CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+      delegate_to: "{{ item.0 }}"
+      loop: "{{ osd_hosts }}"
 
     - name: purge osd(s) from the cluster
       command: "{{ docker_exec_cmd }} ceph --cluster {{ cluster }} osd purge {{ item }} --yes-i-really-mean-it"
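
For readability, the zap task as it stands after this patch, reassembled from the + and context lines of the zap hunk above (indentation approximated):

    - name: zap osd devices
      ceph_volume:
        action: "zap"
        osd_fsid: "{{ item.1 }}"
      environment:
        CEPH_VOLUME_DEBUG: 1
        CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment else None }}"
        CEPH_CONTAINER_BINARY: "{{ container_binary }}"
      delegate_to: "{{ item.0 }}"
      loop: "{{ osd_hosts }}"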