shrink-osd: purge dedicated devices
author Sébastien Han <seb@redhat.com>
Thu, 19 Jul 2018 13:45:55 +0000 (15:45 +0200)
committer mergify[bot] <mergify[bot]@users.noreply.github.com>
Tue, 18 Sep 2018 07:27:41 +0000 (07:27 +0000)
Once the OSD is destroyed we also have to purge the associated devices;
this means purging the journal, db, and wal partitions too.

This now works for both containerized and non-containerized deployments.

Closes: https://bugzilla.redhat.com/show_bug.cgi?id=1572933
Signed-off-by: Sébastien Han <seb@redhat.com>
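
The discovery tasks below parse ceph-disk list output: they select the line
mentioning the OSD id and pull out every partition path with a regex covering
hd/sd/vd partitions and NVMe namespaces. A minimal sketch of that extraction,
where the sample line and device names are invented for the example (real
ceph-disk list output may differ slightly):

    # Invented sample line; the grep pipeline is the one used by the tasks.
    echo '/dev/sda1 ceph data, active, cluster ceph, osd.12, journal /dev/sdb1' \
      | grep -w 'osd\.12' \
      | grep -Eo '/dev/([hsv]d[a-z]{1,2})[0-9]{1,2}|/dev/nvme[0-9]n[0-9]p[0-9]'
    # prints:
    #   /dev/sda1
    #   /dev/sdb1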
infrastructure-playbooks/shrink-osd.yml

index f1c2d82f46f3bc09b2e33f15e222d95bc2aec73f..25e9047d16e5c0fa93f679a36dba5cdb781e12ac 100644 (file)
       when:
         - containerized_deployment
 
+    # extract any dedicated journal/db/wal partitions attached to this osd id
+    - name: find osd dedicated devices - container
+      shell: >
+        docker run --privileged=true -v /dev:/dev --entrypoint /usr/sbin/ceph-disk
+        {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
+        list | grep -w 'osd\.{{ item.0 }}' | grep -Eo '/dev/([hsv]d[a-z]{1,2})[0-9]{1,2}|/dev/nvme[0-9]n[0-9]p[0-9]'
+      with_together:
+        - "{{ osd_to_kill.split(',') }}"
+        - "{{ osd_hosts }}"
+      register: osd_to_kill_disks_dedicated
+      delegate_to: "{{ item.1 }}"
+      when:
+        - containerized_deployment
+
+    - name: find osd dedicated devices - non container
+      shell: ceph-disk list | grep -w 'osd\.{{ item.0 }}' | grep -Eo '/dev/([hsv]d[a-z]{1,2})[0-9]{1,2}|/dev/nvme[0-9]n[0-9]p[0-9]'
+      with_together:
+        - "{{ osd_to_kill.split(',') }}"
+        - "{{ osd_hosts }}"
+      register: osd_to_kill_disks_dedicated_non_container
+      delegate_to: "{{ item.1 }}"
+      when:
+        - not containerized_deployment
+
     # if nvme then osd_to_kill_disks is nvme0n1, we need nvme0
     # if ssd or hdd then osd_to_kill_disks is sda1, we need sda
     - name: stop osd services (container)
       when:
         - containerized_deployment
 
+    # a container name may not contain '/', so strip the path down to the
+    # partition name (e.g. sdb1) when naming the zap container
+    - name: zap ceph osd partitions from dedicated devices
+      shell: |
+        docker run --rm \
+        --privileged=true \
+        --name ceph-osd-zap-{{ ansible_hostname }}-{{ item.1.stdout | basename }} \
+        -v /dev/:/dev/ \
+        -e OSD_DEVICE={{ item.1.stdout }} \
+        {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} \
+        zap_device
+      with_together:
+        - "{{ osd_hosts }}"
+        - "{{ osd_to_kill_disks_dedicated.results }}"
+      delegate_to: "{{ item.0 }}"
+      when:
+        - containerized_deployment
+        - item.1.stdout | length > 0
+
     - name: deactivating osd(s)
       command: ceph-disk deactivate --cluster {{ cluster }} --deactivate-by-id {{ item.0 }} --mark-out
       run_once: true
       delegate_to: "{{ groups[mon_group_name][0] }}"
       with_items: "{{ osd_to_kill.split(',') }}"
 
+    - name: zap dedicated partitions
+      shell: |
+        for dev in {{ item.1.stdout_lines | join(' ') }}; do
+          # parent disk holding the partition (e.g. sdb for /dev/sdb1)
+          pkname=$(lsblk --nodeps -no PKNAME "$dev")
+          # wipe filesystem signatures and zero the first few MB
+          wipefs --all "$dev"
+          dd if=/dev/zero of="$dev" bs=1M count=10
+          # drop the partition entry from the parent disk's table
+          partition_nb=$(echo "$dev" | grep -oE '[0-9]{1,2}$')
+          sgdisk --delete $partition_nb /dev/$pkname
+        done
+      with_together:
+        - "{{ osd_hosts }}"
+        - "{{ osd_to_kill_disks_dedicated_non_container.results }}"
+      delegate_to: "{{ item.0 }}"
+      when:
+        - not containerized_deployment
+        - item.1.stdout | length > 0
+
     - name: show ceph health
       command: "{{ docker_exec_cmd }} ceph --cluster {{ cluster }} -s"
       delegate_to: "{{ groups[mon_group_name][0] }}"
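
With these tasks in place, a shrink run removes the OSDs and cleans their
dedicated journal/db/wal partitions in the same pass. A hedged example
invocation (the inventory path and osd ids are placeholders; osd_to_kill is
the variable the tasks above consume via osd_to_kill.split(',')):

    # Placeholder inventory and osd ids.
    ansible-playbook -i hosts infrastructure-playbooks/shrink-osd.yml \
        -e osd_to_kill=0,2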