From 735e1917db87a086638e1cd0af3b467350a219d5 Mon Sep 17 00:00:00 2001
From: =?utf8?q?S=C3=A9bastien=20Han?=
Date: Thu, 19 Jul 2018 15:45:55 +0200
Subject: [PATCH] shrink-osd: purge dedicated devices
MIME-Version: 1.0
Content-Type: text/plain; charset=utf8
Content-Transfer-Encoding: 8bit

Once the OSD is destroyed, we also have to purge the associated devices;
this means purging the journal, db, and wal partitions too. This works for
both containerized and non-containerized deployments.

Closes: https://bugzilla.redhat.com/show_bug.cgi?id=1572933
Signed-off-by: Sébastien Han
---
 infrastructure-playbooks/shrink-osd.yml | 57 +++++++++++++++++++++++++
 1 file changed, 57 insertions(+)

diff --git a/infrastructure-playbooks/shrink-osd.yml b/infrastructure-playbooks/shrink-osd.yml
index f1c2d82f4..25e9047d1 100644
--- a/infrastructure-playbooks/shrink-osd.yml
+++ b/infrastructure-playbooks/shrink-osd.yml
@@ -116,6 +116,29 @@
       when:
         - containerized_deployment
 
+    - name: find osd dedicated devices - container
+      shell: >
+        docker run --privileged=true -v /dev:/dev --entrypoint /usr/sbin/ceph-disk
+        {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
+        list | grep osd.{{ item.0 }} | grep -Eo '/dev/([hsv]d[a-z]{1,2})[0-9]{1,2}|/dev/nvme[0-9]n[0-9]p[0-9]'
+      with_together:
+        - "{{ osd_to_kill.split(',') }}"
+        - "{{ osd_hosts }}"
+      register: osd_to_kill_disks_dedicated
+      delegate_to: "{{ item.1 }}"
+      when:
+        - containerized_deployment
+
+    - name: find osd dedicated devices - non container
+      shell: ceph-disk list | grep osd.{{ item.0 }} | grep -Eo '/dev/([hsv]d[a-z]{1,2})[0-9]{1,2}|/dev/nvme[0-9]n[0-9]p[0-9]'
+      with_together:
+        - "{{ osd_to_kill.split(',') }}"
+        - "{{ osd_hosts }}"
+      register: osd_to_kill_disks_dedicated_non_container
+      delegate_to: "{{ item.1 }}"
+      when:
+        - not containerized_deployment
+
     # if nvme then osd_to_kill_disks is nvme0n1, we need nvme0
     # if ssd or hdd then osd_to_kill_disks is sda1, we need sda
     - name: stop osd services (container)
@@ -156,6 +179,23 @@
       when:
         - containerized_deployment
 
+    - name: zap ceph osd partitions from dedicated devices
+      shell: |
+        docker run --rm \
+        --privileged=true \
+        --name ceph-osd-zap-{{ ansible_hostname }}-{{ item.1.stdout | regex_replace('/', '') }} \
+        -v /dev/:/dev/ \
+        -e OSD_DEVICE={{ item.1.stdout }} \
+        {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} \
+        zap_device
+      with_together:
+        - "{{ osd_hosts }}"
+        - "{{ osd_to_kill_disks_dedicated.results }}"
+      delegate_to: "{{ item.0 }}"
+      when:
+        - containerized_deployment
+        - item.1.stdout | default('') | length > 0
+
     - name: deactivating osd(s)
       command: ceph-disk deactivate --cluster {{ cluster }} --deactivate-by-id {{ item.0 }} --mark-out
       run_once: true
@@ -192,6 +232,23 @@
       delegate_to: "{{ groups[mon_group_name][0] }}"
       with_items: "{{ osd_to_kill.split(',') }}"
 
+    - name: zap dedicated partitions
+      shell: |
+        for part in {{ item.1.stdout_lines | join(' ') }}; do
+          pkname=$(lsblk --nodeps -no PKNAME $part)
+          wipefs --all $part
+          dd if=/dev/zero of=$part bs=1M count=10
+          partition_nb=$(echo $part | grep -oE '[0-9]{1,2}$')
+          sgdisk --delete $partition_nb /dev/$pkname
+        done
+      with_together:
+        - "{{ osd_hosts }}"
+        - "{{ osd_to_kill_disks_dedicated_non_container.results }}"
+      delegate_to: "{{ item.0 }}"
+      when:
+        - not containerized_deployment
+        - item.1.stdout | default('') | length > 0
+
     - name: show ceph health
       command: "{{ docker_exec_cmd }} ceph --cluster {{ cluster }} -s"
       delegate_to: "{{ groups[mon_group_name][0] }}"
--
2.39.5
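
For illustration, the discovery tasks above rest on ceph-disk's
human-readable listing. Assuming a hypothetical osd.1 whose journal sits on
a dedicated device, ceph-disk list prints something roughly like:

    /dev/sda :
     /dev/sda1 ceph data, active, cluster ceph, osd.1, journal /dev/sdb1
    /dev/sdb :
     /dev/sdb1 ceph journal, for /dev/sda1

grep osd.1 keeps the data line, and the trailing
grep -Eo '/dev/([hsv]d[a-z]{1,2})[0-9]{1,2}|/dev/nvme[0-9]n[0-9]p[0-9]'
pulls every partition path off it (/dev/sda1 and /dev/sdb1 here); those
paths are what the zap tasks later consume via stdout/stdout_lines.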
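
As a worked sketch of the non-container zap, assuming a hypothetical
dedicated journal partition /dev/sdb2, one iteration of the shell loop
expands to:

    pkname=$(lsblk --nodeps -no PKNAME /dev/sdb2)             # parent disk: "sdb"
    wipefs --all /dev/sdb2                                    # clear filesystem signatures
    dd if=/dev/zero of=/dev/sdb2 bs=1M count=10               # zero the first 10 MiB
    partition_nb=$(echo /dev/sdb2 | grep -oE '[0-9]{1,2}$')   # partition number: "2"
    sgdisk --delete $partition_nb /dev/$pkname                # drop GPT entry 2 from sdb

NVMe paths follow the same shape: /dev/nvme0n1p2 gives pkname "nvme0n1" and
partition number "2".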