tasks:
+ - import_role:
+     name: ceph-defaults
+     private: false
+
+ - import_role:
+     name: ceph-osd
+     private: false
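+
+ # ceph-defaults provides the variables this play relies on, e.g.
+ # ceph_docker_registry, ceph_docker_image, ceph_docker_image_tag and
+ # osd_scenario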
+
- name: get all the running osds
  shell: |
    systemctl list-units | grep 'loaded[[:space:]]\+active' | grep -oE "ceph-osd@([0-9]{1,2}|[a-z]+).service"
  register: osd_units
  ignore_errors: true

- name: disable ceph osd service
  service:
    name: "{{ item }}"
    state: stopped
    enabled: no
  with_items: "{{ osd_units.stdout_lines }}"
- - name: get prepare container
-   command: "docker ps -a -q --filter='name=ceph-osd-prepare'"
-   register: prepare_containers
-   ignore_errors: true
-
- - name: remove ceph osd prepare container
-   command: "docker rm -f {{ item }}"
-   with_items: "{{ prepare_containers.stdout_lines }}"
-   ignore_errors: true
-
- # NOTE(leseb): hope someone will find a more elegant way one day...
- - name: see if encrypted partitions are present
-   shell: |
-     blkid -t TYPE=crypto_LUKS -s PARTLABEL -s PARTUUID | grep "ceph.*." | grep -o PARTUUID.* | cut -d '"' -f 2
-   register: encrypted_ceph_partuuid
-
- name: remove osd mountpoint tree
  file:
    path: /var/lib/ceph/osd/
    state: absent
  register: remove_osd_mountpoints
  ignore_errors: true
- - name: get ceph data partitions
-   command: |
-     blkid -o device -t PARTLABEL="ceph data"
-   failed_when: false
-   register: ceph_data_partition_to_erase_path
-
- - name: get ceph lockbox partitions
-   command: |
-     blkid -o device -t PARTLABEL="ceph lockbox"
-   failed_when: false
-   register: ceph_lockbox_partition_to_erase_path
-
- - name: get ceph block partitions
-   command: |
-     blkid -o device -t PARTLABEL="ceph block"
-   failed_when: false
-   register: ceph_block_partition_to_erase_path
-
- - name: get ceph journal partitions
-   command: |
-     blkid -o device -t PARTLABEL="ceph journal"
-   failed_when: false
-   register: ceph_journal_partition_to_erase_path
-
- - name: get ceph db partitions
-   command: |
-     blkid -o device -t PARTLABEL="ceph block.db"
-   failed_when: false
-   register: ceph_db_partition_to_erase_path
-
- - name: get ceph wal partitions
-   command: |
-     blkid -o device -t PARTLABEL="ceph block.wal"
-   failed_when: false
-   register: ceph_wal_partition_to_erase_path
-
- - name: set_fact combined_devices_list
-   set_fact:
-     combined_devices_list: "{{ ceph_data_partition_to_erase_path.get('stdout_lines', []) +
-                                ceph_lockbox_partition_to_erase_path.get('stdout_lines', []) +
-                                ceph_block_partition_to_erase_path.get('stdout_lines', []) +
-                                ceph_journal_partition_to_erase_path.get('stdout_lines', []) +
-                                ceph_db_partition_to_erase_path.get('stdout_lines', []) +
-                                ceph_wal_partition_to_erase_path.get('stdout_lines', []) }}"
-
- - name: resolve parent device
-   command: lsblk --nodeps -no pkname "{{ item }}"
-   register: tmp_resolved_parent_device
-   with_items:
-     - "{{ combined_devices_list }}"
-
- - name: set_fact resolved_parent_device
-   set_fact:
-     resolved_parent_device: "{{ tmp_resolved_parent_device.results | map(attribute='stdout') | list | unique }}"
-
- - name: zap ceph osd disks
-   shell: |
-     docker run --rm \
-     --privileged=true \
-     --name ceph-osd-zap-{{ ansible_hostname }}-{{ item }} \
-     -v /dev/:/dev/ \
-     -e OSD_DEVICE=/dev/{{ item }} \
-     {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} \
-     zap_device
-   with_items:
-     - "{{ resolved_parent_device }}"
-
- - name: wait until the zap containers die
-   shell: |
-     docker ps | grep -sq ceph-osd-zap-{{ ansible_hostname }}
-   register: zap_alive
-   failed_when: false
-   until: zap_alive.rc != 0
-   retries: 5
-   delay: 10
-
- - name: remove ceph osd zap disk container
-   docker_container:
-     image: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
-     name: "ceph-osd-zap-{{ ansible_hostname }}-{{ item }}"
-     state: absent
-   with_items:
-     - "{{ resolved_parent_device }}"
-
- - name: remove ceph osd service
-   file:
-     path: /etc/systemd/system/ceph-osd@.service
-     state: absent
+ - name: for ceph-disk based deployments
+   block:
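+     # everything in this block only applies to osds deployed with ceph-disk,
+     # i.e. any osd_scenario other than "lvm" (see the "when" at the end of
+     # the block)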
+     - name: get prepare container
+       command: "docker ps -a -q --filter='name=ceph-osd-prepare'"
+       register: prepare_containers
+       ignore_errors: true
+
+     - name: remove ceph osd prepare container
+       command: "docker rm -f {{ item }}"
+       with_items: "{{ prepare_containers.stdout_lines }}"
+       ignore_errors: true
+
+     # NOTE(leseb): hope someone will find a more elegant way one day...
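+     # the pipeline below prints the PARTUUID of every LUKS-encrypted "ceph"
+     # partition, one per line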
+     - name: see if encrypted partitions are present
+       shell: |
+         blkid -t TYPE=crypto_LUKS -s PARTLABEL -s PARTUUID | grep "ceph.*." | grep -o PARTUUID.* | cut -d '"' -f 2
+       register: encrypted_ceph_partuuid
+
+     - name: get ceph data partitions
+       command: |
+         blkid -o device -t PARTLABEL="ceph data"
+       failed_when: false
+       register: ceph_data_partition_to_erase_path
+
+     - name: get ceph lockbox partitions
+       command: |
+         blkid -o device -t PARTLABEL="ceph lockbox"
+       failed_when: false
+       register: ceph_lockbox_partition_to_erase_path
+
+     - name: get ceph block partitions
+       command: |
+         blkid -o device -t PARTLABEL="ceph block"
+       failed_when: false
+       register: ceph_block_partition_to_erase_path
+
+     - name: get ceph journal partitions
+       command: |
+         blkid -o device -t PARTLABEL="ceph journal"
+       failed_when: false
+       register: ceph_journal_partition_to_erase_path
+
+     - name: get ceph db partitions
+       command: |
+         blkid -o device -t PARTLABEL="ceph block.db"
+       failed_when: false
+       register: ceph_db_partition_to_erase_path
+
+     - name: get ceph wal partitions
+       command: |
+         blkid -o device -t PARTLABEL="ceph block.wal"
+       failed_when: false
+       register: ceph_wal_partition_to_erase_path
+
+     - name: set_fact combined_devices_list
+       set_fact:
+         combined_devices_list: "{{ ceph_data_partition_to_erase_path.get('stdout_lines', []) +
+                                    ceph_lockbox_partition_to_erase_path.get('stdout_lines', []) +
+                                    ceph_block_partition_to_erase_path.get('stdout_lines', []) +
+                                    ceph_journal_partition_to_erase_path.get('stdout_lines', []) +
+                                    ceph_db_partition_to_erase_path.get('stdout_lines', []) +
+                                    ceph_wal_partition_to_erase_path.get('stdout_lines', []) }}"
+
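+     # e.g. blkid may report a "ceph data" partition as /dev/sdb1;
+     # "lsblk --nodeps -no pkname /dev/sdb1" then prints "sdb", the parent
+     # disk that actually gets zapped (device names are illustrative)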
+     - name: resolve parent device
+       command: lsblk --nodeps -no pkname "{{ item }}"
+       register: tmp_resolved_parent_device
+       with_items:
+         - "{{ combined_devices_list }}"
+
+     - name: set_fact resolved_parent_device
+       set_fact:
+         resolved_parent_device: "{{ tmp_resolved_parent_device.results | map(attribute='stdout') | list | unique }}"
+
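+     # zap_device is an entrypoint provided by the ceph container image; it
+     # wipes whatever disk is passed in through OSD_DEVICE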
+     - name: zap ceph osd disks
+       shell: |
+         docker run --rm \
+         --privileged=true \
+         --name ceph-osd-zap-{{ ansible_hostname }}-{{ item }} \
+         -v /dev/:/dev/ \
+         -e OSD_DEVICE=/dev/{{ item }} \
+         {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} \
+         zap_device
+       with_items:
+         - "{{ resolved_parent_device }}"
+
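+     # "grep -sq" exits non-zero once no zap container is listed any more, so
+     # the until loop below polls docker ps up to 5 times, 10 seconds apart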
+     - name: wait until the zap containers die
+       shell: |
+         docker ps | grep -sq ceph-osd-zap-{{ ansible_hostname }}
+       register: zap_alive
+       failed_when: false
+       until: zap_alive.rc != 0
+       retries: 5
+       delay: 10
+
+     - name: remove ceph osd zap disk container
+       docker_container:
+         image: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
+         name: "ceph-osd-zap-{{ ansible_hostname }}-{{ item }}"
+         state: absent
+       with_items:
+         - "{{ resolved_parent_device }}"
+
+     - name: remove ceph osd service
+       file:
+         path: /etc/systemd/system/ceph-osd@.service
+         state: absent
+   when:
+     - osd_scenario != "lvm"
+
+ - name: for ceph-volume based deployments
+   block:
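+     # lvm_volumes comes from the inventory; each entry is a dict such as
+     #   - { data: data-lv1, data_vg: vg1, journal: journal-lv1, journal_vg: vg1 }
+     # (illustrative names; only "data" is mandatory)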
+     - name: zap and destroy osds created by ceph-volume with lvm_volumes
+       ceph_volume:
+         data: "{{ item.data }}"
+         data_vg: "{{ item.data_vg|default(omit) }}"
+         journal: "{{ item.journal|default(omit) }}"
+         journal_vg: "{{ item.journal_vg|default(omit) }}"
+         db: "{{ item.db|default(omit) }}"
+         db_vg: "{{ item.db_vg|default(omit) }}"
+         wal: "{{ item.wal|default(omit) }}"
+         wal_vg: "{{ item.wal_vg|default(omit) }}"
+         action: "zap"
+       environment:
+         CEPH_VOLUME_DEBUG: 1
+         CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
+         CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+       with_items: "{{ lvm_volumes }}"
+
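+     # "devices" is the plain list of whole disks from the inventory,
+     # e.g. devices: ['/dev/sdb', '/dev/sdc']; each raw device is zapped
+     # in turn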
+     - name: zap and destroy osds created by ceph-volume with devices
+       ceph_volume:
+         data: "{{ item }}"
+         action: "zap"
+       environment:
+         CEPH_VOLUME_DEBUG: 1
+         CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
+         CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+       with_items: "{{ devices | default([]) }}"
+   when:
+     - osd_scenario == "lvm"
- name: remove ceph osd image
docker_image: