- name: confirm whether user really meant to purge the cluster
hosts: localhost
gather_facts: false
-
vars_prompt:
- name: ireallymeanit
prompt: Are you sure you want to purge the cluster?
default: 'no'
private: no
-
tasks:
- - name: exit playbook, if user did not mean to purge cluster
- fail:
- msg: >
- "Exiting purge-cluster playbook, cluster was NOT purged.
- To purge the cluster, either say 'yes' on the prompt or
- or use `-e ireallymeanit=yes` on the command line when
- invoking the playbook"
- when: ireallymeanit != 'yes'
+ - name: exit playbook, if user did not mean to purge cluster
+ fail:
+ msg: >
+ "Exiting purge-cluster playbook, cluster was NOT purged.
+ To purge the cluster, either say 'yes' at the prompt or
+ use `-e ireallymeanit=yes` on the command line when
+ invoking the playbook"
+ when: ireallymeanit != 'yes'
-- name: gather facts on all hosts
+- name: gather facts on all hosts
hosts:
- - "{{ mon_group_name|default('mons') }}"
- - "{{ osd_group_name|default('osds') }}"
- - "{{ mds_group_name|default('mdss') }}"
- - "{{ rgw_group_name|default('rgws') }}"
- - "{{ rbdmirror_group_name|default('rbdmirrors') }}"
- - "{{ nfs_group_name|default('nfss') }}"
- - "{{ client_group_name|default('clients') }}"
- - "{{ mgr_group_name|default('mgrs') }}"
+ - "{{ mon_group_name | default('mons') }}"
+ - "{{ osd_group_name | default('osds') }}"
+ - "{{ mds_group_name | default('mdss') }}"
+ - "{{ rgw_group_name | default('rgws') }}"
+ - "{{ rbdmirror_group_name | default('rbdmirrors') }}"
+ - "{{ nfs_group_name | default('nfss') }}"
+ - "{{ client_group_name | default('clients') }}"
+ - "{{ mgr_group_name | default('mgrs') }}"
- grafana-server
become: true
-
tasks:
- - debug: msg="gather facts on all Ceph hosts for following reference"
+ - debug:
+ msg: "gather facts on all Ceph hosts for following reference"
+
- name: check there are no ceph kernel threads present
- hosts: "{{ client_group_name|default('clients') }}"
+ hosts: "{{ client_group_name | default('clients') }}"
become: true
any_errors_fatal: true
-
tasks:
- import_role:
name: ceph-defaults
- ceph
- libceph
-- name: purge ceph nfs cluster
+- name: purge ceph nfs cluster
vars:
nfs_group_name: nfss
-
- hosts: "{{ nfs_group_name|default('nfss') }}"
-
+ hosts: "{{ nfs_group_name | default('nfss') }}"
gather_facts: false # Already gathered previously
-
become: true
-
tasks:
+ - name: stop ceph nfss with systemd
+ service:
+ name: nfs-ganesha
+ state: stopped
+ failed_when: false
+ when: ansible_facts['service_mgr'] == 'systemd'
- - name: stop ceph nfss with systemd
- service:
- name: nfs-ganesha
- state: stopped
- failed_when: false
- when: ansible_facts['service_mgr'] == 'systemd'
- name: purge node-exporter
hosts:
- - "{{ mon_group_name|default('mons') }}"
- - "{{ osd_group_name|default('osds') }}"
- - "{{ mds_group_name|default('mdss') }}"
- - "{{ rgw_group_name|default('rgws') }}"
- - "{{ rbdmirror_group_name|default('rbdmirrors') }}"
- - "{{ nfs_group_name|default('nfss') }}"
- - "{{ client_group_name|default('clients') }}"
- - "{{ mgr_group_name|default('mgrs') }}"
+ - "{{ mon_group_name | default('mons') }}"
+ - "{{ osd_group_name | default('osds') }}"
+ - "{{ mds_group_name | default('mdss') }}"
+ - "{{ rgw_group_name | default('rgws') }}"
+ - "{{ rbdmirror_group_name | default('rbdmirrors') }}"
+ - "{{ nfs_group_name | default('nfss') }}"
+ - "{{ client_group_name | default('clients') }}"
+ - "{{ mgr_group_name | default('mgrs') }}"
- grafana-server
- clients
- iscsigws
-
become: true
-
tasks:
- import_role:
name: ceph-defaults
- grafana-server
- prometheus
- alertmanager
-
tasks:
- import_role:
name: ceph-defaults
- name: purge ceph mds cluster
-
vars:
mds_group_name: mdss
-
- hosts: "{{ mds_group_name|default('mdss') }}"
-
+ hosts: "{{ mds_group_name | default('mdss') }}"
gather_facts: false # Already gathered previously
-
become: true
-
tasks:
-
- - name: stop ceph mdss with systemd
- service:
- name: ceph-mds@{{ ansible_facts['hostname'] }}
- state: stopped
- enabled: no
- failed_when: false
+ - name: stop ceph mdss with systemd
+ service:
+ name: ceph-mds@{{ ansible_facts['hostname'] }}
+ state: stopped
+ enabled: no
+ failed_when: false
- name: purge ceph mgr cluster
-
vars:
mgr_group_name: mgrs
-
- hosts: "{{ mgr_group_name|default('mgrs') }}"
-
+ hosts: "{{ mgr_group_name | default('mgrs') }}"
gather_facts: false # Already gathered previously
-
become: true
-
tasks:
+ - name: stop ceph mgrs with systemd
+ service:
+ name: ceph-mgr@{{ ansible_facts['hostname'] }}
+ state: stopped
+ enabled: no
+ failed_when: false
+ when: ansible_facts['service_mgr'] == 'systemd'
- - name: stop ceph mgrs with systemd
- service:
- name: ceph-mgr@{{ ansible_facts['hostname'] }}
- state: stopped
- enabled: no
- failed_when: false
- when: ansible_facts['service_mgr'] == 'systemd'
- name: purge rgwloadbalancer cluster
-
vars:
rgwloadbalancer_group_name: rgwloadbalancers
-
hosts:
- - "{{ rgwloadbalancer_group_name|default('rgwloadbalancers') }}"
-
+ - "{{ rgwloadbalancer_group_name | default('rgwloadbalancers') }}"
gather_facts: false # Already gathered previously
-
become: true
-
tasks:
+ - name: stop rgwloadbalancer services
+ service:
+ name: ['keepalived', 'haproxy']
+ state: stopped
+ enabled: no
+ failed_when: false
- - name: stop rgwloadbalancer services
- service:
- name: ['keepalived', 'haproxy']
- state: stopped
- enabled: no
- failed_when: false
- name: purge ceph rgw cluster
-
vars:
rgw_group_name: rgws
-
- hosts: "{{ rgw_group_name|default('rgws') }}"
-
+ hosts: "{{ rgw_group_name | default('rgws') }}"
gather_facts: false # Already gathered previously
-
become: true
-
tasks:
- import_role:
name: ceph-defaults
- name: purge ceph rbd-mirror cluster
-
vars:
rbdmirror_group_name: rbdmirrors
-
- hosts: "{{ rbdmirror_group_name|default('rbdmirrors') }}"
-
+ hosts: "{{ rbdmirror_group_name | default('rbdmirrors') }}"
gather_facts: false # Already gathered previously
-
become: true
-
tasks:
-
- - name: stop ceph rbd mirror with systemd
- service:
- name: "ceph-rbd-mirror@rbd-mirror.{{ ansible_facts['hostname'] }}"
- state: stopped
- failed_when: false
+ - name: stop ceph rbd mirror with systemd
+ service:
+ name: "ceph-rbd-mirror@rbd-mirror.{{ ansible_facts['hostname'] }}"
+ state: stopped
+ failed_when: false
- name: purge ceph osd cluster
-
vars:
osd_group_name: osds
reboot_osd_node: False
+ hosts: "{{ osd_group_name | default('osds') }}"
+ gather_facts: false # Already gathered previously
+ become: true
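+ # The handlers below reboot the OSD node, wait for SSH to come back, then
+ # remove any leftover data under /var/lib/ceph; they only run when notified
+ # by the "is reboot needed" task further down.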
+ handlers:
+ - name: restart machine
+ shell: sleep 2 && shutdown -r now "Ansible updates triggered"
+ async: 1
+ poll: 0
+ ignore_errors: true
+
+ - name: wait for server to boot
+ become: false
+ wait_for:
+ port: 22
+ host: "{{ hostvars[inventory_hostname]['ansible_facts']['default_ipv4']['address'] }}"
+ state: started
+ delay: 10
+ timeout: 500
+ delegate_to: localhost
+
+ - name: remove data
+ shell: rm -rf /var/lib/ceph/* # noqa 302
+ tasks:
+ - import_role:
+ name: ceph-defaults
- hosts: "{{ osd_group_name|default('osds') }}"
+ - name: default lvm_volumes if not defined
+ set_fact:
+ lvm_volumes: []
+ when: lvm_volumes is not defined
- gather_facts: false # Already gathered previously
+ - name: get osd numbers
+ shell: if [ -d /var/lib/ceph/osd ] ; then ls /var/lib/ceph/osd | sed 's/.*-//' ; fi # noqa 306
+ register: osd_ids
+ changed_when: false
- become: true
+ - name: stop ceph-osd with systemd
+ service:
+ name: ceph-osd@{{ item }}
+ state: stopped
+ enabled: no
+ with_items: "{{ osd_ids.stdout_lines }}"
+ when: ansible_facts['service_mgr'] == 'systemd'
- handlers:
- - name: restart machine
- shell: sleep 2 && shutdown -r now "Ansible updates triggered"
- async: 1
- poll: 0
- ignore_errors: true
-
- - name: wait for server to boot
- become: false
- wait_for:
- port: 22
- host: "{{ hostvars[inventory_hostname]['ansible_facts']['default_ipv4']['address'] }}"
- state: started
- delay: 10
- timeout: 500
+ - name: remove ceph udev rules
+ file:
+ path: "{{ item }}"
+ state: absent
+ with_items:
+ - /usr/lib/udev/rules.d/95-ceph-osd.rules
+ - /usr/lib/udev/rules.d/60-ceph-by-parttypeuuid.rules
+
+ # NOTE(leseb): hope someone will find a more elegant way one day...
+ - name: see if encrypted partitions are present
+ shell: blkid -t TYPE=crypto_LUKS -s PARTLABEL -s PARTUUID | grep "ceph.*." | grep -o PARTUUID.* | cut -d '"' -f 2 # noqa 306
+ register: encrypted_ceph_partuuid
+ changed_when: false
+
+ - name: get osd data and lockbox mount points
+ shell: (grep /var/lib/ceph/osd /proc/mounts || echo -n) | awk '{ print $2 }' # noqa 306
+ register: mounted_osd
+ changed_when: false
+
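+ # Writing 3 to /proc/sys/vm/drop_caches frees the page cache plus dentries
+ # and inodes so the OSD data partitions can be unmounted cleanly below.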
+ - name: drop all cache
+ shell: "sync && sleep 1 && echo 3 > /proc/sys/vm/drop_caches"
+ changed_when: false
+
+ - name: umount osd data partition
+ mount:
+ path: "{{ item }}"
+ state: unmounted
+ with_items: "{{ mounted_osd.stdout_lines }}"
+
+ - name: remove osd mountpoint tree
+ file:
+ path: /var/lib/ceph/osd/
+ state: absent
+ register: remove_osd_mountpoints
+ ignore_errors: true
+
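+ # The echo below is a no-op run on the controller; its only purpose is to
+ # notify the reboot/wait/remove-data handlers when removing the mountpoint
+ # tree failed and reboot_osd_node is set.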
+ - name: is reboot needed
+ command: echo requesting reboot
delegate_to: localhost
+ become: false
+ notify:
+ - restart machine
+ - wait for server to boot
+ - remove data
+ when:
+ - reboot_osd_node | bool
+ - remove_osd_mountpoints.failed is defined
+
+ - name: wipe table on dm-crypt devices
+ command: dmsetup wipe_table --force "{{ item }}"
+ with_items: "{{ encrypted_ceph_partuuid.stdout_lines }}"
+ when: encrypted_ceph_partuuid.stdout_lines | length > 0
+
+ - name: delete dm-crypt devices if any
+ command: dmsetup remove --retry --force {{ item }}
+ with_items: "{{ encrypted_ceph_partuuid.stdout_lines }}"
+ when: encrypted_ceph_partuuid.stdout_lines | length > 0
+
+ - name: get payload_offset
+ shell: cryptsetup luksDump /dev/disk/by-partuuid/{{ item }} | awk '/Payload offset:/ { print $3 }' # noqa 306
+ register: payload_offset
+ with_items: "{{ encrypted_ceph_partuuid.stdout_lines }}"
+ when: encrypted_ceph_partuuid.stdout_lines | length > 0
+
+ - name: get physical sector size
+ command: blockdev --getpbsz /dev/disk/by-partuuid/{{ item }}
+ changed_when: false
+ with_items: "{{ encrypted_ceph_partuuid.stdout_lines }}"
+ when: encrypted_ceph_partuuid.stdout_lines | length > 0
+ register: phys_sector_size
- - name: remove data
- shell: rm -rf /var/lib/ceph/* # noqa 302
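+ # Zeroing payload_offset blocks of physical-sector size from the start of
+ # each encrypted partition overwrites the LUKS header and key slots, making
+ # the encrypted data unrecoverable.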
+ - name: wipe dmcrypt device
+ command: dd if=/dev/zero of=/dev/disk/by-partuuid/{{ item.0 }} bs={{ item.1.stdout }} count={{ item.2.stdout }} oflag=direct
+ changed_when: false
+ with_together:
+ - "{{ encrypted_ceph_partuuid.stdout_lines }}"
+ - "{{ payload_offset.results }}"
+ - "{{ phys_sector_size.results }}"
+
+ - name: get ceph data partitions
+ shell: |
+ blkid -o device -t PARTLABEL="ceph data"
+ changed_when: false
+ failed_when: false
+ register: ceph_data_partition_to_erase_path
- tasks:
+ - name: get ceph lockbox partitions
+ shell: |
+ blkid -o device -t PARTLABEL="ceph lockbox"
+ changed_when: false
+ failed_when: false
+ register: ceph_lockbox_partition_to_erase_path
- - import_role:
- name: ceph-defaults
-
- - name: default lvm_volumes if not defined
- set_fact:
- lvm_volumes: []
- when: lvm_volumes is not defined
-
- - name: get osd numbers
- shell: if [ -d /var/lib/ceph/osd ] ; then ls /var/lib/ceph/osd | sed 's/.*-//' ; fi # noqa 306
- register: osd_ids
- changed_when: false
-
- - name: stop ceph-osd with systemd
- service:
- name: ceph-osd@{{ item }}
- state: stopped
- enabled: no
- with_items: "{{ osd_ids.stdout_lines }}"
- when: ansible_facts['service_mgr'] == 'systemd'
-
- - name: remove ceph udev rules
- file:
- path: "{{ item }}"
- state: absent
- with_items:
- - /usr/lib/udev/rules.d/95-ceph-osd.rules
- - /usr/lib/udev/rules.d/60-ceph-by-parttypeuuid.rules
-
- # NOTE(leseb): hope someone will find a more elegant way one day...
- - name: see if encrypted partitions are present
- shell: blkid -t TYPE=crypto_LUKS -s PARTLABEL -s PARTUUID | grep "ceph.*." | grep -o PARTUUID.* | cut -d '"' -f 2 # noqa 306
- register: encrypted_ceph_partuuid
- changed_when: false
-
- - name: get osd data and lockbox mount points
- shell: (grep /var/lib/ceph/osd /proc/mounts || echo -n) | awk '{ print $2 }' # noqa 306
- register: mounted_osd
- changed_when: false
-
- - name: drop all cache
- shell: "sync && sleep 1 && echo 3 > /proc/sys/vm/drop_caches"
- changed_when: false
-
- - name: umount osd data partition
- mount:
- path: "{{ item }}"
- state: unmounted
- with_items: "{{ mounted_osd.stdout_lines }}"
-
- - name: remove osd mountpoint tree
- file:
- path: /var/lib/ceph/osd/
- state: absent
- register: remove_osd_mountpoints
- ignore_errors: true
-
- - name: is reboot needed
- command: echo requesting reboot
- delegate_to: localhost
- become: false
- notify:
- - restart machine
- - wait for server to boot
- - remove data
- when:
- - reboot_osd_node | bool
- - remove_osd_mountpoints.failed is defined
-
- - name: wipe table on dm-crypt devices
- command: dmsetup wipe_table --force "{{ item }}"
- with_items: "{{ encrypted_ceph_partuuid.stdout_lines }}"
- when: encrypted_ceph_partuuid.stdout_lines | length > 0
-
- - name: delete dm-crypt devices if any
- command: dmsetup remove --retry --force {{ item }}
- with_items: "{{ encrypted_ceph_partuuid.stdout_lines }}"
- when: encrypted_ceph_partuuid.stdout_lines | length > 0
-
- - name: get payload_offset
- shell: cryptsetup luksDump /dev/disk/by-partuuid/{{ item }} | awk '/Payload offset:/ { print $3 }' # noqa 306
- register: payload_offset
- with_items: "{{ encrypted_ceph_partuuid.stdout_lines }}"
- when: encrypted_ceph_partuuid.stdout_lines | length > 0
-
- - name: get physical sector size
- command: blockdev --getpbsz /dev/disk/by-partuuid/{{ item }}
- changed_when: false
- with_items: "{{ encrypted_ceph_partuuid.stdout_lines }}"
- when: encrypted_ceph_partuuid.stdout_lines | length > 0
- register: phys_sector_size
-
- - name: wipe dmcrypt device
- command: dd if=/dev/zero of=/dev/disk/by-partuuid/{{ item.0 }} bs={{ item.1.stdout }} count={{ item.2.stdout }} oflag=direct
- changed_when: false
- with_together:
- - "{{ encrypted_ceph_partuuid.stdout_lines }}"
- - "{{ payload_offset.results }}"
- - "{{ phys_sector_size.results }}"
-
- - name: get ceph data partitions
- shell: |
- blkid -o device -t PARTLABEL="ceph data"
- changed_when: false
- failed_when: false
- register: ceph_data_partition_to_erase_path
-
- - name: get ceph lockbox partitions
- shell: |
- blkid -o device -t PARTLABEL="ceph lockbox"
- changed_when: false
- failed_when: false
- register: ceph_lockbox_partition_to_erase_path
-
- - name: see if ceph-volume is installed # noqa : 305
- shell: command -v ceph-volume
- changed_when: false
- failed_when: false
- register: ceph_volume_present
-
- - name: zap and destroy osds created by ceph-volume with lvm_volumes
- ceph_volume:
- data: "{{ item.data }}"
- data_vg: "{{ item.data_vg|default(omit) }}"
- journal: "{{ item.journal|default(omit) }}"
- journal_vg: "{{ item.journal_vg|default(omit) }}"
- db: "{{ item.db|default(omit) }}"
- db_vg: "{{ item.db_vg|default(omit) }}"
- wal: "{{ item.wal|default(omit) }}"
- wal_vg: "{{ item.wal_vg|default(omit) }}"
- action: "zap"
- environment:
- CEPH_VOLUME_DEBUG: "{{ ceph_volume_debug }}"
- with_items: "{{ lvm_volumes | default([]) }}"
- when: ceph_volume_present.rc == 0
-
- - name: zap and destroy osds created by ceph-volume with devices
- ceph_volume:
- data: "{{ item }}"
- action: "zap"
- environment:
- CEPH_VOLUME_DEBUG: "{{ ceph_volume_debug }}"
- with_items:
- - "{{ devices | default([]) }}"
- - "{{ dedicated_devices | default([]) }}"
- - "{{ bluestore_wal_devices | default([]) }}"
- when: ceph_volume_present.rc == 0
-
- - name: get ceph block partitions
- shell: |
- blkid -o device -t PARTLABEL="ceph block"
- changed_when: false
- failed_when: false
- register: ceph_block_partition_to_erase_path
-
- - name: get ceph journal partitions
- shell: |
- blkid -o device -t PARTLABEL="ceph journal"
- changed_when: false
- failed_when: false
- register: ceph_journal_partition_to_erase_path
-
- - name: get ceph db partitions
- shell: |
- blkid -o device -t PARTLABEL="ceph block.db"
- changed_when: false
- failed_when: false
- register: ceph_db_partition_to_erase_path
-
- - name: get ceph wal partitions
- shell: |
- blkid -o device -t PARTLABEL="ceph block.wal"
- changed_when: false
- failed_when: false
- register: ceph_wal_partition_to_erase_path
-
- - name: set_fact combined_devices_list
- set_fact:
- combined_devices_list: "{{ ceph_data_partition_to_erase_path.stdout_lines +
- ceph_lockbox_partition_to_erase_path.stdout_lines +
- ceph_block_partition_to_erase_path.stdout_lines +
- ceph_journal_partition_to_erase_path.stdout_lines +
- ceph_db_partition_to_erase_path.stdout_lines +
- ceph_wal_partition_to_erase_path.stdout_lines }}"
-
- - name: resolve parent device
- command: lsblk --nodeps -no pkname "{{ item }}"
- register: tmp_resolved_parent_device
- changed_when: false
- with_items: "{{ combined_devices_list }}"
-
- - name: set_fact resolved_parent_device
- set_fact:
- resolved_parent_device: "{{ tmp_resolved_parent_device.results | map(attribute='stdout') | list | unique }}"
-
- - name: wipe partitions
- shell: |
- wipefs --all "{{ item }}"
- dd if=/dev/zero of="{{ item }}" bs=1 count=4096
- changed_when: false
- with_items: "{{ combined_devices_list }}"
-
- - name: zap ceph journal/block db/block wal partitions # noqa 306
- shell: |
- # if the disk passed is a raw device AND the boot system disk
- if parted -s /dev/"{{ item }}" print | grep -sq boot; then
- echo "Looks like /dev/{{ item }} has a boot partition,"
- echo "if you want to delete specific partitions point to the partition instead of the raw device"
- echo "Do not use your system disk!"
- exit 1
- fi
- sgdisk -Z --clear --mbrtogpt -g -- /dev/"{{ item }}"
- dd if=/dev/zero of=/dev/"{{ item }}" bs=1M count=200
- parted -s /dev/"{{ item }}" mklabel gpt
- partprobe /dev/"{{ item }}"
- udevadm settle --timeout=600
- with_items: "{{ resolved_parent_device }}"
- changed_when: false
+ - name: see if ceph-volume is installed # noqa : 305
+ shell: command -v ceph-volume
+ changed_when: false
+ failed_when: false
+ register: ceph_volume_present
+
+ - name: zap and destroy osds created by ceph-volume with lvm_volumes
+ ceph_volume:
+ data: "{{ item.data }}"
+ data_vg: "{{ item.data_vg|default(omit) }}"
+ journal: "{{ item.journal|default(omit) }}"
+ journal_vg: "{{ item.journal_vg|default(omit) }}"
+ db: "{{ item.db|default(omit) }}"
+ db_vg: "{{ item.db_vg|default(omit) }}"
+ wal: "{{ item.wal|default(omit) }}"
+ wal_vg: "{{ item.wal_vg|default(omit) }}"
+ action: "zap"
+ environment:
+ CEPH_VOLUME_DEBUG: "{{ ceph_volume_debug }}"
+ with_items: "{{ lvm_volumes | default([]) }}"
+ when: ceph_volume_present.rc == 0
+
+ - name: zap and destroy osds created by ceph-volume with devices
+ ceph_volume:
+ data: "{{ item }}"
+ action: "zap"
+ environment:
+ CEPH_VOLUME_DEBUG: "{{ ceph_volume_debug }}"
+ with_items:
+ - "{{ devices | default([]) }}"
+ - "{{ dedicated_devices | default([]) }}"
+ - "{{ bluestore_wal_devices | default([]) }}"
+ when: ceph_volume_present.rc == 0
+
+ - name: get ceph block partitions
+ shell: |
+ blkid -o device -t PARTLABEL="ceph block"
+ changed_when: false
+ failed_when: false
+ register: ceph_block_partition_to_erase_path
-- name: purge ceph mon cluster
+ - name: get ceph journal partitions
+ shell: |
+ blkid -o device -t PARTLABEL="ceph journal"
+ changed_when: false
+ failed_when: false
+ register: ceph_journal_partition_to_erase_path
- vars:
- mon_group_name: mons
+ - name: get ceph db partitions
+ shell: |
+ blkid -o device -t PARTLABEL="ceph block.db"
+ changed_when: false
+ failed_when: false
+ register: ceph_db_partition_to_erase_path
- hosts: "{{ mon_group_name|default('mons') }}"
+ - name: get ceph wal partitions
+ shell: |
+ blkid -o device -t PARTLABEL="ceph block.wal"
+ changed_when: false
+ failed_when: false
+ register: ceph_wal_partition_to_erase_path
+
+ - name: set_fact combined_devices_list
+ set_fact:
+ combined_devices_list: "{{ ceph_data_partition_to_erase_path.stdout_lines +
+ ceph_lockbox_partition_to_erase_path.stdout_lines +
+ ceph_block_partition_to_erase_path.stdout_lines +
+ ceph_journal_partition_to_erase_path.stdout_lines +
+ ceph_db_partition_to_erase_path.stdout_lines +
+ ceph_wal_partition_to_erase_path.stdout_lines }}"
+
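+ # lsblk --nodeps -no pkname prints the parent disk of each partition so the
+ # whole devices can be checked and re-labelled afterwards.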
+ - name: resolve parent device
+ command: lsblk --nodeps -no pkname "{{ item }}"
+ register: tmp_resolved_parent_device
+ changed_when: false
+ with_items: "{{ combined_devices_list }}"
- gather_facts: false # already gathered previously
+ - name: set_fact resolved_parent_device
+ set_fact:
+ resolved_parent_device: "{{ tmp_resolved_parent_device.results | map(attribute='stdout') | list | unique }}"
+
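+ # wipefs --all clears filesystem and other signatures from each partition,
+ # then dd zeroes its first 4096 bytes.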
+ - name: wipe partitions
+ shell: |
+ wipefs --all "{{ item }}"
+ dd if=/dev/zero of="{{ item }}" bs=1 count=4096
+ changed_when: false
+ with_items: "{{ combined_devices_list }}"
+
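+ # The two tasks below abort the purge if any resolved parent device carries
+ # a partition flagged as bootable, so a system disk is never zapped.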
+ - name: check parent device partition
+ parted:
+ device: "/dev/{{ item }}"
+ loop: "{{ resolved_parent_device }}"
+ register: parted_info
+
+ - name: fail if there is a boot partition on the device
+ fail:
+ msg: "{{ item.item }} has a boot partition"
+ loop: "{{ parted_info.results }}"
+ when: "'boot' in (item.partitions | map(attribute='flags') | list | flatten)"
+
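+ # sgdisk -Z destroys the GPT and MBR structures, dd clears the first 200 MiB,
+ # then a fresh GPT label is written and the kernel re-reads the partition
+ # table.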
+ - name: zap ceph journal/block db/block wal partitions # noqa 306
+ shell: |
+ sgdisk -Z --clear --mbrtogpt -g -- /dev/"{{ item }}"
+ dd if=/dev/zero of=/dev/"{{ item }}" bs=1M count=200
+ parted -s /dev/"{{ item }}" mklabel gpt
+ partprobe /dev/"{{ item }}"
+ udevadm settle --timeout=600
+ with_items: "{{ resolved_parent_device }}"
+ changed_when: false
- become: true
+- name: purge ceph mon cluster
+ vars:
+ mon_group_name: mons
+ hosts: "{{ mon_group_name|default('mons') }}"
+ gather_facts: false # already gathered previously
+ become: true
tasks:
+ - name: stop ceph mons with systemd
+ service:
+ name: "ceph-{{ item }}@{{ ansible_facts['hostname'] }}"
+ state: stopped
+ enabled: no
+ failed_when: false
+ with_items:
+ - mon
+ - mgr
+
+ - name: remove monitor store and bootstrap keys
+ file:
+ path: "{{ item }}"
+ state: absent
+ with_items:
+ - /var/lib/ceph/mon
+ - /var/lib/ceph/bootstrap-mds
+ - /var/lib/ceph/bootstrap-osd
+ - /var/lib/ceph/bootstrap-rgw
+ - /var/lib/ceph/bootstrap-rbd
+ - /var/lib/ceph/bootstrap-mgr
+ - /var/lib/ceph/tmp
- - name: stop ceph mons with systemd
- service:
- name: "ceph-{{ item }}@{{ ansible_facts['hostname'] }}"
- state: stopped
- enabled: no
- failed_when: false
- with_items:
- - mon
- - mgr
-
-
- - name: remove monitor store and bootstrap keys
- file:
- path: "{{ item }}"
- state: absent
- with_items:
- - /var/lib/ceph/mon
- - /var/lib/ceph/bootstrap-mds
- - /var/lib/ceph/bootstrap-osd
- - /var/lib/ceph/bootstrap-rgw
- - /var/lib/ceph/bootstrap-rbd
- - /var/lib/ceph/bootstrap-mgr
- - /var/lib/ceph/tmp
- name: purge ceph-crash daemons
hosts:
- name: final cleanup - check for any running ceph processes, purge ceph packages, purge config and remove data
-
vars:
# When set to true both groups of packages are purged.
# This can cause problems with qemu-kvm.
purge_all_packages: true
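+ # It can be overridden at run time with `-e purge_all_packages=false`.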
-
ceph_packages:
- ceph
- ceph-common
- ceph-radosgw
- calamari-server
- ceph-grafana-dashboards
-
+ - rbd-mirror
ceph_remaining_packages:
- libcephfs1
- libcephfs2
extra_packages:
- keepalived
- haproxy
-
hosts:
- - "{{ mon_group_name|default('mons') }}"
- - "{{ osd_group_name|default('osds') }}"
- - "{{ mds_group_name|default('mdss') }}"
- - "{{ rgw_group_name|default('rgws') }}"
- - "{{ rbdmirror_group_name|default('rbdmirrors') }}"
- - "{{ nfs_group_name|default('nfss') }}"
- - "{{ client_group_name|default('clients') }}"
- - "{{ mgr_group_name|default('mgrs') }}"
+ - "{{ mon_group_name | default('mons') }}"
+ - "{{ osd_group_name | default('osds') }}"
+ - "{{ mds_group_name | default('mdss') }}"
+ - "{{ rgw_group_name | default('rgws') }}"
+ - "{{ rbdmirror_group_name | default('rbdmirrors') }}"
+ - "{{ nfs_group_name | default('nfss') }}"
+ - "{{ client_group_name | default('clients') }}"
+ - "{{ mgr_group_name | default('mgrs') }}"
- grafana-server
-
gather_facts: false # Already gathered previously
-
become: true
-
handlers:
- - name: get osd data and lockbox mount points
- shell: "(grep /var/lib/ceph/osd /proc/mounts || echo -n) | awk '{ print $2 }'"
- register: mounted_osd
- changed_when: false
- listen: "remove data"
-
- - name: umount osd data partition
- mount:
- path: "{{ item }}"
- state: unmounted
- with_items: "{{ mounted_osd.stdout_lines }}"
- listen: "remove data"
-
- - name: remove data
- shell: rm -rf /var/lib/ceph/* # noqa 302
- listen: "remove data"
-
+ - name: get osd data and lockbox mount points
+ shell: "(grep /var/lib/ceph/osd /proc/mounts || echo -n) | awk '{ print $2 }'"
+ register: mounted_osd
+ changed_when: false
+ listen: "remove data"
+
+ - name: umount osd data partition
+ mount:
+ path: "{{ item }}"
+ state: unmounted
+ with_items: "{{ mounted_osd.stdout_lines }}"
+ listen: "remove data"
+
+ - name: remove data
+ shell: rm -rf /var/lib/ceph/* # noqa 302
+ listen: "remove data"
tasks:
+ - name: purge ceph packages with yum
+ yum:
+ name: "{{ ceph_packages }}"
+ state: absent
+ when: ansible_facts['pkg_mgr'] == 'yum'
- - name: purge ceph packages with yum
- yum:
- name: "{{ ceph_packages }}"
- state: absent
- when: ansible_facts['pkg_mgr'] == 'yum'
-
- - name: purge ceph packages with dnf
- dnf:
- name: "{{ ceph_packages }}"
- state: absent
- when: ansible_facts['pkg_mgr'] == 'dnf'
-
- - name: purge ceph packages with apt
- apt:
- name: "{{ ceph_packages }}"
- state: absent
- purge: true
- when: ansible_facts['pkg_mgr'] == 'apt'
-
- - name: purge remaining ceph packages with yum
- yum:
- name: "{{ ceph_remaining_packages }}"
- state: absent
- when:
- - ansible_facts['pkg_mgr'] == 'yum'
- - purge_all_packages | bool
-
- - name: purge remaining ceph packages with dnf
- dnf:
- name: "{{ ceph_remaining_packages }}"
- state: absent
- when:
- - ansible_facts['pkg_mgr'] == 'dnf'
- - purge_all_packages | bool
-
- - name: purge remaining ceph packages with apt
- apt:
- name: "{{ ceph_remaining_packages }}"
- state: absent
- when:
- - ansible_facts['pkg_mgr'] == 'apt'
- - purge_all_packages | bool
-
- - name: purge extra packages with yum
- yum:
- name: "{{ extra_packages }}"
- state: absent
- when:
- - ansible_facts['pkg_mgr'] == 'yum'
- - purge_all_packages | bool
-
- - name: purge extra packages with dnf
- dnf:
- name: "{{ extra_packages }}"
- state: absent
- when:
- - ansible_facts['pkg_mgr'] == 'dnf'
- - purge_all_packages | bool
-
- - name: purge extra packages with apt
- apt:
- name: "{{ extra_packages }}"
- state: absent
- when:
- - ansible_facts['pkg_mgr'] == 'apt'
- - purge_all_packages | bool
-
- - name: remove config and any ceph socket left
- file:
- path: "{{ item }}"
- state: absent
- with_items:
- - /etc/ceph
- - /etc/keepalived
- - /etc/haproxy
- - /run/ceph
-
- - name: remove logs
- file:
- path: /var/log/ceph
- state: absent
-
- - name: request data removal
- command: echo requesting data removal # noqa 301
- become: false
- delegate_to: localhost
- notify: remove data
-
- - name: purge dnf cache
- command: dnf clean all
- when: ansible_facts['pkg_mgr'] == 'dnf'
-
- - name: purge rpm cache in /tmp
- file:
- path: /tmp/rh-storage-repo
- state: absent
-
- - name: clean apt
- command: apt-get clean # noqa 303
- when: ansible_facts['pkg_mgr'] == 'apt'
-
- - name: purge ceph repo file in /etc/yum.repos.d
- file:
- path: '/etc/yum.repos.d/{{ item }}.repo'
- state: absent
- with_items:
- - ceph-dev
- - ceph_stable
- - rh_storage
- when: ansible_facts['os_family'] == 'RedHat'
-
- - name: check for anything running ceph
- command: "ps -u ceph -U ceph"
- register: check_for_running_ceph
- changed_when: false
- failed_when: check_for_running_ceph.rc == 0
-
- - name: find ceph systemd unit files to remove
- find:
- paths: "/etc/systemd/system"
- pattern: "ceph*"
- recurse: true
- file_type: any
- register: systemd_files
-
- - name: remove ceph systemd unit files
- file:
- path: "{{ item.path }}"
- state: absent
- with_items: "{{ systemd_files.files }}"
- when: ansible_facts['service_mgr'] == 'systemd'
+ - name: purge ceph packages with dnf
+ dnf:
+ name: "{{ ceph_packages }}"
+ state: absent
+ when: ansible_facts['pkg_mgr'] == 'dnf'
+ - name: purge ceph packages with apt
+ apt:
+ name: "{{ ceph_packages }}"
+ state: absent
+ purge: true
+ when: ansible_facts['pkg_mgr'] == 'apt'
-- name: purge fetch directory
+ - name: purge remaining ceph packages with yum
+ yum:
+ name: "{{ ceph_remaining_packages }}"
+ state: absent
+ when:
+ - ansible_facts['pkg_mgr'] == 'yum'
+ - purge_all_packages | bool
- hosts: localhost
+ - name: purge remaining ceph packages with dnf
+ dnf:
+ name: "{{ ceph_remaining_packages }}"
+ state: absent
+ when:
+ - ansible_facts['pkg_mgr'] == 'dnf'
+ - purge_all_packages | bool
- gather_facts: false
+ - name: purge remaining ceph packages with apt
+ apt:
+ name: "{{ ceph_remaining_packages }}"
+ state: absent
+ when:
+ - ansible_facts['pkg_mgr'] == 'apt'
+ - purge_all_packages | bool
- tasks:
+ - name: purge extra packages with yum
+ yum:
+ name: "{{ extra_packages }}"
+ state: absent
+ when:
+ - ansible_facts['pkg_mgr'] == 'yum'
+ - purge_all_packages | bool
+
+ - name: purge extra packages with dnf
+ dnf:
+ name: "{{ extra_packages }}"
+ state: absent
+ when:
+ - ansible_facts['pkg_mgr'] == 'dnf'
+ - purge_all_packages | bool
+
+ - name: purge extra packages with apt
+ apt:
+ name: "{{ extra_packages }}"
+ state: absent
+ when:
+ - ansible_facts['pkg_mgr'] == 'apt'
+ - purge_all_packages | bool
+
+ - name: remove config and any ceph socket left
+ file:
+ path: "{{ item }}"
+ state: absent
+ with_items:
+ - /etc/ceph
+ - /etc/keepalived
+ - /etc/haproxy
+ - /run/ceph
- - name: set fetch_directory value if not set
- set_fact:
- fetch_directory: "fetch/"
- when: fetch_directory is not defined
+ - name: remove logs
+ file:
+ path: /var/log/ceph
+ state: absent
+
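+ # The echo below is a controller-side no-op; its only job is to notify the
+ # "remove data" handler chain defined above.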
+ - name: request data removal
+ command: echo requesting data removal # noqa 301
+ become: false
+ delegate_to: localhost
+ notify: remove data
+
+ - name: purge dnf cache
+ command: dnf clean all
+ when: ansible_facts['pkg_mgr'] == 'dnf'
- - name: purge fetch directory for localhost
- file:
- path: "{{ fetch_directory | default('fetch/') }}"
- state: absent
+ - name: clean apt
+ command: apt-get clean # noqa 303
+ when: ansible_facts['pkg_mgr'] == 'apt'
+
+ - name: purge ceph repo file in /etc/yum.repos.d
+ file:
+ path: '/etc/yum.repos.d/{{ item }}.repo'
+ state: absent
+ with_items:
+ - ceph-dev
+ - ceph_stable
+ when: ansible_facts['os_family'] == 'RedHat'
+
+ - name: check for anything running ceph
+ command: "ps -u ceph -U ceph"
+ register: check_for_running_ceph
+ changed_when: false
+ failed_when: check_for_running_ceph.rc == 0
+
+ - name: find ceph systemd unit files to remove
+ find:
+ paths: "/etc/systemd/system"
+ pattern: "ceph*"
+ recurse: true
+ file_type: any
+ register: systemd_files
+
+ - name: remove ceph systemd unit files
+ file:
+ path: "{{ item.path }}"
+ state: absent
+ with_items: "{{ systemd_files.files }}"
+ when: ansible_facts['service_mgr'] == 'systemd'
+
+
+- name: purge fetch directory
+ hosts: localhost
+ gather_facts: false
+ tasks:
+ - name: set fetch_directory value if not set
+ set_fact:
+ fetch_directory: "fetch/"
+ when: fetch_directory is not defined
+
+ - name: purge fetch directory for localhost
+ file:
+ path: "{{ fetch_directory | default('fetch/') }}"
+ state: absent
# It removes: packages, containers, configuration files and ALL THE DATA
- name: confirm whether user really meant to purge the cluster
-
hosts: localhost
-
gather_facts: false
-
vars_prompt:
- name: ireallymeanit
prompt: >
Do you want to continue?
default: 'no'
private: no
-
tasks:
- - name: exit playbook, if user did not mean to purge cluster
- fail:
- msg: >
- "Exiting purge-container-cluster playbook, cluster was NOT purged.
- To purge the cluster, either say 'yes' on the prompt or
- or use `-e ireallymeanit=yes` on the command line when
- invoking the playbook"
- when: ireallymeanit != 'yes'
-
- - name: set ceph_docker_registry value if not set
- set_fact:
- ceph_docker_registry: "docker.io"
- when: ceph_docker_registry is not defined
+ - name: exit playbook, if user did not mean to purge cluster
+ fail:
+ msg: >
+ "Exiting purge-container-cluster playbook, cluster was NOT purged.
+ To purge the cluster, either say 'yes' at the prompt or
+ use `-e ireallymeanit=yes` on the command line when
+ invoking the playbook"
+ when: ireallymeanit != 'yes'
+
+ - name: set ceph_docker_registry value if not set
+ set_fact:
+ ceph_docker_registry: "docker.io"
+ when: ceph_docker_registry is not defined
+
- name: check there are no ceph kernel threads present
hosts: "{{ client_group_name|default('clients') }}"
become: true
any_errors_fatal: true
-
tasks:
- import_role:
name: ceph-defaults
- name: purge ceph nfs cluster
-
- hosts: "{{ nfs_group_name|default('nfss') }}"
-
+ hosts: "{{ nfs_group_name | default('nfss') }}"
become: true
-
tasks:
+ - name: disable ceph nfs service
+ service:
+ name: "ceph-nfs@{{ ansible_facts['hostname'] }}"
+ state: stopped
+ enabled: no
+ ignore_errors: true
- - name: disable ceph nfs service
- service:
- name: "ceph-nfs@{{ ansible_facts['hostname'] }}"
- state: stopped
- enabled: no
- ignore_errors: true
-
- - name: remove ceph nfs service
- file:
- path: /etc/systemd/system/ceph-nfs@.service
- state: absent
+ - name: remove ceph nfs service
+ file:
+ path: /etc/systemd/system/ceph-nfs@.service
+ state: absent
- - name: remove ceph nfs directories for "{{ ansible_facts['hostname'] }}"
- file:
- path: "{{ item }}"
- state: absent
- with_items:
- - /etc/ganesha
- - /var/lib/nfs/ganesha
- - /var/run/ganesha
+ - name: remove ceph nfs directories for "{{ ansible_facts['hostname'] }}"
+ file:
+ path: "{{ item }}"
+ state: absent
+ with_items:
+ - /etc/ganesha
+ - /var/lib/nfs/ganesha
+ - /var/run/ganesha
- name: purge ceph mds cluster
-
- hosts: "{{ mds_group_name|default('mdss') }}"
-
+ hosts: "{{ mds_group_name | default('mdss') }}"
become: true
-
tasks:
+ - name: disable ceph mds service
+ service:
+ name: "ceph-mds@{{ ansible_facts['hostname'] }}"
+ state: stopped
+ enabled: no
+ ignore_errors: true
- - name: disable ceph mds service
- service:
- name: "ceph-mds@{{ ansible_facts['hostname'] }}"
- state: stopped
- enabled: no
- ignore_errors: true
-
- - name: remove ceph mds service
- file:
- path: /etc/systemd/system/ceph-mds@.service
- state: absent
+ - name: remove ceph mds service
+ file:
+ path: /etc/systemd/system/ceph-mds@.service
+ state: absent
- name: purge ceph iscsigws cluster
-
- hosts: "{{ iscsi_gw_group_name|default('iscsigws') }}"
+ hosts: "{{ iscsi_gw_group_name | default('iscsigws') }}"
become: true
tasks:
+ - name: disable ceph iscsigw services
+ service:
+ name: "{{ item }}"
+ state: stopped
+ enabled: no
+ ignore_errors: true
+ with_items:
+ - rbd-target-api
+ - rbd-target-gw
+ - tcmu-runner
- - name: disable ceph iscsigw services
- service:
- name: "{{ item }}"
- state: stopped
- enabled: no
- ignore_errors: true
- with_items:
- - rbd-target-api
- - rbd-target-gw
- - tcmu-runner
-
- - name: remove ceph iscsigw systemd unit files
- file:
- path: /etc/systemd/system/{{ item }}.service
- state: absent
- ignore_errors: true
- with_items:
- - rbd-target-api
- - rbd-target-gw
- - tcmu-runner
+ - name: remove ceph iscsigw systemd unit files
+ file:
+ path: /etc/systemd/system/{{ item }}.service
+ state: absent
+ ignore_errors: true
+ with_items:
+ - rbd-target-api
+ - rbd-target-gw
+ - tcmu-runner
- name: purge ceph mgr cluster
-
- hosts: "{{ mgr_group_name|default('mgrs') }}"
+ hosts: "{{ mgr_group_name | default('mgrs') }}"
become: true
tasks:
+ - name: disable ceph mgr service
+ service:
+ name: "ceph-mgr@{{ ansible_facts['hostname'] }}"
+ state: stopped
+ enabled: no
+ ignore_errors: true
- - name: disable ceph mgr service
- service:
- name: "ceph-mgr@{{ ansible_facts['hostname'] }}"
- state: stopped
- enabled: no
- ignore_errors: true
-
- - name: remove ceph mgr service
- file:
- path: /etc/systemd/system/ceph-mgr@.service
- state: absent
+ - name: remove ceph mgr service
+ file:
+ path: /etc/systemd/system/ceph-mgr@.service
+ state: absent
- name: purge ceph rgw cluster
-
- hosts: "{{ rgw_group_name|default('rgws') }}"
-
+ hosts: "{{ rgw_group_name | default('rgws') }}"
become: true
-
tasks:
- import_role:
name: ceph-defaults
- name: purge ceph rbd-mirror cluster
+ hosts: "{{ rbdmirror_group_name | default('rbdmirrors') }}"
+ become: true
+ tasks:
+ - name: disable ceph rbd-mirror service
+ service:
+ name: "ceph-rbd-mirror@rbd-mirror.{{ ansible_facts['hostname'] }}"
+ state: stopped
+ enabled: no
+ ignore_errors: true
- hosts: "{{ rbdmirror_group_name|default('rbdmirrors') }}"
+ - name: remove ceph rbd-mirror service
+ file:
+ path: /etc/systemd/system/ceph-rbd-mirror@.service
+ state: absent
- become: true
+- name: purge ceph osd cluster
+ hosts: "{{ osd_group_name | default('osds') }}"
+ gather_facts: true
+ become: true
tasks:
+ - import_role:
+ name: ceph-defaults
- - name: disable ceph rbd-mirror service
- service:
- name: "ceph-rbd-mirror@rbd-mirror.{{ ansible_facts['hostname'] }}"
- state: stopped
- enabled: no
- ignore_errors: true
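+ # Facts are gathered for every monitor node (delegate_facts) so later roles
+ # in this play can reference them even though the play only targets OSDs.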
+ - name: gather monitors facts
+ setup:
+ gather_subset:
+ - 'all'
+ - '!facter'
+ - '!ohai'
+ delegate_to: "{{ item }}"
+ delegate_facts: True
+ with_items: "{{ groups.get(mon_group_name | default('mons')) }}"
- - name: remove ceph rbd-mirror service
- file:
- path: /etc/systemd/system/ceph-rbd-mirror@.service
- state: absent
+ - import_role:
+ name: ceph-facts
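+ # systemctl list-units output is grepped for ceph-osd@<id>.service so every
+ # running OSD unit can be stopped and disabled below.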
+ - name: get all the running osds
+ shell: systemctl list-units --all | grep -oE "ceph-osd@([0-9]+).service" # noqa 303
+ register: osd_units
+ changed_when: false
+ ignore_errors: true
-- name: purge ceph osd cluster
+ - name: disable ceph osd service
+ service:
+ name: "{{ item }}"
+ state: stopped
+ enabled: no
+ with_items: "{{ osd_units.stdout_lines }}"
- hosts: "{{ osd_group_name | default('osds') }}"
+ - name: remove osd mountpoint tree
+ file:
+ path: /var/lib/ceph/osd/
+ state: absent
+ ignore_errors: true
- gather_facts: true
- become: true
+ - name: default lvm_volumes if not defined
+ set_fact:
+ lvm_volumes: []
+ when: lvm_volumes is not defined
+
+ - name: zap and destroy osds created by ceph-volume with lvm_volumes
+ ceph_volume:
+ data: "{{ item.data }}"
+ data_vg: "{{ item.data_vg|default(omit) }}"
+ journal: "{{ item.journal|default(omit) }}"
+ journal_vg: "{{ item.journal_vg|default(omit) }}"
+ db: "{{ item.db|default(omit) }}"
+ db_vg: "{{ item.db_vg|default(omit) }}"
+ wal: "{{ item.wal|default(omit) }}"
+ wal_vg: "{{ item.wal_vg|default(omit) }}"
+ action: "zap"
+ environment:
+ CEPH_VOLUME_DEBUG: "{{ ceph_volume_debug }}"
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+ with_items: "{{ lvm_volumes }}"
+ when: lvm_volumes | default([]) | length > 0
+
+ - name: zap and destroy osds created by ceph-volume with devices
+ ceph_volume:
+ data: "{{ item }}"
+ action: "zap"
+ environment:
+ CEPH_VOLUME_DEBUG: "{{ ceph_volume_debug }}"
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+ with_items:
+ - "{{ devices | default([]) }}"
+ - "{{ dedicated_devices | default([]) }}"
+ - "{{ bluestore_wal_devices | default([]) }}"
- tasks:
+ - name: remove ceph osd service
+ file:
+ path: /etc/systemd/system/ceph-osd@.service
+ state: absent
- - import_role:
- name: ceph-defaults
-
- - name: gather monitors facts
- setup:
- gather_subset:
- - 'all'
- - '!facter'
- - '!ohai'
- delegate_to: "{{ item }}"
- delegate_facts: True
- with_items: "{{ groups.get(mon_group_name | default('mons')) }}"
-
- - import_role:
- name: ceph-facts
-
- - name: get all the running osds
- shell: systemctl list-units --all | grep -oE "ceph-osd@([0-9]+).service" # noqa 303
- register: osd_units
- changed_when: false
- ignore_errors: true
-
- - name: disable ceph osd service
- service:
- name: "{{ item }}"
- state: stopped
- enabled: no
- with_items: "{{ osd_units.stdout_lines }}"
-
- - name: remove osd mountpoint tree
- file:
- path: /var/lib/ceph/osd/
- state: absent
- register: remove_osd_mountpoints
- ignore_errors: true
-
- - name: default lvm_volumes if not defined
- set_fact:
- lvm_volumes: []
- when: lvm_volumes is not defined
-
- - name: zap and destroy osds created by ceph-volume with lvm_volumes
- ceph_volume:
- data: "{{ item.data }}"
- data_vg: "{{ item.data_vg|default(omit) }}"
- journal: "{{ item.journal|default(omit) }}"
- journal_vg: "{{ item.journal_vg|default(omit) }}"
- db: "{{ item.db|default(omit) }}"
- db_vg: "{{ item.db_vg|default(omit) }}"
- wal: "{{ item.wal|default(omit) }}"
- wal_vg: "{{ item.wal_vg|default(omit) }}"
- action: "zap"
- environment:
- CEPH_VOLUME_DEBUG: "{{ ceph_volume_debug }}"
- CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
- CEPH_CONTAINER_BINARY: "{{ container_binary }}"
- with_items: "{{ lvm_volumes }}"
- when: lvm_volumes | default([]) | length > 0
-
- - name: zap and destroy osds created by ceph-volume with devices
- ceph_volume:
- data: "{{ item }}"
- action: "zap"
- environment:
- CEPH_VOLUME_DEBUG: "{{ ceph_volume_debug }}"
- CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
- CEPH_CONTAINER_BINARY: "{{ container_binary }}"
- with_items:
- - "{{ devices | default([]) }}"
- - "{{ dedicated_devices | default([]) }}"
- - "{{ bluestore_wal_devices | default([]) }}"
-
- - name: remove ceph osd service
- file:
- path: /etc/systemd/system/ceph-osd@.service
- state: absent
-
- - name: include vars from group_vars/osds.yml
- include_vars:
- file: "{{ item }}"
- with_first_found:
- - files:
- - "{{ playbook_dir }}/group_vars/osds"
- - "{{ playbook_dir }}/group_vars/osds.yml"
- skip: true
+ - name: include vars from group_vars/osds.yml
+ include_vars:
+ file: "{{ item }}"
+ with_first_found:
+ - files:
+ - "{{ playbook_dir }}/group_vars/osds"
+ - "{{ playbook_dir }}/group_vars/osds.yml"
+ skip: true
-- name: purge ceph mon cluster
+- name: purge ceph mon cluster
hosts: "{{ mon_group_name|default('mons') }}"
-
become: true
-
tasks:
+ # since mgrs are now collocated with mons by default
+ - name: disable ceph mon and mgr service
+ service:
+ name: "{{ item }}"
+ state: stopped
+ enabled: no
+ ignore_errors: true
+ with_items:
+ - "ceph-mgr@{{ ansible_facts['hostname'] }}"
+ - "ceph-mon@{{ ansible_facts['hostname'] }}"
- # since mgr are now collocated with mons by default
- - name: disable ceph mon and mgr service
- service:
- name: "{{ item }}"
- state: stopped
- enabled: no
- ignore_errors: true
- with_items:
- - "ceph-mgr@{{ ansible_facts['hostname'] }}"
- - "ceph-mon@{{ ansible_facts['hostname'] }}"
-
- - name: remove ceph mon and mgr service
- file:
- path: "/etc/systemd/system/ceph-{{ item }}@.service"
- state: absent
- with_items:
- - mon
- - mgr
+ - name: remove ceph mon and mgr service
+ file:
+ path: "/etc/systemd/system/ceph-{{ item }}@.service"
+ state: absent
+ with_items:
+ - mon
+ - mgr
- name: purge node-exporter
-
hosts:
- - "{{ mon_group_name|default('mons') }}"
- - "{{ osd_group_name|default('osds') }}"
- - "{{ mds_group_name|default('mdss') }}"
- - "{{ rgw_group_name|default('rgws') }}"
- - "{{ rbdmirror_group_name|default('rbdmirrors') }}"
- - "{{ nfs_group_name|default('nfss') }}"
- - "{{ mgr_group_name|default('mgrs') }}"
+ - "{{ mon_group_name | default('mons') }}"
+ - "{{ osd_group_name | default('osds') }}"
+ - "{{ mds_group_name | default('mdss') }}"
+ - "{{ rgw_group_name | default('rgws') }}"
+ - "{{ rbdmirror_group_name | default('rbdmirrors') }}"
+ - "{{ nfs_group_name | default('nfss') }}"
+ - "{{ mgr_group_name | default('mgrs') }}"
- grafana-server
- iscsigws
- clients
-
gather_facts: false
-
become: true
-
tasks:
- import_role:
name: ceph-defaults
- remove_img
when: dashboard_enabled | bool
-- name: purge ceph-grafana
+- name: purge ceph-grafana
hosts: grafana-server
-
gather_facts: false
-
become: true
-
vars:
grafana_services:
- grafana-server
- prometheus
- alertmanager
-
tasks:
- import_role:
name: ceph-defaults
failed_when: false
when: dashboard_enabled | bool
+
- name: purge ceph-crash containers
hosts:
- "{{ mon_group_name | default('mons') }}"
path: /var/lib/ceph/crash
state: absent
-- name: check container hosts
+- name: check container hosts
hosts:
- - "{{ mon_group_name|default('mons') }}"
- - "{{ osd_group_name|default('osds') }}"
- - "{{ mds_group_name|default('mdss') }}"
- - "{{ rgw_group_name|default('rgws') }}"
- - "{{ rbdmirror_group_name|default('rbdmirrors') }}"
- - "{{ nfs_group_name|default('nfss') }}"
- - "{{ mgr_group_name|default('mgrs') }}"
-
+ - "{{ mon_group_name | default('mons') }}"
+ - "{{ osd_group_name | default('osds') }}"
+ - "{{ mds_group_name | default('mdss') }}"
+ - "{{ rgw_group_name | default('rgws') }}"
+ - "{{ rbdmirror_group_name | default('rbdmirrors') }}"
+ - "{{ nfs_group_name | default('nfss') }}"
+ - "{{ mgr_group_name | default('mgrs') }}"
gather_facts: true
become: true
-
tasks:
+ - import_role:
+ name: ceph-defaults
- - import_role:
- name: ceph-defaults
+ - import_role:
+ name: ceph-defaults
- - import_role:
- name: ceph-facts
+ - import_role:
+ name: ceph-facts
- - name: show container list on all the nodes (should be empty)
- command: >
- {{ container_binary }} ps --filter='name=ceph' -a -q
- register: containers_list
- changed_when: false
+ - name: show container list on all the nodes (should be empty)
+ command: >
+ {{ container_binary }} ps --filter='name=ceph' -a -q
+ register: containers_list
+ changed_when: false
- - name: show container images on all the nodes (should be empty if tags was passed remove_img)
- command: >
- {{ container_binary }} images
- register: images_list
- changed_when: false
+ - name: show container images on all the nodes (should be empty if the 'remove_img' tag was passed)
+ command: >
+ {{ container_binary }} images
+ register: images_list
+ changed_when: false
- - name: fail if container are still present
- fail:
- msg: "It looks like container are still present."
- when: containers_list.stdout_lines|length > 0
+ - name: fail if containers are still present
+ fail:
+ msg: "It looks like containers are still present."
+ when: containers_list.stdout_lines | length > 0
- name: final cleanup
-
hosts:
- - "{{ mon_group_name|default('mons') }}"
- - "{{ osd_group_name|default('osds') }}"
- - "{{ mds_group_name|default('mdss') }}"
- - "{{ rgw_group_name|default('rgws') }}"
- - "{{ rbdmirror_group_name|default('rbdmirrors') }}"
- - "{{ nfs_group_name|default('nfss') }}"
- - "{{ mgr_group_name|default('mgrs') }}"
- - "{{ grafana_server_group_name|default('grafana-server') }}"
-
+ - "{{ mon_group_name | default('mons') }}"
+ - "{{ osd_group_name | default('osds') }}"
+ - "{{ mds_group_name | default('mdss') }}"
+ - "{{ rgw_group_name | default('rgws') }}"
+ - "{{ rbdmirror_group_name | default('rbdmirrors') }}"
+ - "{{ nfs_group_name | default('nfss') }}"
+ - "{{ mgr_group_name | default('mgrs') }}"
+ - "{{ grafana_server_group_name | default('grafana-server') }}"
become: true
-
tags: with_pkg
-
tasks:
- import_role:
name: ceph-defaults
- name: purge ceph directories
-
hosts:
- - "{{ mon_group_name|default('mons') }}"
- - "{{ osd_group_name|default('osds') }}"
- - "{{ mds_group_name|default('mdss') }}"
- - "{{ rgw_group_name|default('rgws') }}"
- - "{{ rbdmirror_group_name|default('rbdmirrors') }}"
- - "{{ nfs_group_name|default('nfss') }}"
- - "{{ mgr_group_name|default('mgrs') }}"
-
+ - "{{ mon_group_name | default('mons') }}"
+ - "{{ osd_group_name | default('osds') }}"
+ - "{{ mds_group_name | default('mdss') }}"
+ - "{{ rgw_group_name | default('rgws') }}"
+ - "{{ rbdmirror_group_name | default('rbdmirrors') }}"
+ - "{{ nfs_group_name | default('nfss') }}"
+ - "{{ mgr_group_name | default('mgrs') }}"
gather_facts: false # Already gathered previously
-
become: true
-
tasks:
- - name: purge ceph directories for "{{ ansible_facts['hostname'] }}" and ceph socket
- file:
- path: "{{ item }}"
- state: absent
- with_items:
- - /etc/ceph
- - /var/log/ceph
- - /run/ceph
- - "{{ ceph_osd_docker_run_script_path | default('/usr/share') }}/ceph-osd-run.sh"
-
- - name: remove ceph data
- shell: rm -rf /var/lib/ceph/* # noqa 302
- changed_when: false
-
- # (todo): remove this when we are able to manage docker
- # service on atomic host.
- - name: remove docker data
- shell: rm -rf /var/lib/docker/* # noqa 302
- when: not is_atomic | bool
+ - name: purge ceph directories for "{{ ansible_facts['hostname'] }}" and ceph socket
+ file:
+ path: "{{ item }}"
+ state: absent
+ with_items:
+ - /etc/ceph
+ - /var/log/ceph
+ - /run/ceph
+ - "{{ ceph_osd_docker_run_script_path | default('/usr/share') }}/ceph-osd-run.sh"
-- name: purge fetch directory
+ - name: remove ceph data
+ shell: rm -rf /var/lib/ceph/* # noqa 302
+ changed_when: false
- hosts: localhost
+ # (todo): remove this when we are able to manage docker
+ # service on atomic host.
+ - name: remove docker data
+ shell: rm -rf /var/lib/docker/* # noqa 302
+ when: not is_atomic | bool
- gather_facts: false
+- name: purge fetch directory
+ hosts: localhost
+ gather_facts: false
tasks:
+ - name: set fetch_directory value if not set
+ set_fact:
+ fetch_directory: "fetch/"
+ when: fetch_directory is not defined
- - name: set fetch_directory value if not set
- set_fact:
- fetch_directory: "fetch/"
- when: fetch_directory is not defined
-
- - name: purge fetch directory for localhost
- file:
- path: "{{ fetch_directory | default('fetch/') }}/"
- state: absent
+ - name: purge fetch directory for localhost
+ file:
+ path: "{{ fetch_directory | default('fetch/') }}/"
+ state: absent