git.apps.os.sepia.ceph.com Git - ceph-ansible.git/commitdiff
purge: ensure no ceph kernel thread is present
author Guillaume Abrioux <gabrioux@redhat.com>
Fri, 21 Jun 2019 14:10:16 +0000 (16:10 +0200)
committer Guillaume Abrioux <gabrioux@redhat.com>
Mon, 24 Jun 2019 08:05:11 +0000 (10:05 +0200)
This first tries to unmount any cephfs/nfs-ganesha mount points on
client nodes, then unmaps any mapped rbd devices, and finally tries to
remove the ceph kernel modules.
If it fails, it means some resources are still busy and should be
cleaned up manually before continuing to purge the cluster.
This is done early in the playbook so the cluster stays untouched until
everything is ready for that operation; otherwise, if you try to
redeploy a cluster, it could end up getting confused by leftovers from
a previous deployment.
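
For reference, if this step fails, the manual cleanup on a client node
roughly amounts to the commands below (a sketch mirroring what the play
runs; modprobe -r stands in for the Ansible modprobe module used in the
playbook, and any leftover nfs-ganesha mount points still have to be
unmounted by hand):

    # unmount every cephfs mount
    umount -a -t ceph
    # unmap every rbd device listed in /etc/ceph/rbdmap
    rbdmap unmap-all
    # unload the ceph kernel modules; fails if a device or mount is still busy
    modprobe -r rbd ceph libceph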

Closes: https://bugzilla.redhat.com/show_bug.cgi?id=1337915
Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>
infrastructure-playbooks/purge-cluster.yml
infrastructure-playbooks/purge-docker-cluster.yml

index a2b0f97f5692debaa73f172c2053f5a7da02b542..5904ba73a0c51664afb9c501d8fc19d9fc8d15c9 100644 (file)
   tasks:
     - debug: msg="gather facts on all Ceph hosts for following reference"
 
+- name: check there's no ceph kernel threads present
+  hosts: "{{ client_group_name|default('clients') }}"
+  become: true
+  any_errors_fatal: true
+
+  tasks:
+    - import_role:
+        name: ceph-defaults
+
+    - block:
+        - name: get nfs nodes ansible facts
+          setup:
+          delegate_to: "{{ item }}"
+          delegate_facts: True
+          with_items: "{{ groups[nfs_group_name] }}"
+          run_once: true
+
+        - name: get all nfs-ganesha mount points
+          command: grep "{{ hostvars[item]['ansible_all_ipv4_addresses'] | ipaddr(public_network) | first }}" /proc/mounts
+          register: nfs_ganesha_mount_points
+          failed_when: false
+          with_items: "{{ groups[nfs_group_name] }}"
+
+        - name: ensure nfs-ganesha mountpoint(s) are unmounted
+          mount:
+            path: "{{ item.split(' ')[1] }}"
+            state: unmounted
+          with_items:
+            - "{{ nfs_ganesha_mount_points.results | map(attribute='stdout_lines') | list }}"
+          when: item | length > 0
+      when: groups[nfs_group_name] | default([]) | length > 0
+
+    - name: ensure cephfs mountpoint(s) are unmounted
+      command: umount -a -t ceph
+
+    - name: ensure rbd devices are unmapped
+      command: rbdmap unmap-all
+
+    - name: unload ceph kernel modules
+      modprobe:
+        name: "{{ item }}"
+        state: absent
+      with_items:
+        - rbd
+        - ceph
+        - libceph
+
+- name: purge ceph nfs cluster
+
+  vars:
+    nfs_group_name: nfss
+
+  hosts: "{{ nfs_group_name|default('nfss') }}"
+
+  gather_facts: false # Already gathered previously
+
+  become: true
+
+  tasks:
+
+  - name: stop ceph nfss with systemd
+    service:
+      name: nfs-ganesha
+      state: stopped
+    failed_when: false
+    when: ansible_service_mgr == 'systemd'
 
 - name: purge node-exporter
   hosts:
     failed_when: false
 
 
-- name: purge ceph nfs cluster
-
-  vars:
-    nfs_group_name: nfss
-
-  hosts: "{{ nfs_group_name|default('nfss') }}"
-
-  gather_facts: false # Already gathered previously
-
-  become: true
-
-  tasks:
-
-  - name: stop ceph nfss with systemd
-    service:
-      name: nfs-ganesha
-      state: stopped
-    failed_when: false
-    when: ansible_service_mgr == 'systemd'
-
-
 - name: purge ceph osd cluster
 
   vars:
index 6a1ed7836a0565e74f8bdb6a57103b17a1a810d8..de6b443f99b7ba1c2cccbd1a361106b35955723e 100644 (file)
       ceph_docker_registry: "docker.io"
     when: ceph_docker_registry is not defined
 
+- name: check there's no ceph kernel threads present
+  hosts: "{{ client_group_name|default('clients') }}"
+  become: true
+  any_errors_fatal: true
+
+  tasks:
+    - import_role:
+        name: ceph-defaults
+
+    - block:
+        - name: get nfs nodes ansible facts
+          setup:
+          delegate_to: "{{ item }}"
+          delegate_facts: True
+          with_items: "{{ groups[nfs_group_name] }}"
+          run_once: true
+
+        - name: get all nfs-ganesha mount points
+          command: grep "{{ hostvars[item]['ansible_all_ipv4_addresses'] | ipaddr(public_network) | first }}" /proc/mounts
+          register: nfs_ganesha_mount_points
+          failed_when: false
+          with_items: "{{ groups[nfs_group_name] }}"
+
+        - name: ensure nfs-ganesha mountpoint(s) are unmounted
+          mount:
+            path: "{{ item.split(' ')[1] }}"
+            state: unmounted
+          with_items:
+            - "{{ nfs_ganesha_mount_points.results | map(attribute='stdout_lines') | list }}"
+          when: item | length > 0
+      when: groups[nfs_group_name] | default([]) | length > 0
+
+    - name: ensure cephfs mountpoint(s) are unmounted
+      command: umount -a -t ceph
+
+    - name: ensure rbd devices are unmapped
+      command: rbdmap unmap-all
+
+    - name: unload ceph kernel modules
+      modprobe:
+        name: "{{ item }}"
+        state: absent
+      with_items:
+        - rbd
+        - ceph
+        - libceph
+
+
+- name: purge ceph nfs cluster
+
+  hosts: "{{ nfs_group_name|default('nfss') }}"
+
+  become: true
+
+  tasks:
+
+  - name: disable ceph nfs service
+    service:
+      name: "ceph-nfs@{{ ansible_hostname }}"
+      state: stopped
+      enabled: no
+    ignore_errors: true
+
+  - name: remove ceph nfs container
+    docker_container:
+      image: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
+      name: "ceph-nfs-{{ ansible_hostname }}"
+      state: absent
+    ignore_errors: true
+
+  - name: remove ceph nfs service
+    file:
+      path: /etc/systemd/system/ceph-nfs@.service
+      state: absent
+
+  - name: remove ceph nfs directories for "{{ ansible_hostname }}"
+    file:
+      path: "{{ item }}"
+      state: absent
+    with_items:
+      - /etc/ganesha
+      - /var/lib/nfs/ganesha
+      - /var/run/ganesha
+
+  - name: remove ceph nfs image
+    docker_image:
+      state: absent
+      repository: "{{ ceph_docker_registry }}"
+      name: "{{ ceph_docker_image }}"
+      tag: "{{ ceph_docker_image_tag }}"
+      force: yes
+    tags: remove_img
+
 - name: purge ceph mds cluster
 
   hosts: "{{ mds_group_name|default('mdss') }}"
     tags: remove_img
 
 
-- name: purge ceph nfs cluster
-
-  hosts: "{{ nfs_group_name|default('nfss') }}"
-
-  become: true
-
-  tasks:
-
-  - name: disable ceph nfs service
-    service:
-      name: "ceph-nfs@{{ ansible_hostname }}"
-      state: stopped
-      enabled: no
-    ignore_errors: true
-
-  - name: remove ceph nfs container
-    docker_container:
-      image: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
-      name: "ceph-nfs-{{ ansible_hostname }}"
-      state: absent
-    ignore_errors: true
-
-  - name: remove ceph nfs service
-    file:
-      path: /etc/systemd/system/ceph-nfs@.service
-      state: absent
-
-  - name: remove ceph nfs directories for "{{ ansible_hostname }}"
-    file:
-      path: "{{ item }}"
-      state: absent
-    with_items:
-      - /etc/ganesha
-      - /var/lib/nfs/ganesha
-      - /var/run/ganesha
-
-  - name: remove ceph nfs image
-    docker_image:
-      state: absent
-      repository: "{{ ceph_docker_registry }}"
-      name: "{{ ceph_docker_image }}"
-      tag: "{{ ceph_docker_image_tag }}"
-      force: yes
-    tags: remove_img
-
-
 - name: purge ceph osd cluster
 
   hosts: "{{ osd_group_name | default('osds') }}"