From e371bd591c45ad9e18fda9fcd938b736044146e2 Mon Sep 17 00:00:00 2001
From: =?utf8?q?S=C3=A9bastien=20Han?=
Date: Fri, 27 Jan 2017 11:33:37 +0100
Subject: [PATCH] purge: fix ubuntu purge when not using systemd
MIME-Version: 1.0
Content-Type: text/plain; charset=utf8
Content-Transfer-Encoding: 8bit

We now rely on the CLI tool ceph-detect-init, which tells us which init
system is in use on the distribution. We use this instead of the
previous lookup for systemd unit files to call the right task for each
init system.

Signed-off-by: Sébastien Han
---
 infrastructure-playbooks/purge-cluster.yml | 107 ++++++++-------
 1 file changed, 38 insertions(+), 69 deletions(-)

diff --git a/infrastructure-playbooks/purge-cluster.yml b/infrastructure-playbooks/purge-cluster.yml
index 0de3f002e..6911fc51e 100644
--- a/infrastructure-playbooks/purge-cluster.yml
+++ b/infrastructure-playbooks/purge-cluster.yml
@@ -31,8 +31,7 @@
         invoking the playbook"
     when: ireallymeanit != 'yes'
 
-
-- name: gather facts and check if using systemd
+- name: gather facts and check init system
 
   vars:
     mon_group_name: mons
@@ -53,10 +52,9 @@
   become: true
 
   tasks:
-  - name: are we using systemd
-    shell: "if [ -d /usr/lib/systemd ] ; then find /usr/lib/systemd/system -name 'ceph*' | wc -l ; else echo 0 ; fi"
-    register: systemd_unit_files
-
+  - name: detect init system
+    command: ceph-detect-init
+    register: init_system
 
 
 - name: purge ceph mds cluster
@@ -83,28 +81,23 @@
       name: ceph.target
       state: stopped
       enabled: no
-    when:
-      - ansible_os_family == 'RedHat'
-      - systemd_unit_files.stdout != "0"
+    when: ansible_service_mgr == 'systemd'
 
   - name: stop ceph mdss with systemd
     service:
       name: ceph-mds@{{ ansible_hostname }}
       state: stopped
       enabled: no
-    when:
-      - ansible_os_family == 'RedHat'
-      - systemd_unit_files.stdout != "0"
+    when: init_system.stdout == 'systemd'
 
   - name: stop ceph mdss
     shell: "service ceph status mds ; if [ $? == 0 ] ; then service ceph stop mds ; else echo ; fi"
-    when: ansible_os_family == 'RedHat'
+    when: init_system.stdout == 'sysvinit'
 
-# Ubuntu 14.04
   - name: stop ceph mdss on ubuntu
     command: initctl stop ceph-mds cluster={{ cluster }} id={{ ansible_hostname }}
     failed_when: false
-    when: ansible_distribution == 'Ubuntu'
+    when: init_system.stdout == 'upstart'
 
 
 - name: purge ceph rgw cluster
@@ -132,28 +125,23 @@
       name: ceph.target
       state: stopped
       enabled: no
-    when:
-      - ansible_os_family == 'RedHat'
-      - systemd_unit_files.stdout != "0"
+    when: ansible_service_mgr == 'systemd'
 
   - name: stop ceph rgws with systemd
     service:
       name: ceph-radosgw@rgw.{{ ansible_hostname }}
       state: stopped
       enabled: no
-    when:
-      - ansible_os_family == 'RedHat'
-      - systemd_unit_files.stdout != "0"
+    when: init_system.stdout == 'systemd'
 
   - name: stop ceph rgws
     shell: "service ceph-radosgw status ; if [ $? == 0 ] ; then service ceph-radosgw stop ; else echo ; fi"
-    when: ansible_os_family == 'RedHat'
+    when: init_system.stdout == 'sysvinit'
 
-# Ubuntu 14.04
   - name: stop ceph rgws on ubuntu
     command: initctl stop radosgw cluster={{ cluster }} id={{ ansible_hostname }}
     failed_when: false
-    when: ansible_distribution == 'Ubuntu'
+    when: init_system.stdout == 'upstart'
 
 
 - name: purge ceph rbd-mirror cluster
@@ -181,23 +169,18 @@
       name: ceph.target
       state: stopped
       enabled: no
-    when:
-      - ansible_os_family == 'RedHat'
-      - systemd_unit_files.stdout != "0"
+    when: ansible_service_mgr == 'systemd'
 
   - name: stop ceph rbd mirror with systemd
     service:
       name: ceph-rbd-mirror@admin.service
       state: stopped
-    when:
-      - ansible_os_family == 'RedHat'
-      - systemd_unit_files.stdout != "0"
+    when: init_system.stdout == 'systemd'
 
-# Ubuntu 14.04
   - name: stop ceph rbd mirror on ubuntu
     command: initctl stop ceph-rbd-mirror cluster={{ cluster }} id=admin
     failed_when: false
-    when: ansible_distribution == 'Ubuntu'
+    when: init_system.stdout == 'upstart'
 
 
 - name: purge ceph nfs cluster
@@ -225,27 +208,22 @@
       name: ceph.target
       state: stopped
       enabled: no
-    when:
-      - ansible_os_family == 'RedHat'
-      - systemd_unit_files.stdout != "0"
+    when: ansible_service_mgr == 'systemd'
 
   - name: stop ceph nfss with systemd
     service:
       name: nfs-ganesha
       state: stopped
-    when:
-      - ansible_os_family == 'RedHat'
-      - systemd_unit_files.stdout != "0"
+    when: init_system.stdout == 'systemd'
 
   - name: stop ceph nfss
     shell: "service nfs-ganesha status ; if [ $? == 0 ] ; then service nfs-ganesha stop ; else echo ; fi"
-    when: ansible_os_family == 'RedHat'
+    when: init_system.stdout == 'sysvinit'
 
-# Ubuntu 14.04
   - name: stop ceph nfss on ubuntu
     command: initctl stop nfs-ganesha
     failed_when: false
-    when: ansible_distribution == 'Ubuntu'
+    when: init_system.stdout == 'upstart'
 
 
 - name: purge ceph osd cluster
@@ -299,9 +277,7 @@
       name: ceph.target
       state: stopped
       enabled: no
-    when:
-      - ansible_os_family == 'RedHat'
-      - systemd_unit_files.stdout != "0"
+    when: ansible_service_mgr == 'systemd'
 
   - name: stop ceph-osd with systemd
     service:
@@ -309,26 +285,23 @@
       state: stopped
       enabled: no
     with_items: "{{ osd_ids.stdout_lines }}"
-    when:
-      - ansible_os_family == 'RedHat'
-      - systemd_unit_files.stdout != "0"
+    when: init_system.stdout == 'systemd'
 
-# before infernalis release, using sysvinit scripts
-# we use this test so we do not have to know which RPM contains the boot script
-# or where it is placed.
+  # before infernalis release, using sysvinit scripts
+  # we use this test so we do not have to know which RPM contains the boot script
+  # or where it is placed.
   - name: stop ceph osds
     shell: "service ceph status osd ; if [ $? == 0 ] ; then service ceph stop osd ; else echo ; fi"
-    when: ansible_os_family == 'RedHat'
+    when: init_system.stdout == 'sysvinit'
 
-# Ubuntu 14.04
   - name: stop ceph osds on ubuntu
     shell: |
       for id in $(ls /var/lib/ceph/osd/ |grep -oh '[0-9]*'); do
         initctl stop ceph-osd cluster={{ cluster }} id=$id
       done
     failed_when: false
-    when: ansible_distribution == 'Ubuntu'
+    when: init_system.stdout == 'upstart'
     with_items: "{{ osd_ids.stdout_lines }}"
 
   - name: see if ceph-disk-created data partitions are present
@@ -472,27 +445,23 @@
       name: ceph.target
       state: stopped
       enabled: no
-    when:
-      - ansible_os_family == 'RedHat'
-      - systemd_unit_files.stdout != "0"
+    when: ansible_service_mgr == 'systemd'
 
   - name: stop ceph mons with systemd
     service:
       name: ceph-mon@{{ ansible_hostname }}
       state: stopped
       enabled: no
-    when:
-      - ansible_os_family == 'RedHat'
-      - systemd_unit_files.stdout != "0"
+    when: init_system.stdout == 'systemd'
 
   - name: stop ceph mons
     shell: "service ceph status mon ; if [ $? == 0 ] ; then service ceph stop mon ; else echo ; fi"
-    when: ansible_os_family == 'RedHat'
+    when: init_system.stdout == 'sysvinit'
 
   - name: stop ceph mons on ubuntu
     command: initctl stop ceph-mon cluster={{ cluster }} id={{ ansible_hostname }}
     failed_when: false
-    when: ansible_distribution == 'Ubuntu'
+    when: init_system.stdout == 'upstart'
 
   - name: remove monitor store and bootstrap keys
     file:
@@ -509,8 +478,8 @@
     rbdmirror_group_name: rbd-mirrors
     nfs_group_name: nfss
 
-# When set to true both groups of packages are purged.
-# This can cause problem with qemu-kvm
+    # When set to true both groups of packages are purged.
+    # This can cause problem with qemu-kvm
     purge_all_packages: true
 
     ceph_packages:
@@ -614,15 +583,15 @@
       path: /var/log/ceph
       state: absent
 
-  - name: remove from SysV
+  - name: remove from sysv
     shell: "update-rc.d -f ceph remove"
-    when: ansible_distribution == 'Ubuntu'
+    when: init_system.stdout == 'sysvinit'
 
-  - name: remove Upstart and SysV files
+  - name: remove upstart and sysv files
     shell: "find /etc -name '*ceph*' -delete"
-    when: ansible_distribution == 'Ubuntu'
+    when: init_system.stdout == 'upstart'
 
-  - name: remove Upstart and apt logs and cache
+  - name: remove upstart and apt logs and cache
     shell: "find /var -name '*ceph*' -delete"
     when: ansible_distribution == 'Ubuntu'
 
@@ -636,7 +605,7 @@
     command: dnf clean all
     when: ansible_pkg_mgr == 'dnf'
 
-  - name: purge RPM cache in /tmp
+  - name: purge rpm cache in /tmp
     file:
       path: /tmp/rh-storage-repo
       state: absent
-- 
2.39.5
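
Note: the snippet below is an illustrative sketch only, not part of the patch. It shows the pattern the playbook now follows: run ceph-detect-init once, register its output, and gate one stop task per init system on that output instead of guessing from the distribution or from the presence of systemd unit files. It assumes ceph-detect-init is installed on the target host and prints one of systemd, upstart or sysvinit; the host group, service name and the cluster variable are examples taken from the playbook above.

# Illustrative sketch, not part of the patch.
- name: example - stop a ceph monitor with the detected init system
  hosts: mons               # example group; the real playbook covers mons, osds, mdss, rgws, ...
  become: true
  tasks:
    - name: detect init system
      command: ceph-detect-init   # prints systemd, upstart or sysvinit
      register: init_system

    - name: stop ceph mon with systemd
      service:
        name: ceph-mon@{{ ansible_hostname }}
        state: stopped
      when: init_system.stdout == 'systemd'

    - name: stop ceph mon on upstart
      command: initctl stop ceph-mon cluster={{ cluster }} id={{ ansible_hostname }}
      failed_when: false
      when: init_system.stdout == 'upstart'

    - name: stop ceph mon on sysvinit
      shell: "service ceph status mon ; if [ $? == 0 ] ; then service ceph stop mon ; else echo ; fi"
      when: init_system.stdout == 'sysvinit'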