git-server-git.apps.pok.os.sepia.ceph.com Git - ceph-ansible.git/commitdiff
purge-cluster: adds support for purging lvm osds (1797/head)
author    Andrew Schoen <aschoen@redhat.com>
          Wed, 23 Aug 2017 14:12:40 +0000 (09:12 -0500)
committer Andrew Schoen <aschoen@redhat.com>
          Wed, 23 Aug 2017 15:33:35 +0000 (10:33 -0500)
This also adds a new testing scenario for purging lvm osds

Signed-off-by: Andrew Schoen <aschoen@redhat.com>
infrastructure-playbooks/purge-cluster.yml
tox.ini

diff --git a/infrastructure-playbooks/purge-cluster.yml b/infrastructure-playbooks/purge-cluster.yml
index 0dbba2aa954b64fd28ec3e3933697a8f020e0adc..0ec3e1638fb6c263b99a7cfb1302fed3f305b3c7 100644
--- a/infrastructure-playbooks/purge-cluster.yml
+++ b/infrastructure-playbooks/purge-cluster.yml
 
   tasks:
 
+  - name: set devices if osd scenario is lvm
+    set_fact:
+      devices: []
+    when: osd_scenario == "lvm"
+
   - name: check for a device list
     fail:
       msg: "OSD automatic discovery was detected, purge cluster does not support this scenario. If you want to purge the cluster, manually provide the list of devices in group_vars/{{ osd_group_name }} using the devices variable."
     when:
       - devices|length == 0
-      - osd_auto_discovery
+      - osd_auto_discovery|default(false)
 
   - name: get osd numbers
     shell: "if [ -d /var/lib/ceph/osd ] ; then ls /var/lib/ceph/osd | sed 's/.*-//' ; fi"
       - ceph_disk_present.rc == 0
       - ceph_data_partlabels.rc == 0
 
+  # this should go away once 'ceph-volume lvm zap' is available
+  - name: remove osd logical volumes
+    command: "lvremove -f {{ item.data_vg }}/{{ item.data }}"
+    with_items: "{{ lvm_volumes }}"
+    when:
+      - osd_scenario == "lvm"
+
+  # this should go away once 'ceph-volume lvm zap' is available
+  - name: remove osd lvm journals
+    command: "lvremove -f {{ item.journal_vg }}/{{ item.journal }}"
+    with_items: "{{ lvm_volumes }}"
+    # journals might be logical volumes, but they could also be
+    # devices so fail silently if this doesn't work
+    failed_when: false
+    when:
+      - osd_scenario == "lvm"
+      - item.journal_vg is defined
+
   - name: get ceph journal partitions
     shell: |
       blkid | awk '/ceph journal/ { sub (":", "", $1); print $1 }'
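
For context, the new purge tasks iterate over lvm_volumes, the variable that drives the lvm osd scenario: each entry names the data logical volume and its volume group, plus an optional journal that is either another logical volume (journal plus journal_vg) or a raw device path. That is why the journal-removal task guards on item.journal_vg and sets failed_when: false, and why devices is forced to an empty list first (the lvm scenario provisions from lvm_volumes rather than devices, so the device-list check must not trip). A minimal sketch of the expected shape, with hypothetical lv/vg names:

  # group_vars/osds.yml -- illustrative values only
  osd_scenario: lvm
  lvm_volumes:
    - data: data-lv1        # logical volume holding the osd data
      data_vg: vg-osd1      # volume group containing data-lv1
      journal: journal-lv1  # journal is itself a logical volume...
      journal_vg: vg-osd1   # ...so journal_vg is defined and lvremove applies
    - data: data-lv2
      data_vg: vg-osd2
      journal: /dev/sdc1    # journal on a plain partition: no journal_vg,
                            # so the lvremove attempt fails silently

With these values the data task runs lvremove -f vg-osd1/data-lv1 and lvremove -f vg-osd2/data-lv2, while the journal task only touches vg-osd1/journal-lv1.
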
diff --git a/tox.ini b/tox.ini
index 2e81e186c2a8e6e05ec6692a575b82d097446920..5dfc1741adce626302c8a37bc46867acbc2f10c5 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,6 +1,6 @@
 [tox]
 envlist = {dev,jewel,luminous,rhcs}-{ansible2.2,ansible2.3}-{xenial_cluster,journal_collocation,centos7_cluster,dmcrypt_journal,dmcrypt_journal_collocation,docker_cluster,purge_cluster,purge_dmcrypt,docker_dedicated_journal,docker_dmcrypt_journal_collocation,update_dmcrypt,update_cluster,cluster,purge_docker_cluster,update_docker_cluster}
-  {dev,luminous}-{ansible2.2,ansible2.3}-{bluestore_journal_collocation,bluestore_cluster,bluestore_dmcrypt_journal,bluestore_dmcrypt_journal_collocation,bluestore_docker_cluster,bluestore_docker_dedicated_journal,bluestore_docker_dmcrypt_journal_collocation,lvm_osds}
+  {dev,luminous}-{ansible2.2,ansible2.3}-{bluestore_journal_collocation,bluestore_cluster,bluestore_dmcrypt_journal,bluestore_dmcrypt_journal_collocation,bluestore_docker_cluster,bluestore_docker_dedicated_journal,bluestore_docker_dmcrypt_journal_collocation,lvm_osds,purge_lvm_osds}
 
 skipsdist = True
 
@@ -20,6 +20,35 @@ commands=
       ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
       ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest} \
   "
+
+  # set up the cluster again
+  ansible-playbook -vv -i {changedir}/hosts {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
+      ceph_stable_release={env:CEPH_STABLE_RELEASE:kraken} \
+      fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
+      ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
+      ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
+      ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest} \
+      ceph_dev_branch={env:CEPH_DEV_BRANCH:master} \
+      ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \
+  "
+  # test that the cluster can be redeployed in a healthy state
+  testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {toxinidir}/tests/functional/tests
+
+[purge-lvm]
+commands=
+  cp {toxinidir}/infrastructure-playbooks/{env:PURGE_PLAYBOOK:purge-cluster.yml} {toxinidir}/{env:PURGE_PLAYBOOK:purge-cluster.yml}
+  ansible-playbook -vv -i {changedir}/hosts {toxinidir}/{env:PURGE_PLAYBOOK:purge-cluster.yml} --extra-vars "\
+      ireallymeanit=yes \
+      remove_packages=yes \
+      ceph_stable_release={env:CEPH_STABLE_RELEASE:kraken} \
+      fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
+      ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
+      ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
+      ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest} \
+  "
+
+  ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/lvm_setup.yml
+
   # set up the cluster again
   ansible-playbook -vv -i {changedir}/hosts {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
       ceph_stable_release={env:CEPH_STABLE_RELEASE:kraken} \
@@ -118,6 +147,7 @@ changedir=
   bluestore_docker_dedicated_journal: {toxinidir}/tests/functional/centos/7/bs-dock-ded-jrn
   bluestore_docker_dmcrypt_journal_collocation: {toxinidir}/tests/functional/centos/7/bs-dock-crypt-jrn-col
   lvm_osds: {toxinidir}/tests/functional/centos/7/lvm-osds
+  purge_lvm_osds: {toxinidir}/tests/functional/centos/7/lvm-osds
 
 commands=
   rhcs: ansible-playbook -vv -i "localhost," -c local {toxinidir}/tests/functional/rhcs_setup.yml --extra-vars "change_dir={changedir}" --tags "vagrant_setup"
@@ -127,6 +157,7 @@ commands=
   bash {toxinidir}/tests/scripts/generate_ssh_config.sh {changedir}
 
   lvm_osds: ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/lvm_setup.yml
+  purge_lvm_osds: ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/lvm_setup.yml
 
   rhcs: ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/rhcs_setup.yml --extra-vars "ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} repo_url={env:REPO_URL:} rhel7_repo_url={env:RHEL7_REPO_URL:}" --skip-tags "vagrant_setup"
 
@@ -145,6 +176,7 @@ commands=
   testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {toxinidir}/tests/functional/tests
 
   purge_cluster: {[purge]commands}
+  purge_lvm_osds: {[purge-lvm]commands}
   purge_dmcrypt: {[purge]commands}
   purge_docker_cluster: {[purge]commands}
   update_dmcrypt: {[update]commands}
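
Taken together, the tox changes wire the new scenario up end to end: purge_lvm_osds reuses the existing lvm-osds changedir and lvm_setup.yml playbook, and its [purge-lvm] commands deploy a cluster, purge it with ireallymeanit=yes and remove_packages=yes, recreate the logical volumes with lvm_setup.yml, redeploy, and re-run testinfra to confirm the redeployed cluster is healthy. Assuming the usual ceph-ansible vagrant test prerequisites are in place, the scenario should be runnable locally with any of the factor combinations from the envlist, e.g. tox -e luminous-ansible2.3-purge_lvm_osds.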