From 26ca6c129a70cd28aa0ed2dae552fb91a251ad21 Mon Sep 17 00:00:00 2001
From: Alfredo Deza
Date: Thu, 9 Aug 2018 16:49:43 -0400
Subject: [PATCH] ceph-volume tests.functional add bluestore,filestore playbooks

Signed-off-by: Alfredo Deza
---
 .../batch/playbooks/test_bluestore.yml        | 46 +++++++++++++++
 .../playbooks/test_bluestore_dmcrypt.yml      | 46 +++++++++++++++
 .../batch/playbooks/test_filestore.yml        | 46 +++++++++++++++
 .../playbooks/test_filestore_dmcrypt.yml      | 46 +++++++++++++++
 .../tests/functional/batch/tox.ini            | 56 +++++++++++++++++++
 5 files changed, 240 insertions(+)
 create mode 100644 src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/test_bluestore.yml
 create mode 100644 src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/test_bluestore_dmcrypt.yml
 create mode 100644 src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/test_filestore.yml
 create mode 100644 src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/test_filestore_dmcrypt.yml
 create mode 100644 src/ceph-volume/ceph_volume/tests/functional/batch/tox.ini

diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/test_bluestore.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/test_bluestore.yml
new file mode 100644
index 0000000000000..85c702e380294
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/test_bluestore.yml
@@ -0,0 +1,46 @@
+
+- hosts: osds
+  become: yes
+  tasks:
+
+    - name: stop ceph-osd@1 daemon
+      service:
+        name: ceph-osd@1
+        state: stopped
+
+    - name: stop ceph-osd@0 daemon
+      service:
+        name: ceph-osd@0
+        state: stopped
+
+
+- hosts: mons
+  become: yes
+  tasks:
+
+    - name: destroy osd.1
+      command: "ceph osd purge osd.1 --yes-i-really-mean-it"
+
+    - name: destroy osd.0
+      command: "ceph osd purge osd.0 --yes-i-really-mean-it"
+
+
+- hosts: osds
+  become: yes
+  tasks:
+
+    - name: zap /dev/sdb
+      command: "ceph-volume lvm zap /dev/sdb --destroy"
+      environment:
+        CEPH_VOLUME_DEBUG: 1
+
+
+    - name: zap /dev/sdc
+      command: "ceph-volume lvm zap /dev/sdc --destroy"
+      environment:
+        CEPH_VOLUME_DEBUG: 1
+
+    - name: batch create /dev/sdb and /dev/sdc again
+      command: "ceph-volume lvm batch --yes --bluestore /dev/sdb /dev/sdc"
+      environment:
+        CEPH_VOLUME_DEBUG: 1
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/test_bluestore_dmcrypt.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/test_bluestore_dmcrypt.yml
new file mode 100644
index 0000000000000..9e1a73f6535f3
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/test_bluestore_dmcrypt.yml
@@ -0,0 +1,46 @@
+
+- hosts: osds
+  become: yes
+  tasks:
+
+    - name: stop ceph-osd@1 daemon
+      service:
+        name: ceph-osd@1
+        state: stopped
+
+    - name: stop ceph-osd@0 daemon
+      service:
+        name: ceph-osd@0
+        state: stopped
+
+
+- hosts: mons
+  become: yes
+  tasks:
+
+    - name: destroy osd.1
+      command: "ceph osd purge osd.1 --yes-i-really-mean-it"
+
+    - name: destroy osd.0
+      command: "ceph osd purge osd.0 --yes-i-really-mean-it"
+
+
+- hosts: osds
+  become: yes
+  tasks:
+
+    - name: zap /dev/sdb
+      command: "ceph-volume lvm zap /dev/sdb --destroy"
+      environment:
+        CEPH_VOLUME_DEBUG: 1
+
+
+    - name: zap /dev/sdc
+      command: "ceph-volume lvm zap /dev/sdc --destroy"
+      environment:
+        CEPH_VOLUME_DEBUG: 1
+
+    - name: batch create /dev/sdb and /dev/sdc again
+      command: "ceph-volume lvm batch --yes --bluestore --dmcrypt /dev/sdb /dev/sdc"
+      environment:
+        CEPH_VOLUME_DEBUG: 1
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/test_filestore.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/test_filestore.yml
new file mode 100644
index 0000000000000..95909f97b298c
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/test_filestore.yml
@@ -0,0 +1,46 @@
+
+- hosts: osds
+  become: yes
+  tasks:
+
+    - name: stop ceph-osd@1 daemon
+      service:
+        name: ceph-osd@1
+        state: stopped
+
+    - name: stop ceph-osd@0 daemon
+      service:
+        name: ceph-osd@0
+        state: stopped
+
+
+- hosts: mons
+  become: yes
+  tasks:
+
+    - name: destroy osd.1
+      command: "ceph osd purge osd.1 --yes-i-really-mean-it"
+
+    - name: destroy osd.0
+      command: "ceph osd purge osd.0 --yes-i-really-mean-it"
+
+
+- hosts: osds
+  become: yes
+  tasks:
+
+    - name: zap /dev/sdb
+      command: "ceph-volume lvm zap /dev/sdb --destroy"
+      environment:
+        CEPH_VOLUME_DEBUG: 1
+
+
+    - name: zap /dev/sdc
+      command: "ceph-volume lvm zap /dev/sdc --destroy"
+      environment:
+        CEPH_VOLUME_DEBUG: 1
+
+    - name: batch create /dev/sdb and /dev/sdc again
+      command: "ceph-volume lvm batch --yes --filestore /dev/sdb /dev/sdc"
+      environment:
+        CEPH_VOLUME_DEBUG: 1
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/test_filestore_dmcrypt.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/test_filestore_dmcrypt.yml
new file mode 100644
index 0000000000000..81f84e9196cd0
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/test_filestore_dmcrypt.yml
@@ -0,0 +1,46 @@
+
+- hosts: osds
+  become: yes
+  tasks:
+
+    - name: stop ceph-osd@1 daemon
+      service:
+        name: ceph-osd@1
+        state: stopped
+
+    - name: stop ceph-osd@0 daemon
+      service:
+        name: ceph-osd@0
+        state: stopped
+
+
+- hosts: mons
+  become: yes
+  tasks:
+
+    - name: destroy osd.1
+      command: "ceph osd purge osd.1 --yes-i-really-mean-it"
+
+    - name: destroy osd.0
+      command: "ceph osd purge osd.0 --yes-i-really-mean-it"
+
+
+- hosts: osds
+  become: yes
+  tasks:
+
+    - name: zap /dev/sdb
+      command: "ceph-volume lvm zap /dev/sdb --destroy"
+      environment:
+        CEPH_VOLUME_DEBUG: 1
+
+
+    - name: zap /dev/sdc
+      command: "ceph-volume lvm zap /dev/sdc --destroy"
+      environment:
+        CEPH_VOLUME_DEBUG: 1
+
+    - name: batch create /dev/sdb and /dev/sdc again
+      command: "ceph-volume lvm batch --yes --filestore --dmcrypt /dev/sdb /dev/sdc"
+      environment:
+        CEPH_VOLUME_DEBUG: 1
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/tox.ini b/src/ceph-volume/ceph_volume/tests/functional/batch/tox.ini
new file mode 100644
index 0000000000000..f3924f59bb565
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/tox.ini
@@ -0,0 +1,56 @@
+[tox]
+envlist = {centos7,xenial}-{filestore-single-type,bluestore-single-type-dmcrypt}
+skipsdist = True
+
+[testenv]
+whitelist_externals =
+    vagrant
+    bash
+    git
+passenv=*
+setenv=
+  ANSIBLE_SSH_ARGS = -F {changedir}/vagrant_ssh_config
+  ANSIBLE_ACTION_PLUGINS = {envdir}/tmp/ceph-ansible/plugins/actions
+  ANSIBLE_STDOUT_CALLBACK = debug
+  ANSIBLE_RETRY_FILES_ENABLED = False
+  ANSIBLE_SSH_RETRIES = 5
+  VAGRANT_CWD = {changedir}
+  CEPH_VOLUME_DEBUG = 1
+deps=
+  ansible~=2.6,<2.7
+  testinfra
+  pytest-xdist
+  notario>=0.0.13
+changedir=
+  centos7-filestore-single-type: {toxinidir}/centos7/filestore/single-type
+  centos7-bluestore-single-type-dmcrypt: {toxinidir}/centos7/bluestore/single-type-dmcrypt
+  xenial-filestore-single-type: {toxinidir}/xenial/filestore/single-type
+  xenial-bluestore-single-type-dmcrypt: {toxinidir}/xenial/bluestore/single-type-dmcrypt
+commands=
+  git clone -b {env:CEPH_ANSIBLE_BRANCH:master} --single-branch https://github.com/ceph/ceph-ansible.git {envdir}/tmp/ceph-ansible
+
+  vagrant up {env:VAGRANT_UP_FLAGS:"--no-provision"} {posargs:--provider=virtualbox}
+  bash {toxinidir}/../scripts/generate_ssh_config.sh {changedir}
+
+  # use ceph-ansible to deploy a ceph cluster on the vms
+  ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/site.yml.sample --extra-vars "fetch_directory={changedir}/fetch ceph_dev_branch={env:CEPH_DEV_BRANCH:master} ceph_dev_sha1={env:CEPH_DEV_SHA1:latest}"
+
+  # prepare nodes for testing with testinfra
+  ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/setup.yml
+
+  # test cluster state using ceph-ansible tests
+  testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/tests
+
+  # reboot all vms - attempt
+  bash {toxinidir}/../scripts/vagrant_reload.sh {env:VAGRANT_UP_FLAGS:"--no-provision"} {posargs:--provider=virtualbox}
+
+  # retest to ensure the cluster came back up correctly after rebooting
+  testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/tests
+
+  # destroy the OSDs, zap their devices, and recreate them with "lvm batch"
+  ansible-playbook -vv -i {changedir}/hosts {changedir}/test.yml
+
+  # retest to ensure the cluster came back up correctly
+  testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/tests
+
+  vagrant destroy {env:VAGRANT_DESTROY_FLAGS:"--force"}
-- 
2.39.5
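
A minimal sketch of how one of the new environments could be exercised locally, assuming tox, Vagrant, and VirtualBox are installed on the host; the branch value and the environment name below are only examples, not something this patch prescribes:

    cd src/ceph-volume/ceph_volume/tests/functional/batch
    CEPH_ANSIBLE_BRANCH=master tox -e xenial-bluestore-single-type-dmcrypt -- --provider=virtualbox

The trailing arguments after "--" become {posargs} and are handed to "vagrant up", so a different Vagrant provider can be selected per run.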