From 89757ad12e6938fe2bf997e0f21a5ffac082e30b Mon Sep 17 00:00:00 2001 From: Alfredo Deza Date: Thu, 9 Nov 2017 15:40:51 -0500 Subject: [PATCH] ceph-volume tests.functional create a separate tox.ini environ for `simple` Signed-off-by: Alfredo Deza --- .../tests/functional/simple/tox.ini | 47 +++++++++++++++++++ 1 file changed, 47 insertions(+) create mode 100644 src/ceph-volume/ceph_volume/tests/functional/simple/tox.ini diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/tox.ini b/src/ceph-volume/ceph_volume/tests/functional/simple/tox.ini new file mode 100644 index 0000000000000..2be93fa6b4c0a --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/simple/tox.ini @@ -0,0 +1,47 @@ +[tox] +envlist = {centos7,xenial}-{filestore,bluestore}-{activate} +skipsdist = True + +[testenv] +whitelist_externals = + vagrant + bash + git +passenv=* +setenv= + ANSIBLE_SSH_ARGS = -F {changedir}/vagrant_ssh_config + ANSIBLE_STDOUT_CALLBACK = debug + ANSIBLE_RETRY_FILES_ENABLED = False + VAGRANT_CWD = {changedir} + CEPH_VOLUME_DEBUG = 1 +deps= + ansible==2.4.1 + testinfra==1.7.1 + pytest-xdist +changedir= + centos7-filestore-activate: {toxinidir}/centos7/filestore/activate +commands= + git clone -b {env:CEPH_ANSIBLE_BRANCH:master} --single-branch https://github.com/ceph/ceph-ansible.git {envdir}/tmp/ceph-ansible + + vagrant up --no-provision {posargs:--provider=virtualbox} + bash {toxinidir}/../scripts/generate_ssh_config.sh {changedir} + + # use ceph-ansible to deploy a ceph cluster on the vms + ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/site.yml.sample --extra-vars "fetch_directory={changedir}/fetch ceph_dev_branch={env:CEPH_DEV_BRANCH:master} ceph_dev_sha1={env:CEPH_DEV_SHA1:latest}" + + # prepare nodes for testing with testinfra + ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/setup.yml + + # test cluster state using ceph-ansible tests + testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/tests
+ + # make ceph-volume simple take over all the OSDs that got deployed, disabling ceph-disk + ansible-playbook -vv -i {changedir}/hosts {changedir}/test.yml + + # reboot all vms + vagrant reload --no-provision + + # retest to ensure cluster came back up correctly after rebooting + testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/tests + + vagrant destroy --force -- 2.39.5