ceph-volume tests.functional move top level tox.ini to lvm/tox.ini
author Alfredo Deza <adeza@redhat.com>
Fri, 10 Nov 2017 18:14:55 +0000 (13:14 -0500)
committer Alfredo Deza <adeza@redhat.com>
Fri, 10 Nov 2017 18:23:17 +0000 (13:23 -0500)
Signed-off-by: Alfredo Deza <adeza@redhat.com>
src/ceph-volume/ceph_volume/tests/functional/lvm/tox.ini [new file with mode: 0644]
src/ceph-volume/ceph_volume/tests/functional/tox.ini [deleted file]
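
With the file relocated under lvm/, a rough invocation sketch (not part of the commit) would be run from that directory; the environment name can be any combination expanded from the envlist in the new tox.ini:

  cd src/ceph-volume/ceph_volume/tests/functional/lvm
  tox -e centos7-bluestore-create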

diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/tox.ini b/src/ceph-volume/ceph_volume/tests/functional/lvm/tox.ini
new file mode 100644 (file)
index 0000000..6fda7dc
--- /dev/null
@@ -0,0 +1,54 @@
+[tox]
+envlist = {centos7,xenial}-{filestore,bluestore}-{create,prepare_activate}
+skipsdist = True
+
+[testenv]
+whitelist_externals =
+    vagrant
+    bash
+    git
+passenv=*
+setenv=
+  ANSIBLE_SSH_ARGS = -F {changedir}/vagrant_ssh_config
+  ANSIBLE_STDOUT_CALLBACK = debug
+  ANSIBLE_RETRY_FILES_ENABLED = False
+  VAGRANT_CWD = {changedir}
+  CEPH_VOLUME_DEBUG = 1
+deps=
+  ansible==2.4.1
+  testinfra==1.7.1
+  pytest-xdist
+changedir=
+  centos7-filestore-create: {toxinidir}/centos7/create
+  xenial-filestore-create: {toxinidir}/xenial/create
+  xenial-bluestore-create: {toxinidir}/xenial/bluestore
+  centos7-bluestore-create: {toxinidir}/centos7/bluestore
+  # TODO: these are placeholders for now, eventually we want to
+  # test the prepare/activate workflow of ceph-volume as well
+  xenial-prepare_activate: {toxinidir}/xenial/prepare_activate
+  centos7-prepare_activate: {toxinidir}/centos7/prepare_activate
+commands=
+  git clone -b {env:CEPH_ANSIBLE_BRANCH:master} --single-branch https://github.com/ceph/ceph-ansible.git {envdir}/tmp/ceph-ansible
+
+  vagrant up --no-provision {posargs:--provider=virtualbox}
+  bash {toxinidir}/scripts/generate_ssh_config.sh {changedir}
+
+  # create logical volumes to test with on the vms
+  ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/lvm_setup.yml
+
+  # use ceph-ansible to deploy a ceph cluster on the vms
+  ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/site.yml.sample --extra-vars "fetch_directory={changedir}/fetch ceph_dev_branch={env:CEPH_DEV_BRANCH:master} ceph_dev_sha1={env:CEPH_DEV_SHA1:latest}"
+
+  # prepare nodes for testing with testinfra
+  ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/setup.yml
+
+  # test cluster state using ceph-ansible tests
+  testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/tests
+
+  # reboot all vms
+  vagrant reload --no-provision
+
+  # retest to ensure cluster came back up correctly after rebooting
+  testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/tests
+
+  vagrant destroy --force
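
Because the testenv sets passenv=* and hands {posargs} to vagrant up, the ceph-ansible branch, the ceph dev branch, and the Vagrant provider can all be overridden at invocation time. A hypothetical example (stable-3.0, luminous, and the libvirt provider are illustrative values, not part of the commit):

  CEPH_ANSIBLE_BRANCH=stable-3.0 CEPH_DEV_BRANCH=luminous \
      tox -e xenial-filestore-create -- --provider=libvirt

Anything after "--" is passed through as {posargs} in place of the default --provider=virtualbox, and unset variables fall back to the defaults in the ini (master for both branches, latest for CEPH_DEV_SHA1).
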
diff --git a/src/ceph-volume/ceph_volume/tests/functional/tox.ini b/src/ceph-volume/ceph_volume/tests/functional/tox.ini
deleted file mode 100644 (file)
index 6fda7dc..0000000
+++ /dev/null
@@ -1,54 +0,0 @@
-[tox]
-envlist = {centos7,xenial}-{filestore,bluestore}-{create,prepare_activate}
-skipsdist = True
-
-[testenv]
-whitelist_externals =
-    vagrant
-    bash
-    git
-passenv=*
-setenv=
-  ANSIBLE_SSH_ARGS = -F {changedir}/vagrant_ssh_config
-  ANSIBLE_STDOUT_CALLBACK = debug
-  ANSIBLE_RETRY_FILES_ENABLED = False
-  VAGRANT_CWD = {changedir}
-  CEPH_VOLUME_DEBUG = 1
-deps=
-  ansible==2.4.1
-  testinfra==1.7.1
-  pytest-xdist
-changedir=
-  centos7-filestore-create: {toxinidir}/centos7/create
-  xenial-filestore-create: {toxinidir}/xenial/create
-  xenial-bluestore-create: {toxinidir}/xenial/bluestore
-  centos7-bluestore-create: {toxinidir}/centos7/bluestore
-  # TODO: these are placeholders for now, eventually we want to
-  # test the prepare/activate workflow of ceph-volume as well
-  xenial-prepare_activate: {toxinidir}/xenial/prepare_activate
-  centos7-prepare_activate: {toxinidir}/xenial/prepare_activate
-commands=
-  git clone -b {env:CEPH_ANSIBLE_BRANCH:master} --single-branch https://github.com/ceph/ceph-ansible.git {envdir}/tmp/ceph-ansible
-
-  vagrant up --no-provision {posargs:--provider=virtualbox}
-  bash {toxinidir}/scripts/generate_ssh_config.sh {changedir}
-
-  # create logical volumes to test with on the vms
-  ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/lvm_setup.yml
-
-  # use ceph-ansible to deploy a ceph cluster on the vms
-  ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/site.yml.sample --extra-vars "fetch_directory={changedir}/fetch ceph_dev_branch={env:CEPH_DEV_BRANCH:master} ceph_dev_sha1={env:CEPH_DEV_SHA1:latest}"
-
-  # prepare nodes for testing with testinfra
-  ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/setup.yml
-
-  # test cluster state using ceph-ansible tests
-  testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/tests
-
-  # reboot all vms
-  vagrant reload --no-provision
-
-  # retest to ensure cluster came back up correctly after rebooting
-  testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/tests
-
-  vagrant destroy --force