ceph-volume tests use granular env vars for vagrant (20864/head)
author    Alfredo Deza <adeza@redhat.com>  Tue, 13 Mar 2018 10:57:57 +0000 (06:57 -0400)
committer Alfredo Deza <adeza@redhat.com>  Tue, 13 Mar 2018 10:57:57 +0000 (06:57 -0400)
Fixes the error that would inject an extra (empty) argument into the vagrant
command when the environment variable is not present: the catch-all
VAGRANT_GLOBAL_EXTRA_FLAGS defaulted to "", which was passed along to vagrant
as a stray argument. Each vagrant invocation now gets its own variable
(VAGRANT_UP_FLAGS, VAGRANT_RELOAD_FLAGS, VAGRANT_DESTROY_FLAGS) whose default
is the flag that was previously hard-coded.

Signed-off-by: Alfredo Deza <adeza@redhat.com>
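
A minimal sketch of how the new per-command variables might be set when
invoking these tox environments (the tox environment name is a placeholder and
--no-parallel is only an illustrative vagrant flag; neither is part of this
change). When a variable is unset, tox substitutes its quoted default, e.g.
"--no-provision", so vagrant no longer receives the stray empty argument that
the old VAGRANT_GLOBAL_EXTRA_FLAGS default of "" produced:

    VAGRANT_UP_FLAGS="--no-provision --no-parallel" \
    VAGRANT_DESTROY_FLAGS="--force" \
    tox -e <tox-env> -- --provider=virtualbox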
src/ceph-volume/ceph_volume/tests/functional/lvm/tox.ini
src/ceph-volume/ceph_volume/tests/functional/simple/tox.ini

diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/tox.ini b/src/ceph-volume/ceph_volume/tests/functional/lvm/tox.ini
index f5d44e68556ccc360f1df1550c1c3a855b130a72..683ef840a8bc6cf3a11f8d8c3d8b04683d772419 100644
--- a/src/ceph-volume/ceph_volume/tests/functional/lvm/tox.ini
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/tox.ini
@@ -39,7 +39,7 @@ changedir=
 commands=
   git clone -b {env:CEPH_ANSIBLE_BRANCH:master} --single-branch https://github.com/ceph/ceph-ansible.git {envdir}/tmp/ceph-ansible
 
-  vagrant up {env:VAGRANT_GLOBAL_EXTRA_FLAGS:""} --no-provision {posargs:--provider=virtualbox}
+  vagrant up {env:VAGRANT_UP_FLAGS:"--no-provision"} {posargs:--provider=virtualbox}
   bash {toxinidir}/../scripts/generate_ssh_config.sh {changedir}
 
   # create logical volumes to test with on the vms
@@ -58,7 +58,7 @@ commands=
   testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/tests
 
   # reboot all vms
-  vagrant reload {env:VAGRANT_GLOBAL_EXTRA_FLAGS:""} --no-provision
+  vagrant reload {env:VAGRANT_RELOAD_FLAGS:"--no-provision"}
 
   # retest to ensure cluster came back up correctly after rebooting
   testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/tests
@@ -69,4 +69,4 @@ commands=
   # retest to ensure cluster came back up correctly
   testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/tests
 
-  vagrant destroy {env:VAGRANT_GLOBAL_EXTRA_FLAGS:""} --force
+  vagrant destroy {env:VAGRANT_DESTROY_FLAGS:"--force"}
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/tox.ini b/src/ceph-volume/ceph_volume/tests/functional/simple/tox.ini
index 21d9c3a709fa12fb30680219cc39d930b3d7adbc..bfb265fe987aa60ca63d0bb5ed94b1b39c488e6b 100644
--- a/src/ceph-volume/ceph_volume/tests/functional/simple/tox.ini
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/tox.ini
@@ -36,7 +36,7 @@ changedir=
 commands=
   git clone -b {env:CEPH_ANSIBLE_BRANCH:master} --single-branch https://github.com/ceph/ceph-ansible.git {envdir}/tmp/ceph-ansible
 
-  vagrant up {env:VAGRANT_GLOBAL_EXTRA_FLAGS:""} --no-provision {posargs:--provider=virtualbox}
+  vagrant up {env:VAGRANT_UP_FLAGS:"--no-provision"} {posargs:--provider=virtualbox}
   bash {toxinidir}/../scripts/generate_ssh_config.sh {changedir}
 
   # use ceph-ansible to deploy a ceph cluster on the vms
@@ -52,7 +52,7 @@ commands=
   ansible-playbook -vv -i {changedir}/hosts {changedir}/test.yml
 
   # reboot all vms
-  vagrant reload {env:VAGRANT_GLOBAL_EXTRA_FLAGS:""} --no-provision
+  vagrant reload {env:VAGRANT_RELOAD_FLAGS:"--no-provision"}
 
   # wait 2 minutes for services to be ready
   sleep 120
@@ -60,4 +60,4 @@ commands=
   # retest to ensure cluster came back up correctly after rebooting
   testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/tests
 
-  vagrant destroy {env:VAGRANT_GLOBAL_EXTRA_FLAGS:""} --force
+  vagrant destroy {env:VAGRANT_DESTROY_FLAGS:"--force"}