From: Alfredo Deza
Date: Mon, 12 Mar 2018 14:06:47 +0000 (-0400)
Subject: ceph-volume tests add optional flags for vagrant
X-Git-Tag: v13.1.0~586^2
X-Git-Url: http://git-server-git.apps.pok.os.sepia.ceph.com/?a=commitdiff_plain;h=5fbea40fc60fa12ef6c649dd816f988df241ac5c;p=ceph.git

ceph-volume tests add optional flags for vagrant

Signed-off-by: Alfredo Deza
---

diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/tox.ini b/src/ceph-volume/ceph_volume/tests/functional/lvm/tox.ini
index a2c08e753070..f5d44e68556c 100644
--- a/src/ceph-volume/ceph_volume/tests/functional/lvm/tox.ini
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/tox.ini
@@ -39,7 +39,7 @@ changedir=
 commands=
   git clone -b {env:CEPH_ANSIBLE_BRANCH:master} --single-branch https://github.com/ceph/ceph-ansible.git {envdir}/tmp/ceph-ansible
 
-  vagrant up --no-provision {posargs:--provider=virtualbox}
+  vagrant up {env:VAGRANT_GLOBAL_EXTRA_FLAGS:""} --no-provision {posargs:--provider=virtualbox}
   bash {toxinidir}/../scripts/generate_ssh_config.sh {changedir}
 
   # create logical volumes to test with on the vms
@@ -58,7 +58,7 @@ commands=
   testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/tests
 
   # reboot all vms
-  vagrant reload --no-provision
+  vagrant reload {env:VAGRANT_GLOBAL_EXTRA_FLAGS:""} --no-provision
 
   # retest to ensure cluster came back up correctly after rebooting
   testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/tests
@@ -69,4 +69,4 @@ commands=
   # retest to ensure cluster came back up correctly
   testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/tests
 
-  vagrant destroy --force
+  vagrant destroy {env:VAGRANT_GLOBAL_EXTRA_FLAGS:""} --force
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/tox.ini b/src/ceph-volume/ceph_volume/tests/functional/simple/tox.ini
index e8e62dc12796..21d9c3a709fa 100644
--- a/src/ceph-volume/ceph_volume/tests/functional/simple/tox.ini
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/tox.ini
@@ -36,7 +36,7 @@ changedir=
 commands=
   git clone -b {env:CEPH_ANSIBLE_BRANCH:master} --single-branch https://github.com/ceph/ceph-ansible.git {envdir}/tmp/ceph-ansible
 
-  vagrant up --no-provision {posargs:--provider=virtualbox}
+  vagrant up {env:VAGRANT_GLOBAL_EXTRA_FLAGS:""} --no-provision {posargs:--provider=virtualbox}
   bash {toxinidir}/../scripts/generate_ssh_config.sh {changedir}
 
   # use ceph-ansible to deploy a ceph cluster on the vms
@@ -52,7 +52,7 @@ commands=
   ansible-playbook -vv -i {changedir}/hosts {changedir}/test.yml
 
   # reboot all vms
-  vagrant reload --no-provision
+  vagrant reload {env:VAGRANT_GLOBAL_EXTRA_FLAGS:""} --no-provision
 
   # wait 2 minutes for services to be ready
   sleep 120
@@ -60,4 +60,4 @@ commands=
   # retest to ensure cluster came back up correctly after rebooting
   testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/tests
 
-  vagrant destroy --force
+  vagrant destroy {env:VAGRANT_GLOBAL_EXTRA_FLAGS:""} --force
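
Usage note: tox expands {env:VAGRANT_GLOBAL_EXTRA_FLAGS:""} to the value of the
VAGRANT_GLOBAL_EXTRA_FLAGS environment variable when it is set, and to the empty
default otherwise, so existing invocations are unaffected. As a rough sketch (the
flag value and tox environment name below are illustrative, not part of this
change), extra vagrant flags could be passed like this:

    VAGRANT_GLOBAL_EXTRA_FLAGS="--no-color" \
        tox -c src/ceph-volume/ceph_volume/tests/functional/lvm/tox.ini -e centos7-filestore-create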