From: Alfredo Deza
Date: Wed, 29 May 2019 12:56:37 +0000 (-0400)
Subject: tests: pass --ssh-config to pytest to resolve hosts when connecting
X-Git-Tag: v12.2.13~194^2
X-Git-Url: http://git-server-git.apps.pok.os.sepia.ceph.com/?a=commitdiff_plain;h=1cd578e465a567a3e79a76d8946fc4d2a9e8f633;p=ceph.git

tests: pass --ssh-config to pytest to resolve hosts when connecting

Signed-off-by: Alfredo Deza
(cherry picked from commit ed708e520999c145c3cf8a0c51e4605f275a1771)
---

diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/tox.ini b/src/ceph-volume/ceph_volume/tests/functional/batch/tox.ini
index db965243686..b8564ec5eab 100644
--- a/src/ceph-volume/ceph_volume/tests/functional/batch/tox.ini
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/tox.ini
@@ -48,20 +48,20 @@ commands=
   # prepare nodes for testing with testinfra
   ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/setup.yml

-  # test cluster state using testinfra
-  py.test -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {toxinidir}/../tests
+  # test cluster state using testinfra
+  py.test -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests

   # reboot all vms - attempt
   bash {toxinidir}/../scripts/vagrant_reload.sh {env:VAGRANT_UP_FLAGS:"--no-provision"} {posargs:--provider=virtualbox}

   # retest to ensure cluster came back up correctly after rebooting
-  py.test -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {toxinidir}/../tests
+  py.test -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests

   # destroy an OSD, zap it's device and recreate it using it's ID
   ansible-playbook -vv -i {changedir}/hosts {changedir}/test.yml

   # retest to ensure cluster came back up correctly
-  py.test -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {toxinidir}/../tests
+  py.test -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests

   # test zap OSDs by ID
   ansible-playbook -vv -i {changedir}/hosts {changedir}/test_zap.yml
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/tox.ini b/src/ceph-volume/ceph_volume/tests/functional/lvm/tox.ini
index d61c23719d3..1c279edc331 100644
--- a/src/ceph-volume/ceph_volume/tests/functional/lvm/tox.ini
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/tox.ini
@@ -57,18 +57,18 @@ commands=
   ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/setup.yml

   # test cluster state using testinfra
-  py.test -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {toxinidir}/../tests
+  py.test -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests

   # reboot all vms - attempt
   bash {toxinidir}/../scripts/vagrant_reload.sh {env:VAGRANT_UP_FLAGS:"--no-provision"} {posargs:--provider=virtualbox}

   # retest to ensure cluster came back up correctly after rebooting
-  py.test -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {toxinidir}/../tests
+  py.test -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests

   # destroy an OSD, zap it's device and recreate it using it's ID
   ansible-playbook -vv -i {changedir}/hosts {changedir}/test.yml

   # retest to ensure cluster came back up correctly
-  py.test -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {toxinidir}/../tests
+  py.test -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests

   vagrant destroy --force
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/tox.ini b/src/ceph-volume/ceph_volume/tests/functional/simple/tox.ini
index 2856d9ad0cb..4cd5be55646 100644
--- a/src/ceph-volume/ceph_volume/tests/functional/simple/tox.ini
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/tox.ini
@@ -47,7 +47,7 @@ commands=
   ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/setup.yml

   # test cluster state testinfra
-  py.test -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {toxinidir}/../tests
+  py.test -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests

   # make ceph-volume simple take over all the OSDs that got deployed, disabling ceph-disk
   ansible-playbook -vv -i {changedir}/hosts {changedir}/test.yml
@@ -59,6 +59,6 @@ commands=
   sleep 120

   # retest to ensure cluster came back up correctly after rebooting
-  py.test -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {toxinidir}/../tests
+  py.test -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests

   vagrant destroy --force
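
---
The vagrant_ssh_config file that --ssh-config points testinfra at is a plain
OpenSSH client configuration describing the Vagrant guests. The patch assumes
it already exists in each scenario's {changedir}; a minimal sketch of how such
a file is typically produced, assuming the VMs are already up:

    vagrant ssh-config > {changedir}/vagrant_ssh_config

With that file in place, testinfra's ansible connection backend can resolve
the hostnames listed in {changedir}/hosts to the Vagrant-managed SSH
endpoints instead of relying on DNS.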