From: Dimitri Savineau
Date: Wed, 21 Jul 2021 21:07:24 +0000 (-0400)
Subject: ceph-volume/tests: use pytest rerunfailures
X-Git-Tag: v16.2.6~123^2~1
X-Git-Url: http://git-server-git.apps.pok.os.sepia.ceph.com/?a=commitdiff_plain;h=c335fa6051c3f4aeac72c80d41312ad78af889cb;p=ceph.git

ceph-volume/tests: use pytest rerunfailures

We already install the dependency from the ceph-ansible requirements.txt,
and to avoid false positives (like after rebooting a node) we can retry
failing tests.

Signed-off-by: Dimitri Savineau

(cherry picked from commit 95056a24e4fbc19307f5b32724bfdb459a42f7ab)
---
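Note: the --reruns/--reruns-delay options added below come from the
pytest-rerunfailures plugin, which is already installed through the
ceph-ansible requirements.txt. As a minimal sketch (not part of this
change), the same retry behaviour can also be requested per test with
the plugin's flaky marker; the test name, service name and the testinfra
"host" fixture used here are illustrative:

    import pytest

    # Rerun this check up to 5 times, waiting 10 seconds between
    # attempts, mirroring "--reruns 5 --reruns-delay 10" on the
    # py.test command line.
    @pytest.mark.flaky(reruns=5, reruns_delay=10)
    def test_osd_service_is_running(host):
        # "host" is the testinfra fixture; the service name is illustrative.
        assert host.service("ceph-osd@0").is_running

The command-line flags used in the tox environments apply the same retry
policy to every collected test, which is what we want when a test fails
transiently right after a node reboot.
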
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/tox.ini b/src/ceph-volume/ceph_volume/tests/functional/batch/tox.ini
index 761339b0c48f..508d1b4c6cd0 100644
--- a/src/ceph-volume/ceph_volume/tests/functional/batch/tox.ini
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/tox.ini
@@ -51,7 +51,7 @@ commands=
   ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/setup.yml
 
   # test cluster state using testinfra
-  py.test -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests
+  py.test --reruns 5 --reruns-delay 10 -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests
 
   # reboot all vms - attempt
   bash {toxinidir}/../scripts/vagrant_reload.sh {env:VAGRANT_UP_FLAGS:"--no-provision"} {posargs:--provider=virtualbox}
@@ -60,13 +60,13 @@ commands=
   sleep 30
 
   # retest to ensure cluster came back up correctly after rebooting
-  py.test -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests
+  py.test --reruns 5 --reruns-delay 10 -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests
 
   # destroy an OSD, zap it's device and recreate it using it's ID
   ansible-playbook -vv -i {changedir}/hosts {changedir}/test.yml
 
   # retest to ensure cluster came back up correctly
-  py.test -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests
+  py.test --reruns 5 --reruns-delay 10 -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests
 
   # test zap OSDs by ID
   ansible-playbook -vv -i {changedir}/hosts {changedir}/test_zap.yml
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/tox.ini b/src/ceph-volume/ceph_volume/tests/functional/lvm/tox.ini
index 4cf6387832c8..bec30e6d7cb4 100644
--- a/src/ceph-volume/ceph_volume/tests/functional/lvm/tox.ini
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/tox.ini
@@ -51,7 +51,7 @@ commands=
   ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/setup.yml
 
   # test cluster state using testinfra
-  py.test -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests
+  py.test --reruns 5 --reruns-delay 10 -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests
 
   # reboot all vms - attempt
   bash {toxinidir}/../scripts/vagrant_reload.sh {env:VAGRANT_UP_FLAGS:"--no-provision"} {posargs:--provider=virtualbox}
@@ -60,12 +60,12 @@ commands=
   sleep 30
 
   # retest to ensure cluster came back up correctly after rebooting
-  py.test -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests
+  py.test --reruns 5 --reruns-delay 10 -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests
 
   # destroy an OSD, zap it's device and recreate it using it's ID
   ansible-playbook -vv -i {changedir}/hosts {changedir}/test.yml
 
   # retest to ensure cluster came back up correctly
-  py.test -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests
+  py.test --reruns 5 --reruns-delay 10 -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests
 
   vagrant destroy {env:VAGRANT_DESTROY_FLAGS:"--force"}
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/tox.ini b/src/ceph-volume/ceph_volume/tests/functional/simple/tox.ini
index 7c106ccbe776..1fdfe26a8768 100644
--- a/src/ceph-volume/ceph_volume/tests/functional/simple/tox.ini
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/tox.ini
@@ -41,7 +41,7 @@ commands=
   ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/setup.yml
 
   # test cluster state testinfra
-  py.test -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests
+  py.test --reruns 5 --reruns-delay 10 -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests
 
   # make ceph-volume simple take over all the OSDs that got deployed, disabling ceph-disk
   ansible-playbook -vv -i {changedir}/hosts {changedir}/test.yml
@@ -53,6 +53,6 @@ commands=
   sleep 120
 
   # retest to ensure cluster came back up correctly after rebooting
-  py.test -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests
+  py.test --reruns 5 --reruns-delay 10 -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests
 
   vagrant destroy {env:VAGRANT_DESTROY_FLAGS:"--force"}