git.apps.os.sepia.ceph.com Git - ceph-ansible.git/commitdiff
tests: update testinfra release
author    Dimitri Savineau <dsavinea@redhat.com>
          Tue, 30 Apr 2019 14:24:25 +0000 (10:24 -0400)
committer Guillaume Abrioux <gabrioux@redhat.com>
          Mon, 20 May 2019 11:04:58 +0000 (13:04 +0200)
In order to support ansible 2.8 with testinfra we need to use the
latest release (3.0.x).
Add the --ssh-config option to py.test.
Also bump the pytest and pytest-xdist versions.

Signed-off-by: Dimitri Savineau <dsavinea@redhat.com>
tests/functional/tests/mon/test_mons.py
tests/requirements.txt
tox-dashboard.ini
tox-update.ini
tox.ini

diff --git a/tests/functional/tests/mon/test_mons.py b/tests/functional/tests/mon/test_mons.py
index 3e7735025fea4cb386f207bbbe124ce07ead8598..791eb8d0bb329dec27de1d34d91fad541dd4829c 100644
--- a/tests/functional/tests/mon/test_mons.py
+++ b/tests/functional/tests/mon/test_mons.py
@@ -29,10 +29,10 @@ class TestMons(object):
         output = host.check_output(cmd)
         assert output.strip().startswith("cluster")
 
-    def test_ceph_config_has_inital_members_line(self, node, File, setup):
-        assert File(setup["conf_path"]).contains("^mon initial members = .*$")
+    def test_ceph_config_has_inital_members_line(self, node, host, setup):
+        assert host.file(setup["conf_path"]).contains("^mon initial members = .*$")
 
-    def test_initial_members_line_has_correct_value(self, node, host, File, setup):  # noqa E501
+    def test_initial_members_line_has_correct_value(self, node, host, setup):
         mon_initial_members_line = host.check_output("grep 'mon initial members = ' /etc/ceph/{cluster}.conf".format(cluster=setup['cluster_name']))  # noqa E501
         result = True
         for host in node["vars"]["groups"]["mons"]:
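The hunk above tracks a testinfra API change: newer testinfra releases removed the standalone module fixtures (File, Package, Service, ...) in favor of the single host fixture, which is exactly the migration shown here. A minimal sketch of the new style, with an illustrative path that is not taken from this repository:

    # Sketch of the current testinfra fixture style: everything goes
    # through the `host` fixture instead of per-module fixtures like File.
    def test_ceph_conf_has_initial_members(host):
        ceph_conf = host.file("/etc/ceph/ceph.conf")  # replaces File(...)
        assert ceph_conf.exists
        assert ceph_conf.contains("^mon initial members = .*$")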
diff --git a/tests/requirements.txt b/tests/requirements.txt
index 520e292c3f67917f54b0e8e202e33796e7fc3225..bd472792442546cf8618646c987ab8621dd2a7b0 100644
--- a/tests/requirements.txt
+++ b/tests/requirements.txt
@@ -1,10 +1,11 @@
 # These are Python requirements needed to run the functional tests
 six==1.10.0
-testinfra==1.19.0
-pytest-xdist==1.27.0
-pytest==3.6.1
+testinfra>=3.0,<3.1
+pytest-xdist==1.28.0
+pytest>=4.4,<4.5
 notario>=0.0.13
 ansible>=2.8,<2.9
 netaddr
 mock
 jmespath
+paramiko
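(The new paramiko entry is presumably needed because recent testinfra releases treat paramiko as an optional dependency, while the ssh/paramiko connection backends still import it at run time.)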
diff --git a/tox-dashboard.ini b/tox-dashboard.ini
index 656c2bdd8deda4d48a06abd733702dcf4c18adc2..def8690a3a0a4ca15ff536b0ed99c6a2785653a0 100644
--- a/tox-dashboard.ini
+++ b/tox-dashboard.ini
@@ -64,6 +64,6 @@ commands=
       ceph_dev_sha1={env:UPDATE_CEPH_DEV_SHA1:latest} \
   "
 
-  bash -c "CEPH_STABLE_RELEASE={env:CEPH_STABLE_RELEASE:nautilus} py.test -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} {toxinidir}/tests/functional/tests"
+  bash -c "CEPH_STABLE_RELEASE={env:CEPH_STABLE_RELEASE:nautilus} py.test -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests"
 
   vagrant destroy --force
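The --ssh-config flag added throughout these tox files is testinfra's own pytest option; it points the connection backends at an ssh_config file such as the one Vagrant can dump with `vagrant ssh-config`. The same mechanism is reachable through the testinfra API. A minimal sketch, assuming a vagrant_ssh_config file that defines a host alias mon0 (both the alias and the vagrant user are assumptions, not taken from this change):

    # Illustrative: connect through an ssh_config file via testinfra's API.
    import testinfra

    # "mon0" is a hypothetical Host entry in vagrant_ssh_config.
    host = testinfra.get_host("paramiko://vagrant@mon0",
                              ssh_config="vagrant_ssh_config")
    print(host.check_output("hostname"))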
diff --git a/tox-update.ini b/tox-update.ini
index 030db95fb1d46e233422eda383092060cdaa34da..0ef2f3c07bbadd4ab77bea04ec14d219dad80fc3 100644
--- a/tox-update.ini
+++ b/tox-update.ini
@@ -80,6 +80,6 @@ commands=
       ceph_dev_sha1={env:UPDATE_CEPH_DEV_SHA1:latest} \
   "
 
-  bash -c "CEPH_STABLE_RELEASE={env:UPDATE_CEPH_STABLE_RELEASE:nautilus} py.test -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} {toxinidir}/tests/functional/tests"
+  bash -c "CEPH_STABLE_RELEASE={env:UPDATE_CEPH_STABLE_RELEASE:nautilus} py.test -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests"
 
   vagrant destroy --force
diff --git a/tox.ini b/tox.ini
index d2ab7018f2d88a57d21d40dc1f215799cc2dec84..a716ad5bccd18e82eebf30394576384572dd268e 100644
--- a/tox.ini
+++ b/tox.ini
@@ -51,7 +51,7 @@ commands=
   # wait 30sec for services to be ready
   sleep 30
   # test cluster state using ceph-ansible tests
-  py.test -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/tests
+  py.test -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts --ssh-config={changedir}/vagrant_ssh_config {envdir}/tmp/ceph-ansible/tests/functional/tests
 
   # install ceph-ansible@master requirements
   pip install -r {toxinidir}/tests/requirements.txt
@@ -68,13 +68,13 @@ commands=
   "
 
   # test cluster state again using ceph-ansible tests
-  bash -c "CEPH_STABLE_RELEASE=nautilus py.test -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {toxinidir}/tests/functional/tests"
+  bash -c "CEPH_STABLE_RELEASE=nautilus py.test -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests"
 
   # reboot all vms
   ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/reboot.yml
 
   # retest to ensure cluster came back up correctly after rebooting
-  bash -c "CEPH_STABLE_RELEASE=nautilus py.test -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {toxinidir}/tests/functional/tests"
+  bash -c "CEPH_STABLE_RELEASE=nautilus py.test -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests"
 
   vagrant destroy --force
 
@@ -138,7 +138,7 @@ commands=
       ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \
   "
   # test that the cluster can be redeployed in a healthy state
-  py.test -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} {toxinidir}/tests/functional/tests
+  py.test -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests
 
 [purge-lvm]
 commands=
@@ -165,7 +165,7 @@ commands=
       ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \
   "
   # test that the cluster can be redeployed in a healthy state
-  py.test -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} {toxinidir}/tests/functional/tests
+  py.test -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests
 
 [shrink-mon]
 commands=
@@ -196,7 +196,7 @@ commands=
       ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \
   "
 
-  py.test -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts-switch-to-containers {toxinidir}/tests/functional/tests
+  py.test -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts-switch-to-containers --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests
 
 [add-mons]
 commands=
@@ -211,7 +211,7 @@ commands=
       ceph_dev_branch={env:CEPH_DEV_BRANCH:master} \
       ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \
       "
-  py.test -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts-2 {toxinidir}/tests/functional/tests
+  py.test -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts-2 --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests
 
 [add-osds]
 commands=
@@ -227,7 +227,7 @@ commands=
       ceph_dev_branch={env:CEPH_DEV_BRANCH:master} \
       ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \
       "
-  py.test -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts-2 {toxinidir}/tests/functional/tests
+  py.test -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts-2 --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests
 
 [add-mgrs]
 commands=
@@ -257,7 +257,7 @@ commands=
       ceph_dev_branch={env:CEPH_DEV_BRANCH:master} \
       ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \
       "
-  py.test -n 8 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts-2 {toxinidir}/tests/functional/tests
+  py.test -n 8 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts-2 --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests
 
 [add-rbdmirrors]
 commands=
@@ -272,7 +272,7 @@ commands=
       ceph_dev_branch={env:CEPH_DEV_BRANCH:master} \
       ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \
       "
-  py.test -n 8 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts-2 {toxinidir}/tests/functional/tests
+  py.test -n 8 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts-2 --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests
 
 [add-rgws]
 commands=
@@ -287,7 +287,7 @@ commands=
       ceph_dev_branch={env:CEPH_DEV_BRANCH:master} \
       ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \
       "
-  py.test -n 8 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts-2 {toxinidir}/tests/functional/tests
+  py.test -n 8 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts-2 --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests
 
 [rgw-multisite]
 commands=
@@ -434,14 +434,14 @@ commands=
   # wait 30sec for services to be ready
   sleep 30
   # test cluster state using ceph-ansible tests
-  py.test -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} {toxinidir}/tests/functional/tests
+  py.test -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests
 
   # reboot all vms
   all_daemons: ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/reboot.yml
 
   # wait 30sec for services to be ready
   # retest to ensure cluster came back up correctly after rebooting
-  all_daemons: py.test -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} {toxinidir}/tests/functional/tests
+  all_daemons: py.test -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests
 
   # handlers/idempotency test
   all_daemons: ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "delegate_facts_host={env:DELEGATE_FACTS_HOST:True} fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} ceph_stable_release={env:CEPH_STABLE_RELEASE:nautilus} ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG_BIS:latest-bis-master} ceph_dev_branch={env:CEPH_DEV_BRANCH:master} ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} copy_admin_key={env:COPY_ADMIN_KEY:False}" --extra-vars @ceph-override.json