git.apps.os.sepia.ceph.com Git - ceph-ansible.git/commitdiff
Revert "tests: test `test_all_docker_osds_are_up_and_in()` from mon nodes"
authorGuillaume Abrioux <gabrioux@redhat.com>
Thu, 18 Oct 2018 13:43:36 +0000 (15:43 +0200)
committermergify[bot] <mergify[bot]@users.noreply.github.com>
Fri, 19 Oct 2018 00:12:43 +0000 (00:12 +0000)
This approach doesn't work with all scenarios because it compares the
expected number of OSDs local to a node against the global number of
OSDs found in the whole cluster.

This reverts commit b8ad35ceb99cdbd1644c79dd689b818f095ba8b8.

Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>
tests/functional/tests/mon/test_osds_from_mons.py [deleted file]
tests/functional/tests/osd/test_osds.py

diff --git a/tests/functional/tests/mon/test_osds_from_mons.py b/tests/functional/tests/mon/test_osds_from_mons.py
deleted file mode 100644 (file)
index f8eeab7..0000000
+++ /dev/null
@@ -1,28 +0,0 @@
-import pytest
-import json
-
-
-class TestOsdsFromMons(object):
-    def _get_nb_osd_up(self, osd_tree):
-        nb_up = 0
-        for n in osd_tree['nodes']:
-            if n['type'] == 'osd' and n['status'] == 'up':
-                nb_up += 1
-        return nb_up
-
-    @pytest.mark.no_docker
-    def test_all_osds_are_up_and_in(self, node, host):
-        cmd = "sudo ceph --cluster={cluster} --connect-timeout 5 osd tree -f json".format(cluster=node["cluster_name"])
-        output = json.loads(host.check_output(cmd))
-        nb_osd_up = self._get_nb_osd_up(output)
-        assert int(node["num_osds"]) == int(nb_osd_up)
-
-    @pytest.mark.docker
-    def test_all_docker_osds_are_up_and_in(self, node, host):
-        cmd = "sudo docker exec ceph-mon-{inventory_hostname} ceph --cluster={cluster} --connect-timeout 5 osd tree -f json".format(
-            cluster=node["cluster_name"],
-            inventory_hostname=node['vars']['inventory_hostname']
-        )
-        output = json.loads(host.check_output(cmd))
-        nb_osd_up = self._get_nb_osd_up(output)
-        assert node["num_osds"] == nb_osd_up
index 20fbeacb48d1de9e00bd95d3008f238217980e2f..851c7d7f520117661bdb2750c5a1b726fe3778b1 100644 (file)
@@ -47,3 +47,40 @@ class TestOSDs(object):
     @pytest.mark.lvm_scenario
     def test_ceph_volume_systemd_is_installed(self, node, host):
         host.exists('ceph-volume-systemd')
+
+    def _get_osd_id_from_host(self, node, osd_tree):
+        children = []
+        for n in osd_tree['nodes']:
+            if n['name'] == node['vars']['inventory_hostname'] and n['type'] == 'host':
+                children = n['children']
+        return children
+
+    def _get_nb_up_osds_from_ids(self, node, osd_tree):
+        nb_up = 0
+        ids = self._get_osd_id_from_host(node, osd_tree)
+        for n in osd_tree['nodes']:
+            if n['id'] in ids and n['status'] == 'up':
+                nb_up += 1
+        return nb_up
+
+    @pytest.mark.no_docker
+    def test_all_osds_are_up_and_in(self, node, host):
+        cmd = "sudo ceph --cluster={cluster} --connect-timeout 5 --keyring /var/lib/ceph/bootstrap-osd/{cluster}.keyring -n client.bootstrap-osd osd tree -f json".format(cluster=node["cluster_name"])
+        output = json.loads(host.check_output(cmd))
+        assert node["num_osds"] == self._get_nb_up_osds_from_ids(node, output)
+
+    @pytest.mark.docker
+    def test_all_docker_osds_are_up_and_in(self, node, host):
+        osd_scenario = node["vars"].get('osd_scenario', False)
+        if osd_scenario in ['lvm', 'lvm-batch']:
+            osd_id = "0"
+        else:
+            hostname = node["vars"]["inventory_hostname"]
+            osd_id = os.path.join(hostname+"-sda")
+
+        cmd = "sudo docker exec ceph-osd-{osd_id} ceph --cluster={cluster} --connect-timeout 5 --keyring /var/lib/ceph/bootstrap-osd/{cluster}.keyring -n client.bootstrap-osd osd tree -f json".format(
+            osd_id=osd_id,
+            cluster=node["cluster_name"]
+        )
+        output = json.loads(host.check_output(cmd))
+        assert node["num_osds"] == self._get_nb_up_osds_from_ids(node, output)