From: Guillaume Abrioux Date: Fri, 22 Jun 2018 23:43:49 +0000 (+0200) Subject: tests: refact test_all_*_osds_are_up_and_in X-Git-Tag: v3.2.0beta1~6 X-Git-Url: http://git.apps.os.sepia.ceph.com/?a=commitdiff_plain;h=fe79a5d24086fc61ae84a59bd61a053cddb62941;p=ceph-ansible.git tests: refact test_all_*_osds_are_up_and_in these tests are skipped on bluestore osds scenarios. they were going to fail anyway since they are run on mon nodes and `devices` is defined in inventory for each osd node. It means `num_devices * num_osd_hosts` returns `0`. The result is that the test expects to have 0 OSDs up. The idea here is to move these tests so they are run on OSD nodes. Each OSD node checks their respective OSD to be UP, if an OSD has 2 devices defined in `devices` variable, it means we are checking for 2 OSD to be up on that node, if each node has all its OSD up, we can say all OSD are up. Signed-off-by: Guillaume Abrioux --- diff --git a/tests/conftest.py b/tests/conftest.py index 9c4ca5d9a..22bf1def7 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -89,8 +89,6 @@ def node(host, request): num_devices = len(ansible_vars.get("devices", [])) if not num_devices: num_devices = len(ansible_vars.get("lvm_volumes", [])) - num_osd_hosts = len(ansible_vars["groups"]["osds"]) - total_osds = num_devices * num_osd_hosts cluster_name = ansible_vars.get("cluster", "ceph") conf_path = "/etc/ceph/{}.conf".format(cluster_name) if "osds" in group_names: @@ -116,8 +114,6 @@ def node(host, request): osd_ids=osd_ids, num_mons=num_mons, num_devices=num_devices, - num_osd_hosts=num_osd_hosts, - total_osds=total_osds, cluster_name=cluster_name, conf_path=conf_path, cluster_address=cluster_address, diff --git a/tests/functional/tests/mon/test_mons.py b/tests/functional/tests/mon/test_mons.py index 638112e22..e06e18cc8 100644 --- a/tests/functional/tests/mon/test_mons.py +++ b/tests/functional/tests/mon/test_mons.py @@ -40,22 +40,3 @@ class TestMons(object): result = False assert result - 
class TestOSDs(object):
    """Per-host OSD liveness checks.

    Each OSD node verifies that every device it defines (``devices`` or
    ``lvm_volumes``) corresponds to an OSD reported ``up`` in the cluster's
    OSD tree. If every node passes, all OSDs in the cluster are up.
    """

    def _get_osd_id_from_host(self, node, osd_tree):
        """Return the list of OSD ids attached to this host in ``osd tree``.

        Scans the ``nodes`` array of the JSON tree for the ``host`` entry
        whose ``name`` matches this node's inventory hostname and returns its
        ``children`` (the OSD ids). Returns an empty list when the host is
        not present in the tree, so callers can iterate safely instead of
        crashing with a ``TypeError`` on ``None``.
        """
        hostname = node['vars']['inventory_hostname']
        for entry in osd_tree['nodes']:
            if entry['type'] == 'host' and entry['name'] == hostname:
                return entry['children']
        # Host not (yet) registered in the CRUSH tree: no OSDs to count.
        return []

    def _get_nb_up_osds_from_ids(self, node, osd_tree):
        """Count how many of this host's OSDs have status ``up``."""
        ids = set(self._get_osd_id_from_host(node, osd_tree))
        # Host buckets carry no 'status' key; the id membership test
        # short-circuits before 'status' is accessed on them.
        return sum(1 for entry in osd_tree['nodes']
                   if entry['id'] in ids and entry['status'] == 'up')

    @pytest.mark.no_docker
    def test_all_osds_are_up_and_in(self, node, host):
        # Query the OSD tree with the bootstrap-osd keyring (present on OSD
        # nodes) and check that every device defined for this host maps to
        # an OSD that is 'up'.
        cmd = "sudo ceph --cluster={cluster} --connect-timeout 5 --keyring /var/lib/ceph/bootstrap-osd/{cluster}.keyring -n client.bootstrap-osd osd tree -f json".format(cluster=node["cluster_name"])
        output = json.loads(host.check_output(cmd))
        assert node["num_devices"] == self._get_nb_up_osds_from_ids(node, output)

    @pytest.mark.docker
    def test_all_docker_osds_are_up_and_in(self, node, host):
        # Same check as above, executed inside the OSD container.
        # NOTE(review): the container name hard-codes the '-sda' suffix, so
        # this assumes every OSD node runs an OSD container for /dev/sda —
        # TODO confirm against the container naming scheme.
        cmd = "sudo docker exec ceph-osd-{hostname}-sda ceph --cluster={cluster} --connect-timeout 5 --keyring /var/lib/ceph/bootstrap-osd/{cluster}.keyring -n client.bootstrap-osd osd tree -f json".format(
            hostname=node["vars"]["inventory_hostname"],
            cluster=node["cluster_name"]
        )
        output = json.loads(host.check_output(cmd))
        assert node["num_devices"] == self._get_nb_up_osds_from_ids(node, output)