git.apps.os.sepia.ceph.com Git - ceph.git/commitdiff
ceph-volume: use our own testinfra suite for functional testing
author Andrew Schoen <aschoen@redhat.com>
Wed, 27 Feb 2019 21:14:03 +0000 (15:14 -0600)
committer Andrew Schoen <aschoen@redhat.com>
Thu, 28 Feb 2019 18:57:24 +0000 (12:57 -0600)
This allows us to run a smaller subset of tests from the ones provided
by ceph-ansible and test only the things that pertain to ceph-volume.

Signed-off-by: Andrew Schoen <aschoen@redhat.com>
(cherry picked from commit ccfc8e6dc42b55f14fa3b4ef210cb639b9a7e20b)

src/ceph-volume/ceph_volume/tests/functional/batch/tox.ini
src/ceph-volume/ceph_volume/tests/functional/lvm/tox.ini
src/ceph-volume/ceph_volume/tests/functional/simple/tox.ini
src/ceph-volume/ceph_volume/tests/functional/tests/__init__.py [new file with mode: 0644]
src/ceph-volume/ceph_volume/tests/functional/tests/conftest.py [new file with mode: 0644]
src/ceph-volume/ceph_volume/tests/functional/tests/osd/__init__.py [new file with mode: 0644]
src/ceph-volume/ceph_volume/tests/functional/tests/osd/test_osds.py [new file with mode: 0644]

index 4c3af68115702ea7e12ef89c8304a50bf9261857..a2e0489404f9a5d415bff77c9e71f11fa03c1db0 100644 (file)
@@ -48,20 +48,20 @@ commands=
   # prepare nodes for testing with testinfra
   ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/setup.yml
 
-  # test cluster state using ceph-ansible tests
-  testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/tests
+  # test cluster state using testinfra 
+  testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {toxinidir}/../tests
 
   # reboot all vms - attempt
   bash {toxinidir}/../scripts/vagrant_reload.sh {env:VAGRANT_UP_FLAGS:"--no-provision"} {posargs:--provider=virtualbox}
 
   # retest to ensure cluster came back up correctly after rebooting
-  testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/tests
+  testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {toxinidir}/../tests
 
  # destroy an OSD, zap its device and recreate it using its ID
   ansible-playbook -vv -i {changedir}/hosts {changedir}/test.yml
 
   # retest to ensure cluster came back up correctly
-  testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/tests
+  testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {toxinidir}/../tests
 
   # test zap OSDs by ID
   ansible-playbook -vv -i {changedir}/hosts {changedir}/test_zap.yml
index 8df6d9772908a37cce8e39c8e66e7f557402a070..141f869badf46f764f68cad0752c5087cf3e0866 100644 (file)
@@ -56,19 +56,19 @@ commands=
   # prepare nodes for testing with testinfra
   ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/setup.yml
 
-  # test cluster state using ceph-ansible tests
-  testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/tests
+  # test cluster state using testinfra 
+  testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {toxinidir}/../tests
 
   # reboot all vms - attempt
   bash {toxinidir}/../scripts/vagrant_reload.sh {env:VAGRANT_UP_FLAGS:"--no-provision"} {posargs:--provider=virtualbox}
 
   # retest to ensure cluster came back up correctly after rebooting
-  testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/tests
+  testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {toxinidir}/../tests
 
  # destroy an OSD, zap its device and recreate it using its ID
   ansible-playbook -vv -i {changedir}/hosts {changedir}/test.yml
 
   # retest to ensure cluster came back up correctly
-  testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/tests
+  testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {toxinidir}/../tests
 
   vagrant destroy {env:VAGRANT_DESTROY_FLAGS:"--force"}
index aa50dfb397c8f703216072db50d4cfe0555b796b..7b60c2ee5f0d870d9e4b09c3da0be5fdd3e349d2 100644 (file)
@@ -46,8 +46,8 @@ commands=
   # prepare nodes for testing with testinfra
   ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/setup.yml
 
-  # test cluster state using ceph-ansible tests
-  testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/tests
+  # test cluster state using testinfra
+  testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {toxinidir}/../tests
 
   # make ceph-volume simple take over all the OSDs that got deployed, disabling ceph-disk
   ansible-playbook -vv -i {changedir}/hosts {changedir}/test.yml
@@ -59,6 +59,6 @@ commands=
   sleep 120
 
   # retest to ensure cluster came back up correctly after rebooting
-  testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/tests
+  testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {toxinidir}/../tests
 
   vagrant destroy {env:VAGRANT_DESTROY_FLAGS:"--force"}
diff --git a/src/ceph-volume/ceph_volume/tests/functional/tests/__init__.py b/src/ceph-volume/ceph_volume/tests/functional/tests/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/src/ceph-volume/ceph_volume/tests/functional/tests/conftest.py b/src/ceph-volume/ceph_volume/tests/functional/tests/conftest.py
new file mode 100644 (file)
index 0000000..b16ab7b
--- /dev/null
@@ -0,0 +1,109 @@
+import pytest
+import os
+
+
+@pytest.fixture()
+def node(host, request):
+    """
+    This fixture represents a single node in the ceph cluster. Using the
+    host.ansible fixture provided by testinfra, it can access all the ansible
+    variables provided to it by the specific test scenario being run.
+
+    You must include this fixture on any test that operates on a specific
+    type of node because it contains the logic to manage which tests a node
+    should run.
+    """
+    ansible_vars = host.ansible.get_variables()
+    # tox will pass in this environment variable. we need to do it this way
+    # because testinfra does not collect and provide ansible config passed in
+    # from using --extra-vars
+    ceph_stable_release = os.environ.get("CEPH_STABLE_RELEASE", "luminous")
+    group_names = ansible_vars["group_names"]
+    ceph_release_num = {
+        'jewel': 10,
+        'kraken': 11,
+        'luminous': 12,
+        'mimic': 13,
+        'dev': 99
+    }
+
+    # capture the initial/default state
+    test_is_applicable = False
+    for marker in request.node.iter_markers():
+        if marker.name in group_names or marker.name == 'all':
+            test_is_applicable = True
+            break
+    # Check if any markers on the test method exist in the node's group_names.
+    # If they do not, this test is not valid for the node being tested.
+    if not test_is_applicable:
+        reason = "%s: Not a valid test for node type: %s" % (
+            request.function, group_names)
+        pytest.skip(reason)
+
+    osd_ids = []
+    osds = []
+    cluster_address = ""
+    # I can assume eth1 because I know all the vagrant
+    # boxes we test with use that interface
+    address = host.interface("eth1").addresses[0]
+    subnet = ".".join(ansible_vars["public_network"].split(".")[0:-1])
+    num_mons = len(ansible_vars["groups"]["mons"])
+    num_osds = len(ansible_vars.get("devices", []))
+    if not num_osds:
+        num_osds = len(ansible_vars.get("lvm_volumes", []))
+    osds_per_device = ansible_vars.get("osds_per_device", 1)
+    num_osds = num_osds * osds_per_device
+
+    # If the number of devices doesn't map to the number of OSDs, allow tests
+    # to define that custom number, defaulting it to the value computed above
+    num_osds = ansible_vars.get('num_osds', num_osds)
+    cluster_name = ansible_vars.get("cluster", "ceph")
+    conf_path = "/etc/ceph/{}.conf".format(cluster_name)
+    if "osds" in group_names:
+        # I can assume eth2 because I know all the vagrant
+        # boxes we test with use that interface. OSDs are the only
+        # nodes that have this interface.
+        cluster_address = host.interface("eth2").addresses[0]
+        cmd = host.run('sudo ls /var/lib/ceph/osd/ | sed "s/.*-//"')
+        if cmd.rc == 0:
+            osd_ids = cmd.stdout.rstrip("\n").split("\n")
+            osds = osd_ids
+
+    data = dict(
+        address=address,
+        subnet=subnet,
+        vars=ansible_vars,
+        osd_ids=osd_ids,
+        num_mons=num_mons,
+        num_osds=num_osds,
+        cluster_name=cluster_name,
+        conf_path=conf_path,
+        cluster_address=cluster_address,
+        osds=osds,
+        ceph_stable_release=ceph_stable_release,
+        ceph_release_num=ceph_release_num,
+    )
+    return data
+
+
+def pytest_collection_modifyitems(session, config, items):
+    for item in items:
+        test_path = item.location[0]
+        if "mon" in test_path:
+            item.add_marker(pytest.mark.mons)
+        elif "osd" in test_path:
+            item.add_marker(pytest.mark.osds)
+        elif "mds" in test_path:
+            item.add_marker(pytest.mark.mdss)
+        elif "mgr" in test_path:
+            item.add_marker(pytest.mark.mgrs)
+        elif "rbd-mirror" in test_path:
+            item.add_marker(pytest.mark.rbdmirrors)
+        elif "rgw" in test_path:
+            item.add_marker(pytest.mark.rgws)
+        elif "nfs" in test_path:
+            item.add_marker(pytest.mark.nfss)
+        elif "iscsi" in test_path:
+            item.add_marker(pytest.mark.iscsigws)
+        else:
+            item.add_marker(pytest.mark.all)
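
For reference, here is a minimal, illustrative sketch of how a test module
consumes the node fixture above. It assumes a hypothetical module placed
under the osd/ directory (e.g. tests/osd/test_example.py), so the collection
hook tags it with pytest.mark.osds; the explicit marker shown below would
have the same effect. Either way, the fixture skips the test on any host
whose group_names do not match a marker on the test.

    import pytest

    @pytest.mark.osds
    def test_osd_ids_were_collected(node):
        # On non-OSD nodes the node fixture skips this test; on OSD nodes it
        # exposes the metadata it gathered, such as the OSD ids found on the
        # host. Illustrative assertion only: it assumes every configured
        # device became exactly one OSD on this node.
        assert len(node["osd_ids"]) == node["num_osds"]
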
diff --git a/src/ceph-volume/ceph_volume/tests/functional/tests/osd/__init__.py b/src/ceph-volume/ceph_volume/tests/functional/tests/osd/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/src/ceph-volume/ceph_volume/tests/functional/tests/osd/test_osds.py b/src/ceph-volume/ceph_volume/tests/functional/tests/osd/test_osds.py
new file mode 100644 (file)
index 0000000..0bc14aa
--- /dev/null
@@ -0,0 +1,61 @@
+import pytest
+import json
+
+
+class TestOSDs(object):
+
+    def test_ceph_osd_package_is_installed(self, node, host):
+        assert host.package("ceph-osd").is_installed
+
+    def test_osds_listen_on_public_network(self, node, host):
+        # TODO: figure out a way to parametrize this test
+        nb_port = (node["num_osds"] * 4)
+        assert host.check_output(
+            "netstat -lntp | grep ceph-osd | grep %s | wc -l" % (node["address"])) == str(nb_port)  # noqa E501
+
+    def test_osds_listen_on_cluster_network(self, node, host):
+        # TODO: figure out a way to parametrize this test
+        nb_port = (node["num_osds"] * 4)
+        assert host.check_output("netstat -lntp | grep ceph-osd | grep %s | wc -l" %  # noqa E501
+                                 (node["cluster_address"])) == str(nb_port)
+
+    def test_osd_services_are_running(self, node, host):
+        # TODO: figure out a way to parametrize node['osds'] for this test
+        for osd in node["osds"]:
+            assert host.service("ceph-osd@%s" % osd).is_running
+
+    def test_osd_are_mounted(self, node, host):
+        # TODO: figure out a way to parametrize node['osd_ids'] for this test
+        for osd_id in node["osd_ids"]:
+            osd_path = "/var/lib/ceph/osd/{cluster}-{osd_id}".format(
+                cluster=node["cluster_name"],
+                osd_id=osd_id,
+            )
+            assert host.mount_point(osd_path).exists
+
+    def test_ceph_volume_is_installed(self, node, host):
+        assert host.exists('ceph-volume')
+
+    def test_ceph_volume_systemd_is_installed(self, node, host):
+        assert host.exists('ceph-volume-systemd')
+
+    def _get_osd_id_from_host(self, node, osd_tree):
+        children = []
+        for n in osd_tree['nodes']:
+            if n['name'] == node['vars']['inventory_hostname'] and n['type'] == 'host':  # noqa E501
+                children = n['children']
+        return children
+
+    def _get_nb_up_osds_from_ids(self, node, osd_tree):
+        nb_up = 0
+        ids = self._get_osd_id_from_host(node, osd_tree)
+        for n in osd_tree['nodes']:
+            if n['id'] in ids and n['status'] == 'up':
+                nb_up += 1
+        return nb_up
+
+    def test_all_osds_are_up_and_in(self, node, host):
+        cmd = "sudo ceph --cluster={cluster} --connect-timeout 5 --keyring /var/lib/ceph/bootstrap-osd/{cluster}.keyring -n client.bootstrap-osd osd tree -f json".format(  # noqa E501
+            cluster=node["cluster_name"])
+        output = json.loads(host.check_output(cmd))
+        assert node["num_osds"] == self._get_nb_up_osds_from_ids(node, output)