# prepare nodes for testing with testinfra
ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/setup.yml
- # test cluster state using ceph-ansible tests
- testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/tests
+ # test cluster state using testinfra
+ testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {toxinidir}/../tests
# reboot all vms - attempt
bash {toxinidir}/../scripts/vagrant_reload.sh {env:VAGRANT_UP_FLAGS:"--no-provision"} {posargs:--provider=virtualbox}
# retest to ensure cluster came back up correctly after rebooting
- testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/tests
+ testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {toxinidir}/../tests
# destroy an OSD, zap its device and recreate it using its ID
ansible-playbook -vv -i {changedir}/hosts {changedir}/test.yml
# retest to ensure cluster came back up correctly
- testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/tests
+ testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {toxinidir}/../tests
# test zap OSDs by ID
ansible-playbook -vv -i {changedir}/hosts {changedir}/test_zap.yml
# prepare nodes for testing with testinfra
ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/setup.yml
- # test cluster state using ceph-ansible tests
- testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/tests
+ # test cluster state using testinfra
+ testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {toxinidir}/../tests
# reboot all vms - attempt
bash {toxinidir}/../scripts/vagrant_reload.sh {env:VAGRANT_UP_FLAGS:"--no-provision"} {posargs:--provider=virtualbox}
# retest to ensure cluster came back up correctly after rebooting
- testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/tests
+ testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {toxinidir}/../tests
# destroy an OSD, zap its device and recreate it using its ID
ansible-playbook -vv -i {changedir}/hosts {changedir}/test.yml
# retest to ensure cluster came back up correctly
- testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/tests
+ testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {toxinidir}/../tests
vagrant destroy {env:VAGRANT_DESTROY_FLAGS:"--force"}
# prepare nodes for testing with testinfra
ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/setup.yml
- # test cluster state using ceph-ansible tests
- testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/tests
+ # test cluster state using testinfra
+ testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {toxinidir}/../tests
# make ceph-volume simple take over all the OSDs that got deployed, disabling ceph-disk
ansible-playbook -vv -i {changedir}/hosts {changedir}/test.yml
sleep 120
# retest to ensure cluster came back up correctly after rebooting
- testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/tests
+ testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {toxinidir}/../tests
vagrant destroy {env:VAGRANT_DESTROY_FLAGS:"--force"}
--- /dev/null
+import pytest
+import os
+
+
+@pytest.fixture()
+def node(host, request):
+ """
+ This fixture represents a single node in the ceph cluster. Using the
+ host.ansible fixture provided by testinfra, it can access all the ansible
+ variables provided to it by the specific test scenario being run.
+
+ You must include this fixture in any test that operates on a specific
+ type of node, because it contains the logic that determines which tests
+ a node should run.
+ """
+ ansible_vars = host.ansible.get_variables()
+ # tox will pass in this environment variable. We need to do it this way
+ # because testinfra does not collect and provide the ansible config passed
+ # in via --extra-vars
+ ceph_stable_release = os.environ.get("CEPH_STABLE_RELEASE", "luminous")
+ group_names = ansible_vars["group_names"]
+ ceph_release_num = {
+ 'jewel': 10,
+ 'kraken': 11,
+ 'luminous': 12,
+ 'mimic': 13,
+ 'dev': 99
+ }
+
+ # capture the initial/default state
+ test_is_applicable = False
+ for marker in request.node.iter_markers():
+ if marker.name in group_names or marker.name == 'all':
+ test_is_applicable = True
+ break
+ # Check if any markers on the test method exist in the node's group_names.
+ # If they do not, this test is not valid for the node being tested.
+ if not test_is_applicable:
+ reason = "%s: Not a valid test for node type: %s" % (
+ request.function, group_names)
+ pytest.skip(reason)
+
+ osd_ids = []
+ osds = []
+ cluster_address = ""
+ # I can assume eth1 because I know all the vagrant
+ # boxes we test with use that interface
+ address = host.interface("eth1").addresses[0]
+ subnet = ".".join(ansible_vars["public_network"].split(".")[0:-1])
+ num_mons = len(ansible_vars["groups"]["mons"])
+ num_osds = len(ansible_vars.get("devices", []))
+ if not num_osds:
+ num_osds = len(ansible_vars.get("lvm_volumes", []))
+ osds_per_device = ansible_vars.get("osds_per_device", 1)
+ num_osds = num_osds * osds_per_device
+
+ # If the number of devices doesn't map to the number of OSDs, allow tests to
+ # define that custom number, defaulting to the ``num_osds`` computed above
+ num_osds = ansible_vars.get('num_osds', num_osds)
+ cluster_name = ansible_vars.get("cluster", "ceph")
+ conf_path = "/etc/ceph/{}.conf".format(cluster_name)
+ if "osds" in group_names:
+ # I can assume eth2 because I know all the vagrant
+ # boxes we test with use that interface. OSDs are the only
+ # nodes that have this interface.
+ cluster_address = host.interface("eth2").addresses[0]
+ cmd = host.run('sudo ls /var/lib/ceph/osd/ | sed "s/.*-//"')
+ if cmd.rc == 0:
+ osd_ids = cmd.stdout.rstrip("\n").split("\n")
+ osds = osd_ids
+
+ data = dict(
+ address=address,
+ subnet=subnet,
+ vars=ansible_vars,
+ osd_ids=osd_ids,
+ num_mons=num_mons,
+ num_osds=num_osds,
+ cluster_name=cluster_name,
+ conf_path=conf_path,
+ cluster_address=cluster_address,
+ osds=osds,
+ ceph_stable_release=ceph_stable_release,
+ ceph_release_num=ceph_release_num,
+ )
+ return data
+
+
+def pytest_collection_modifyitems(session, config, items):
+ for item in items:
+ test_path = item.location[0]
+ if "mon" in test_path:
+ item.add_marker(pytest.mark.mons)
+ elif "osd" in test_path:
+ item.add_marker(pytest.mark.osds)
+ elif "mds" in test_path:
+ item.add_marker(pytest.mark.mdss)
+ elif "mgr" in test_path:
+ item.add_marker(pytest.mark.mgrs)
+ elif "rbd-mirror" in test_path:
+ item.add_marker(pytest.mark.rbdmirrors)
+ elif "rgw" in test_path:
+ item.add_marker(pytest.mark.rgws)
+ elif "nfs" in test_path:
+ item.add_marker(pytest.mark.nfss)
+ elif "iscsi" in test_path:
+ item.add_marker(pytest.mark.iscsigws)
+ else:
+ item.add_marker(pytest.mark.all)
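
For illustration, a minimal sketch of a test module that consumes the ``node`` fixture above; the file path and assertion are hypothetical. Because the path contains "mon", ``pytest_collection_modifyitems`` tags it with ``pytest.mark.mons``, so the fixture skips it on any host whose group_names does not include "mons":

# hypothetical file: tests/mon/test_example.py


class TestExample(object):

    def test_ceph_conf_is_present(self, node, host):
        # conf_path is built by the fixture as /etc/ceph/<cluster>.conf;
        # on non-mon hosts the fixture calls pytest.skip() before this runs
        assert host.file(node["conf_path"]).exists
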
--- /dev/null
+import pytest
+import json
+
+
+class TestOSDs(object):
+
+ def test_ceph_osd_package_is_installed(self, node, host):
+ assert host.package("ceph-osd").is_installed
+
+ def test_osds_listen_on_public_network(self, node, host):
+ # TODO: figure out a way to parametrize this test
+ nb_port = (node["num_osds"] * 4)
+ assert host.check_output(
+ "netstat -lntp | grep ceph-osd | grep %s | wc -l" % (node["address"])) == str(nb_port) # noqa E501
+
+ def test_osds_listen_on_cluster_network(self, node, host):
+ # TODO: figure out a way to parametrize this test
+ nb_port = (node["num_osds"] * 4)
+ assert host.check_output("netstat -lntp | grep ceph-osd | grep %s | wc -l" % # noqa E501
+ (node["cluster_address"])) == str(nb_port)
+
+ def test_osd_services_are_running(self, node, host):
+ # TODO: figure out a way to parametrize node['osds'] for this test
+ for osd in node["osds"]:
+ assert host.service("ceph-osd@%s" % osd).is_running
+
+ def test_osd_are_mounted(self, node, host):
+ # TODO: figure out a way to parametrize node['osd_ids'] for this test
+ for osd_id in node["osd_ids"]:
+ osd_path = "/var/lib/ceph/osd/{cluster}-{osd_id}".format(
+ cluster=node["cluster_name"],
+ osd_id=osd_id,
+ )
+ assert host.mount_point(osd_path).exists
+
+ def test_ceph_volume_is_installed(self, node, host):
+ assert host.exists('ceph-volume')
+
+ def test_ceph_volume_systemd_is_installed(self, node, host):
+ assert host.exists('ceph-volume-systemd')
+
+ def _get_osd_id_from_host(self, node, osd_tree):
+ children = []
+ for n in osd_tree['nodes']:
+ if n['name'] == node['vars']['inventory_hostname'] and n['type'] == 'host': # noqa E501
+ children = n['children']
+ return children
+
+ def _get_nb_up_osds_from_ids(self, node, osd_tree):
+ nb_up = 0
+ ids = self._get_osd_id_from_host(node, osd_tree)
+ for n in osd_tree['nodes']:
+ if n['id'] in ids and n['status'] == 'up':
+ nb_up += 1
+ return nb_up
+
+ def test_all_osds_are_up_and_in(self, node, host):
+ cmd = "sudo ceph --cluster={cluster} --connect-timeout 5 --keyring /var/lib/ceph/bootstrap-osd/{cluster}.keyring -n client.bootstrap-osd osd tree -f json".format( # noqa E501
+ cluster=node["cluster_name"])
+ output = json.loads(host.check_output(cmd))
+ assert node["num_osds"] == self._get_nb_up_osds_from_ids(node, output)
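
For reference, the two helpers above assume the JSON emitted by ``ceph osd tree -f json`` has roughly the following shape: a flat ``nodes`` list mixing host entries, whose ``children`` list the OSD ids, and osd entries carrying a ``status``. The names and ids below are made up:

# illustrative `ceph osd tree -f json` output (values are made up)
osd_tree = {
    "nodes": [
        {"id": -2, "name": "osd0", "type": "host", "children": [0, 1]},
        {"id": 0, "name": "osd.0", "type": "osd", "status": "up"},
        {"id": 1, "name": "osd.1", "type": "osd", "status": "up"},
    ]
}
# for a node whose inventory_hostname is "osd0", _get_osd_id_from_host()
# returns [0, 1] and _get_nb_up_osds_from_ids() returns 2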