git-server-git.apps.pok.os.sepia.ceph.com Git - ceph-ansible.git/commitdiff
tests: introduce `ceph_status` fixture
author: Guillaume Abrioux <gabrioux@ibm.com>
Fri, 20 Jan 2023 10:07:31 +0000 (11:07 +0100)
committer: Teoman ONAY <tonay@redhat.com>
Wed, 31 May 2023 21:07:13 +0000 (23:07 +0200)
This avoids some duplicated code in various test_*_is_up() tests

Signed-off-by: Guillaume Abrioux <gabrioux@ibm.com>
tests/conftest.py
tests/functional/tests/mds/test_mds.py
tests/functional/tests/mgr/test_mgr.py
tests/functional/tests/nfs/test_nfs_ganesha.py
tests/functional/tests/rbd-mirror/test_rbd_mirror.py
tests/functional/tests/rgw/test_rgw.py

index 70e2345dbceb3b5c27c746f7640d650616fd0a8e..89194b02269689e4b7e118302d9c1403a27d549d 100644 (file)
@@ -2,6 +2,28 @@ import pytest
 import os
 
 
+@pytest.fixture
+def ceph_status(host, setup):
+    def _run(keyring,
+             name=None,
+             cluster='ceph',
+             container_binary='podman'):
+        containerized_deployment = setup["containerized_deployment"]
+        container_image = setup["container_image"]
+        client_name = ''
+        if name is not None:
+            client_name = f'-n {name}'
+        ceph_args = f"--connect-timeout 5 {client_name} -k {keyring} --cluster {cluster} -f json -s"
+
+        if containerized_deployment:
+            cmd = f"sudo {container_binary} run --rm -v /etc/ceph:/etc/ceph -v {keyring}:{keyring}:z --entrypoint=ceph {container_image} {ceph_args}"
+        else:
+            cmd = f"ceph {ceph_args}"
+        output = host.check_output(cmd)
+        return output
+    return _run
+
+
 def str_to_bool(val):
     try:
         val = val.lower()
@@ -24,6 +46,11 @@ def setup(host):
     ansible_vars = host.ansible.get_variables()
     ansible_facts = host.ansible("setup")
 
+    containerized_deployment = ansible_vars.get("containerized_deployment", False)
+    ceph_docker_registry = ansible_vars.get("ceph_docker_registry")
+    ceph_docker_image = ansible_vars.get("ceph_docker_image")
+    ceph_docker_image_tag = ansible_vars.get("ceph_docker_image_tag")
+    container_image = f"{ceph_docker_registry}/{ceph_docker_image}:{ceph_docker_image_tag}"
     docker = ansible_vars.get("docker")
     container_binary = ansible_vars.get("container_binary", "")
     osd_auto_discovery = ansible_vars.get("osd_auto_discovery")
@@ -68,6 +95,8 @@ def setup(host):
 
     data = dict(
         cluster_name=cluster_name,
+        containerized_deployment=containerized_deployment,
+        container_image=container_image,
         subnet=subnet,
         osd_ids=osd_ids,
         num_mons=num_mons,
index 0da32c3d23e62f8de0fd8fdeb8c1cff374e3be30..2e1da220aa48139a7f73e58886f300c60d6c78ad 100644 (file)
@@ -16,19 +16,10 @@ class TestMDSs(object):
         assert s.is_enabled
         assert s.is_running
 
-    def test_mds_is_up(self, node, host, setup):
-        hostname = node["vars"]["inventory_hostname"]
-        container_binary = setup['container_binary']
-        if node["docker"]:
-            container_exec_cmd = '{container_binary} exec ceph-mds-{hostname}'.format(  # noqa E501
-                hostname=hostname, container_binary=container_binary)
-        else:
-            container_exec_cmd = ''
-
-        cmd = "sudo {container_exec_cmd} ceph --name client.bootstrap-mds --keyring /var/lib/ceph/bootstrap-mds/{cluster}.keyring --cluster={cluster} --connect-timeout 5 -f json -s".format(  # noqa E501
-            container_exec_cmd=container_exec_cmd,
-            cluster=setup['cluster_name']
-        )
-        cluster_status = json.loads(host.check_output(cmd))
+    def test_mds_is_up(self, node, setup, ceph_status):
+        cluster = setup["cluster_name"]
+        name = 'client.bootstrap-mds'
+        output = ceph_status(f'/var/lib/ceph/bootstrap-mds/{cluster}.keyring', name=name)
+        cluster_status = json.loads(output)
         assert (cluster_status['fsmap'].get('up', 0) + cluster_status['fsmap'].get(  # noqa E501
             'up:standby', 0)) == len(node["vars"]["groups"]["mdss"])
index 49f28729ae1483ba3bbda5da820afe258b736f07..a14fed66985afb530b1d191dde62bcc943d4a0d7 100644 (file)
@@ -29,21 +29,11 @@ class TestMGRs(object):
         s = host.socket('tcp://%s:%s' % (setup["address"], port))
         assert s.is_listening
 
-    def test_mgr_is_up(self, node, host, setup):
+    def test_mgr_is_up(self, node, setup, ceph_status):
         hostname = node["vars"]["inventory_hostname"]
         cluster = setup["cluster_name"]
-        container_binary = setup["container_binary"]
-        if node['docker']:
-            container_exec_cmd = '{container_binary} exec ceph-mgr-{hostname}'.format(  # noqa E501
-                hostname=hostname, container_binary=container_binary)
-        else:
-            container_exec_cmd = ''
-        cmd = "sudo {container_exec_cmd} ceph --name mgr.{hostname} --keyring /var/lib/ceph/mgr/{cluster}-{hostname}/keyring --cluster={cluster} --connect-timeout 5 -f json -s".format(  # noqa E501
-            container_exec_cmd=container_exec_cmd,
-            hostname=node["vars"]["inventory_hostname"],
-            cluster=cluster
-        )
-        output_raw = host.check_output(cmd)
+        name = f"mgr.{hostname}"
+        output_raw = ceph_status(f'/var/lib/ceph/mgr/{cluster}-{hostname}/keyring', name=name)
         output_json = json.loads(output_raw)
 
         assert output_json['mgrmap']['available']
index 2b8c0c0356ded037c632a4d299d4eb33d29aedaa..fda75adf79f1a6d38fe9576679cd4129b04d0147 100644 (file)
@@ -23,21 +23,11 @@ class TestNFSs(object):
         assert host.file(
             "/etc/ganesha/ganesha.conf").contains("Entries_HWMark")
 
-    def test_nfs_is_up(self, node, host, setup):
+    def test_nfs_is_up(self, node, setup, ceph_status):
         hostname = node["vars"]["inventory_hostname"]
-        cluster = setup['cluster_name']
-        container_binary = setup["container_binary"]
-        if node['docker']:
-            container_exec_cmd = '{container_binary} exec ceph-nfs-{hostname}'.format(  # noqa E501
-                hostname=hostname, container_binary=container_binary)
-        else:
-            container_exec_cmd = ''
-        cmd = "sudo {container_exec_cmd} ceph --name client.rgw.{hostname} --keyring /var/lib/ceph/radosgw/{cluster}-rgw.{hostname}/keyring --cluster={cluster} --connect-timeout 5 -f json -s".format(  # noqa E501
-            container_exec_cmd=container_exec_cmd,
-            hostname=hostname,
-            cluster=cluster
-        )
-        output = host.check_output(cmd)
+        cluster = setup["cluster_name"]
+        name = f"client.rgw.{hostname}"
+        output = ceph_status(f'/var/lib/ceph/radosgw/{cluster}-rgw.{hostname}/keyring', name=name)
         keys = list(json.loads(
             output)["servicemap"]["services"]["rgw-nfs"]["daemons"].keys())
         keys.remove('summary')
index eb5b7498167c89669b54f2c439883aa8fcf8791d..12af2e170982199de1b2715983a436077e8e0908 100644 (file)
@@ -19,27 +19,14 @@ class TestRbdMirrors(object):
         assert s.is_running
 
     @pytest.mark.rbdmirror_secondary
-    def test_rbd_mirror_is_up(self, node, host, setup):
+    def test_rbd_mirror_is_up(self, node, setup, ceph_status):
         hostname = node["vars"]["inventory_hostname"]
         cluster = setup["cluster_name"]
-        container_binary = setup["container_binary"]
-        daemons = []
-        if node['docker']:
-            container_exec_cmd = '{container_binary} exec ceph-rbd-mirror-{hostname}'.format(  # noqa E501
-                hostname=hostname, container_binary=container_binary)
-        else:
-            container_exec_cmd = ''
-        hostname = node["vars"]["inventory_hostname"]
-        cluster = setup['cluster_name']
-        cmd = "sudo {container_exec_cmd} ceph --name client.bootstrap-rbd-mirror --keyring /var/lib/ceph/bootstrap-rbd-mirror/{cluster}.keyring --cluster={cluster} --connect-timeout 5 -f json -s".format(  # noqa E501
-            container_exec_cmd=container_exec_cmd,
-            hostname=hostname,
-            cluster=cluster
-        )
-        output = host.check_output(cmd)
+        output = ceph_status(f'/var/lib/ceph/bootstrap-rbd-mirror/{cluster}.keyring')
         status = json.loads(output)
         daemon_ids = [i for i in status["servicemap"]["services"]
                       ["rbd-mirror"]["daemons"].keys() if i != "summary"]
+        daemons = []
         for daemon_id in daemon_ids:
             daemons.append(status["servicemap"]["services"]["rbd-mirror"]
                            ["daemons"][daemon_id]["metadata"]["hostname"])
index 07cadd70530bbb9df8a4541c6d25f27eb8d412f6..8a45be0c78cb4893555082b4665e70044ecdebae 100644 (file)
@@ -21,21 +21,11 @@ class TestRGWs(object):
             assert s.is_enabled
             assert s.is_running
 
-    def test_rgw_is_up(self, node, host, setup):
+    def test_rgw_is_up(self, node, setup, ceph_status):
         hostname = node["vars"]["inventory_hostname"]
         cluster = setup["cluster_name"]
-        container_binary = setup["container_binary"]
-        if node['docker']:
-            container_exec_cmd = '{container_binary} exec ceph-rgw-{hostname}-rgw0'.format(  # noqa E501
-                hostname=hostname, container_binary=container_binary)
-        else:
-            container_exec_cmd = ''
-        cmd = "sudo {container_exec_cmd} ceph --name client.bootstrap-rgw --keyring /var/lib/ceph/bootstrap-rgw/{cluster}.keyring --cluster={cluster} --connect-timeout 5 -f json -s".format(  # noqa E501
-            container_exec_cmd=container_exec_cmd,
-            hostname=hostname,
-            cluster=cluster
-        )
-        output = host.check_output(cmd)
+        name = "client.bootstrap-rgw"
+        output = ceph_status(f'/var/lib/ceph/bootstrap-rgw/{cluster}.keyring', name=name)
         keys = list(json.loads(
             output)["servicemap"]["services"]["rgw"]["daemons"].keys())
         keys.remove('summary')
@@ -43,6 +33,7 @@ class TestRGWs(object):
         hostnames = []
         for key in keys:
             hostnames.append(daemons[key]['metadata']['hostname'])
+        assert hostname in hostnames
 
     @pytest.mark.no_docker
     def test_rgw_http_endpoint(self, node, host, setup):