testinfra: linting
author Sébastien Han <seb@redhat.com>
Mon, 19 Nov 2018 10:09:30 +0000 (11:09 +0100)
committer mergify[bot] <mergify[bot]@users.noreply.github.com>
Tue, 27 Nov 2018 16:47:40 +0000 (16:47 +0000)
Make flake8 happy on the testinfra files.

Signed-off-by: Sébastien Han <seb@redhat.com>
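
The hunks below are mechanical lint fixes. As context, here is a minimal sketch of the kind of check they are meant to satisfy, using flake8's documented legacy Python API; the file list and the focus on E501 ("line too long") are illustrative assumptions, not taken from this repository's CI:

    # Illustrative sketch: run flake8 over a couple of the touched files.
    from flake8.api import legacy as flake8

    style_guide = flake8.get_style_guide()
    report = style_guide.check_files([
        "tests/conftest.py",
        "tests/functional/tests/mon/test_mons.py",
    ])
    # E501 is the violation most of this commit wraps or exempts with noqa.
    assert report.get_statistics("E501") == [], "flake8 is not happy yet"
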
12 files changed:
tests/conftest.py
tests/functional/tests/iscsi/test_iscsi.py
tests/functional/tests/mds/test_mds.py
tests/functional/tests/mgr/test_mgr.py
tests/functional/tests/mon/test_mons.py
tests/functional/tests/nfs/test_nfs_ganesha.py
tests/functional/tests/osd/test_journal_collocation.py
tests/functional/tests/osd/test_osds.py
tests/functional/tests/rbd-mirror/test_rbd_mirror.py
tests/functional/tests/rgw/test_rgw.py
tests/functional/tests/rgw/test_rgw_tuning.py
tests/functional/tests/test_install.py
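
The fixes follow two recurring patterns: a long statement is wrapped after an opening parenthesis, or, where a long command-template string cannot be wrapped sensibly, the line is kept whole and exempted with "# noqa E501". A schematic of both patterns with stand-in values (only the wrapping is the point here):

    cluster_name = "ceph"            # stand-in values for illustration
    function_name = "test_example"
    group_names = ["mons"]

    # Pattern 1: break after an opening parenthesis so each line fits 79 columns.
    reason = "%s: Not a valid test for node type: %s" % (
        function_name, group_names)

    # Pattern 2: keep the long template on one line and silence E501 explicitly.
    cmd = "sudo ceph --cluster={cluster} --connect-timeout 5 -f json -s".format(  # noqa E501
        cluster=cluster_name)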

diff --git a/tests/conftest.py b/tests/conftest.py
index f517961fb612a68b0d423e668aad7634ceda18ab..a7ebfd9b44b33b613205d51d22c63c4507ff2144 100644
@@ -39,9 +39,11 @@ def node(host, request):
         if marker.name in group_names or marker.name == 'all':
             test_is_applicable = True
             break
-    # Check if any markers on the test method exist in the nodes group_names. If they do not, this test is not valid for the node being tested.
+    # Check if any markers on the test method exist in the nodes group_names.
+    # If they do not, this test is not valid for the node being tested.
     if not test_is_applicable:
-        reason = "%s: Not a valid test for node type: %s" % (request.function, group_names)
+        reason = "%s: Not a valid test for node type: %s" % (
+            request.function, group_names)
         pytest.skip(reason)
 
     if request.node.get_closest_marker("no_lvm_scenario") and lvm_scenario:
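
For readers skimming the conftest hunk above: the fixture skips a test unless one of the test's pytest markers names the node's Ansible group (or is "all"). A reduced, self-contained sketch of that idea, with illustrative names:

    import pytest

    def skip_if_not_applicable(request, group_names):
        # Same idea as the conftest logic above: the test applies only when
        # one of its marker names is in the node's groups, or is "all".
        marker_names = [m.name for m in request.node.iter_markers()]
        if not any(n in group_names or n == "all" for n in marker_names):
            pytest.skip("%s: Not a valid test for node type: %s"
                        % (request.function, group_names))
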
diff --git a/tests/functional/tests/iscsi/test_iscsi.py b/tests/functional/tests/iscsi/test_iscsi.py
index e023c75fe838c709e3fa501b823fadda27e5b1ca..ebbb543515151ca875161f32d627efec81321605 100644
@@ -1,5 +1,4 @@
 import pytest
-import json
 
 
 class TestiSCSIs(object):
diff --git a/tests/functional/tests/mds/test_mds.py b/tests/functional/tests/mds/test_mds.py
index f0c3f32cc1ffd1a00d131693b4474753f2c05a52..cfdf403e48320f340596a6e6064d79caae18c7d2 100644
@@ -1,6 +1,7 @@
 import pytest
 import json
 
+
 class TestMDSs(object):
 
     @pytest.mark.no_docker
@@ -25,14 +26,15 @@ class TestMDSs(object):
             container_binary = 'docker'
             if host.exists('podman') and host.ansible("setup")["ansible_facts"]["ansible_distribution"] == 'Fedora':  # noqa E501
                 container_binary = 'podman'
-            docker_exec_cmd = '{container_binary} exec ceph-mds-{hostname}'.format(
+            docker_exec_cmd = '{container_binary} exec ceph-mds-{hostname}'.format(  # noqa E501
                 hostname=hostname, container_binary=container_binary)
         else:
             docker_exec_cmd = ''
 
-        cmd = "sudo {docker_exec_cmd} ceph --name client.bootstrap-mds --keyring /var/lib/ceph/bootstrap-mds/{cluster}.keyring --cluster={cluster} --connect-timeout 5 -f json -s".format(
+        cmd = "sudo {docker_exec_cmd} ceph --name client.bootstrap-mds --keyring /var/lib/ceph/bootstrap-mds/{cluster}.keyring --cluster={cluster} --connect-timeout 5 -f json -s".format(  # noqa E501
             docker_exec_cmd=docker_exec_cmd,
             cluster=node['cluster_name']
         )
         cluster_status = json.loads(host.check_output(cmd))
-        assert (cluster_status['fsmap'].get('up', 0) + cluster_status['fsmap'].get('up:standby', 0)) == len(node["vars"]["groups"]["mdss"])
+        assert (cluster_status['fsmap'].get('up', 0) + cluster_status['fsmap'].get(  # noqa E501
+            'up:standby', 0)) == len(node["vars"]["groups"]["mdss"])
diff --git a/tests/functional/tests/mgr/test_mgr.py b/tests/functional/tests/mgr/test_mgr.py
index c0eadd991d7857ef7cf6044fd532aa80311327be..5783e96d74f38828c6a1a373c2ffe8fe9163b20b 100644
@@ -1,6 +1,7 @@
 import pytest
 import json
 
+
 class TestMGRs(object):
 
     @pytest.mark.no_docker
@@ -26,11 +27,11 @@ class TestMGRs(object):
             container_binary = 'docker'
             if host.exists('podman') and host.ansible("setup")["ansible_facts"]["ansible_distribution"] == 'Fedora':  # noqa E501
                 container_binary = 'podman'
-            docker_exec_cmd = '{container_binary} exec ceph-mgr-{hostname}'.format(
+            docker_exec_cmd = '{container_binary} exec ceph-mgr-{hostname}'.format(  # noqa E501
                 hostname=hostname, container_binary=container_binary)
         else:
             docker_exec_cmd = ''
-        cmd = "sudo {docker_exec_cmd} ceph --name mgr.{hostname} --keyring /var/lib/ceph/mgr/{cluster}-{hostname}/keyring --cluster={cluster} --connect-timeout 5 -f json -s".format(
+        cmd = "sudo {docker_exec_cmd} ceph --name mgr.{hostname} --keyring /var/lib/ceph/mgr/{cluster}-{hostname}/keyring --cluster={cluster} --connect-timeout 5 -f json -s".format(  # noqa E501
             docker_exec_cmd=docker_exec_cmd,
             hostname=node["vars"]["inventory_hostname"],
             cluster=cluster
diff --git a/tests/functional/tests/mon/test_mons.py b/tests/functional/tests/mon/test_mons.py
index e06e18cc8b72d212974b810d04136d6a43a8722c..35961074340456465d203e382a08fbdd602e568d 100644
@@ -1,6 +1,7 @@
 import pytest
 import re
 
+
 class TestMons(object):
 
     @pytest.mark.no_docker
@@ -24,7 +25,7 @@ class TestMons(object):
 
     @pytest.mark.no_docker
     def test_can_get_cluster_health(self, node, host):
-        cmd = "sudo ceph --cluster={} --connect-timeout 5 -s".format(node["cluster_name"])
+        cmd = "sudo ceph --cluster={} --connect-timeout 5 -s".format(node["cluster_name"])  # noqa E501
         output = host.check_output(cmd)
         assert output.strip().startswith("cluster")
 
@@ -32,11 +33,10 @@ class TestMons(object):
         assert File(node["conf_path"]).contains("^mon initial members = .*$")
 
     def test_initial_members_line_has_correct_value(self, node, host, File):
-        mon_initial_members_line = host.check_output("grep 'mon initial members = ' /etc/ceph/{cluster}.conf".format(cluster=node['cluster_name']))
+        mon_initial_members_line = host.check_output("grep 'mon initial members = ' /etc/ceph/{cluster}.conf".format(cluster=node['cluster_name']))  # noqa E501
         result = True
         for host in node["vars"]["groups"]["mons"]:
             pattern = re.compile(host)
-            if pattern.search(mon_initial_members_line) == None:
+            if pattern.search(mon_initial_members_line) == None:  # noqa E501
                 result = False
                 assert result
-
diff --git a/tests/functional/tests/nfs/test_nfs_ganesha.py b/tests/functional/tests/nfs/test_nfs_ganesha.py
index 0a931b4c224ebc614b1ed15ff5b769ac1556479e..4ffd6a7709669f7d488c93f9a828d778f3d5d46b 100644
@@ -1,6 +1,7 @@
 import json
 import pytest
 
+
 class TestNFSs(object):
 
     @pytest.mark.no_docker
@@ -21,7 +22,8 @@ class TestNFSs(object):
 
     @pytest.mark.no_docker
     def test_nfs_config_override(self, node, host):
-        assert host.file("/etc/ganesha/ganesha.conf").contains("Entries_HWMark")
+        assert host.file(
+            "/etc/ganesha/ganesha.conf").contains("Entries_HWMark")
 
     def test_nfs_is_up(self, node, host):
         hostname = node["vars"]["inventory_hostname"]
@@ -30,20 +32,21 @@ class TestNFSs(object):
             container_binary = 'docker'
             if host.exists('podman') and host.ansible("setup")["ansible_facts"]["ansible_distribution"] == 'Fedora':  # noqa E501
                 container_binary = 'podman'
-            docker_exec_cmd = '{container_binary} exec ceph-nfs-{hostname}'.format(
+            docker_exec_cmd = '{container_binary} exec ceph-nfs-{hostname}'.format(  # noqa E501
                 hostname=hostname, container_binary=container_binary)
         else:
             docker_exec_cmd = ''
-        cmd = "sudo {docker_exec_cmd} ceph --name client.rgw.{hostname} --keyring /var/lib/ceph/radosgw/{cluster}-rgw.{hostname}/keyring --cluster={cluster} --connect-timeout 5 -f json -s".format(
+        cmd = "sudo {docker_exec_cmd} ceph --name client.rgw.{hostname} --keyring /var/lib/ceph/radosgw/{cluster}-rgw.{hostname}/keyring --cluster={cluster} --connect-timeout 5 -f json -s".format(  # noqa E501
             docker_exec_cmd=docker_exec_cmd,
             hostname=hostname,
             cluster=cluster
         )
         output = host.check_output(cmd)
-        daemons = [i for i in json.loads(output)["servicemap"]["services"]["rgw-nfs"]["daemons"]]
+        daemons = [i for i in json.loads(
+            output)["servicemap"]["services"]["rgw-nfs"]["daemons"]]
         assert hostname in daemons
 
-#NOTE (guits): This check must be fixed. (Permission denied error)
+# NOTE (guits): This check must be fixed. (Permission denied error)
 #    @pytest.mark.no_docker
 #    def test_nfs_rgw_fsal_export(self, node, host):
 #        if(host.mount_point("/mnt").exists):
diff --git a/tests/functional/tests/osd/test_journal_collocation.py b/tests/functional/tests/osd/test_journal_collocation.py
index 96c18e3799281ffcb64f92047f9af51ce380f24e..17396381a7fced8abd16b1161874d4bb93e3c0d8 100644
@@ -2,9 +2,9 @@
 class TestOSD(object):
 
     def test_osds_are_all_collocated(self, node, host):
-        # TODO: figure out way to paramaterize node['vars']['devices'] for this test
+        # TODO: figure out way to paramaterize node['vars']['devices'] for this test  # noqa E501
         osd_auto_discovery = node["vars"].get('osd_auto_discovery', False)
         if osd_auto_discovery:
-            node["vars"]["devices"] = ["/dev/sda", "/dev/sdb", "/dev/sdc"] # Hardcoded since we can't retrieve the devices list generated during playbook run
+            node["vars"]["devices"] = ["/dev/sda", "/dev/sdb", "/dev/sdc"]  # Hardcoded since we can't retrieve the devices list generated during playbook run  # noqa E501
         for device in node["vars"]["devices"]:
-            assert host.check_output("sudo blkid -s PARTLABEL -o value $(readlink -f %s)2" % device) in ["ceph journal", "ceph block"]
+            assert host.check_output("sudo blkid -s PARTLABEL -o value $(readlink -f %s)2" % device) in ["ceph journal", "ceph block"]  # noqa E501
diff --git a/tests/functional/tests/osd/test_osds.py b/tests/functional/tests/osd/test_osds.py
index 2182b5d5d031f8e71999994d459a0740347ed97b..2e526fd9acb42d9e3040e0507201a5d59ad4c2d8 100644
@@ -12,12 +12,14 @@ class TestOSDs(object):
     def test_osds_listen_on_public_network(self, node, host):
         # TODO: figure out way to paramaterize this test
         nb_port = (node["num_osds"] * 2)
-        assert host.check_output("netstat -lntp | grep ceph-osd | grep %s | wc -l" % (node["address"])) == str(nb_port)
+        assert host.check_output(
+            "netstat -lntp | grep ceph-osd | grep %s | wc -l" % (node["address"])) == str(nb_port)  # noqa E501
 
     def test_osds_listen_on_cluster_network(self, node, host):
         # TODO: figure out way to paramaterize this test
         nb_port = (node["num_osds"] * 2)
-        assert host.check_output("netstat -lntp | grep ceph-osd | grep %s | wc -l" % (node["cluster_address"])) == str(nb_port)
+        assert host.check_output("netstat -lntp | grep ceph-osd | grep %s | wc -l" %  # noqa E501
+                                 (node["cluster_address"])) == str(nb_port)
 
     def test_osd_services_are_running(self, node, host):
         # TODO: figure out way to paramaterize node['osds'] for this test
@@ -51,7 +53,7 @@ class TestOSDs(object):
     def _get_osd_id_from_host(self, node, osd_tree):
         children = []
         for n in osd_tree['nodes']:
-            if n['name'] == node['vars']['inventory_hostname'] and n['type'] == 'host':
+            if n['name'] == node['vars']['inventory_hostname'] and n['type'] == 'host':  # noqa E501
                 children = n['children']
         return children
 
@@ -65,7 +67,8 @@ class TestOSDs(object):
 
     @pytest.mark.no_docker
     def test_all_osds_are_up_and_in(self, node, host):
-        cmd = "sudo ceph --cluster={cluster} --connect-timeout 5 --keyring /var/lib/ceph/bootstrap-osd/{cluster}.keyring -n client.bootstrap-osd osd tree -f json".format(cluster=node["cluster_name"])
+        cmd = "sudo ceph --cluster={cluster} --connect-timeout 5 --keyring /var/lib/ceph/bootstrap-osd/{cluster}.keyring -n client.bootstrap-osd osd tree -f json".format(  # noqa E501
+            cluster=node["cluster_name"])
         output = json.loads(host.check_output(cmd))
         assert node["num_osds"] == self._get_nb_up_osds_from_ids(node, output)
 
@@ -76,7 +79,7 @@ class TestOSDs(object):
             container_binary = 'podman'
         osd_id = host.check_output(os.path.join(
             container_binary + " ps -q --filter='name=ceph-osd' | head -1"))
-        cmd = "sudo {container_binary} exec {osd_id} ceph --cluster={cluster} --connect-timeout 5 --keyring /var/lib/ceph/bootstrap-osd/{cluster}.keyring -n client.bootstrap-osd osd tree -f json".format(
+        cmd = "sudo {container_binary} exec {osd_id} ceph --cluster={cluster} --connect-timeout 5 --keyring /var/lib/ceph/bootstrap-osd/{cluster}.keyring -n client.bootstrap-osd osd tree -f json".format(  # noqa E501
             osd_id=osd_id,
             cluster=node["cluster_name"],
             container_binary=container_binary
diff --git a/tests/functional/tests/rbd-mirror/test_rbd_mirror.py b/tests/functional/tests/rbd-mirror/test_rbd_mirror.py
index eaa43b810dcd4770682000e12c65744f54fbd6af..f51e3283a20b16a8250140e722fdf1f73bc99041 100644
@@ -1,6 +1,6 @@
 import pytest
 import json
-import os
+
 
 class TestRbdMirrors(object):
 
@@ -35,20 +35,22 @@ class TestRbdMirrors(object):
             container_binary = 'docker'
             if host.exists('podman') and host.ansible("setup")["ansible_facts"]["ansible_distribution"] == 'Fedora':  # noqa E501
                 container_binary = 'podman'
-            docker_exec_cmd = '{container_binary} exec ceph-rbd-mirror-{hostname}'.format(
+            docker_exec_cmd = '{container_binary} exec ceph-rbd-mirror-{hostname}'.format(  # noqa E501
                 hostname=hostname, container_binary=container_binary)
         else:
             docker_exec_cmd = ''
         hostname = node["vars"]["inventory_hostname"]
         cluster = node['cluster_name']
-        cmd = "sudo {docker_exec_cmd} ceph --name client.bootstrap-rbd-mirror --keyring /var/lib/ceph/bootstrap-rbd-mirror/{cluster}.keyring --cluster={cluster} --connect-timeout 5 -f json -s".format(
+        cmd = "sudo {docker_exec_cmd} ceph --name client.bootstrap-rbd-mirror --keyring /var/lib/ceph/bootstrap-rbd-mirror/{cluster}.keyring --cluster={cluster} --connect-timeout 5 -f json -s".format(  # noqa E501
             docker_exec_cmd=docker_exec_cmd,
             hostname=hostname,
             cluster=cluster
         )
         output = host.check_output(cmd)
         status = json.loads(output)
-        daemon_ids = [i for i in status["servicemap"]["services"]["rbd-mirror"]["daemons"].keys() if i != "summary"]
+        daemon_ids = [i for i in status["servicemap"]["services"]
+                      ["rbd-mirror"]["daemons"].keys() if i != "summary"]
         for daemon_id in daemon_ids:
-            daemons.append(status["servicemap"]["services"]["rbd-mirror"]["daemons"][daemon_id]["metadata"]["hostname"])
-        assert hostname in daemons
\ No newline at end of file
+            daemons.append(status["servicemap"]["services"]["rbd-mirror"]
+                           ["daemons"][daemon_id]["metadata"]["hostname"])
+        assert hostname in daemons
diff --git a/tests/functional/tests/rgw/test_rgw.py b/tests/functional/tests/rgw/test_rgw.py
index 43bbcb274cd082f27279648ec96d8fc962b6caa2..98f5c51e97b40921f72e2d4740eaf2099ee3a220 100644
@@ -1,6 +1,7 @@
 import pytest
 import json
 
+
 class TestRGWs(object):
 
     @pytest.mark.no_docker
@@ -29,21 +30,23 @@ class TestRGWs(object):
             container_binary = 'docker'
             if host.exists('podman') and host.ansible("setup")["ansible_facts"]["ansible_distribution"] == 'Fedora':  # noqa E501
                 container_binary = 'podman'
-            docker_exec_cmd = '{container_binary} exec ceph-rgw-{hostname}'.format(
+            docker_exec_cmd = '{container_binary} exec ceph-rgw-{hostname}'.format(  # noqa E501
                 hostname=hostname, container_binary=container_binary)
         else:
             docker_exec_cmd = ''
-        cmd = "sudo {docker_exec_cmd} ceph --name client.bootstrap-rgw --keyring /var/lib/ceph/bootstrap-rgw/{cluster}.keyring --cluster={cluster} --connect-timeout 5 -f json -s".format(
+        cmd = "sudo {docker_exec_cmd} ceph --name client.bootstrap-rgw --keyring /var/lib/ceph/bootstrap-rgw/{cluster}.keyring --cluster={cluster} --connect-timeout 5 -f json -s".format(  # noqa E501
             docker_exec_cmd=docker_exec_cmd,
             hostname=hostname,
             cluster=cluster
         )
         output = host.check_output(cmd)
-        daemons = [i for i in json.loads(output)["servicemap"]["services"]["rgw"]["daemons"]]
+        daemons = [i for i in json.loads(
+            output)["servicemap"]["services"]["rgw"]["daemons"]]
         assert hostname in daemons
 
     @pytest.mark.no_docker
     def test_rgw_http_endpoint(self, node, host):
         # rgw frontends ip_addr is configured on eth1
         ip_addr = host.interface("eth1").addresses[0]
-        assert host.socket("tcp://{ip_addr}:{port}".format(ip_addr=ip_addr, port=8080)).is_listening
+        assert host.socket(
+            "tcp://{ip_addr}:{port}".format(ip_addr=ip_addr, port=8080)).is_listening  # noqa E501
diff --git a/tests/functional/tests/rgw/test_rgw_tuning.py b/tests/functional/tests/rgw/test_rgw_tuning.py
index 69c87a6ad3a487ea80b43bafad919c48d9d59a3e..3dff9cea64016cba2f38f6fbe6400cb1879e8538 100644
@@ -6,23 +6,25 @@ class TestRGWs(object):
 
     @pytest.mark.no_docker
     def test_rgw_bucket_default_quota_is_set(self, node, host):
-        assert host.file(node["conf_path"]).contains("rgw override bucket index max shards")
-        assert host.file(node["conf_path"]).contains("rgw bucket default quota max objects")
+        assert host.file(node["conf_path"]).contains(
+            "rgw override bucket index max shards")
+        assert host.file(node["conf_path"]).contains(
+            "rgw bucket default quota max objects")
 
     @pytest.mark.no_docker
     def test_rgw_bucket_default_quota_is_applied(self, node, host):
-        radosgw_admin_cmd = "sudo radosgw-admin --cluster={cluster} -n client.rgw.{hostname} --keyring /var/lib/ceph/radosgw/{cluster}-rgw.{hostname}/keyring user create --uid=test --display-name Test".format(
+        radosgw_admin_cmd = "sudo radosgw-admin --cluster={cluster} -n client.rgw.{hostname} --keyring /var/lib/ceph/radosgw/{cluster}-rgw.{hostname}/keyring user create --uid=test --display-name Test".format(  # noqa E501
             hostname=node["vars"]["inventory_hostname"],
             cluster=node['cluster_name']
         )
         radosgw_admin_output = host.check_output(radosgw_admin_cmd)
         radosgw_admin_output_json = json.loads(radosgw_admin_output)
-        assert radosgw_admin_output_json["bucket_quota"]["enabled"] == True
-        assert radosgw_admin_output_json["bucket_quota"]["max_objects"] == 1638400
+        assert radosgw_admin_output_json["bucket_quota"]["enabled"] == True  # noqa E501
+        assert radosgw_admin_output_json["bucket_quota"]["max_objects"] == 1638400  # noqa E501
 
     @pytest.mark.no_docker
     def test_rgw_tuning_pools_are_set(self, node, host):
-        cmd = "sudo ceph --cluster={cluster} --connect-timeout 5 -n client.rgw.{hostname} --keyring /var/lib/ceph/radosgw/{cluster}-rgw.{hostname}/keyring osd dump".format(
+        cmd = "sudo ceph --cluster={cluster} --connect-timeout 5 -n client.rgw.{hostname} --keyring /var/lib/ceph/radosgw/{cluster}-rgw.{hostname}/keyring osd dump".format(  # noqa E501
             hostname=node["vars"]["inventory_hostname"],
             cluster=node['cluster_name']
         )
@@ -40,14 +42,14 @@ class TestRGWs(object):
         container_binary = 'docker'
         if host.exists('podman') and host.ansible("setup")["ansible_facts"]["ansible_distribution"] == 'Fedora':  # noqa E501
             container_binary = 'podman'
-        cmd = "sudo {container_binary} exec ceph-rgw-{hostname} ceph --cluster={cluster} -n client.rgw.{hostname} --connect-timeout 5 --keyring /var/lib/ceph/radosgw/{cluster}-rgw.{hostname}/keyring  osd dump".format(
+        cmd = "sudo {container_binary} exec ceph-rgw-{hostname} ceph --cluster={cluster} -n client.rgw.{hostname} --connect-timeout 5 --keyring /var/lib/ceph/radosgw/{cluster}-rgw.{hostname}/keyring  osd dump".format(  # noqa E501
             hostname=hostname,
             cluster=cluster,
             container_binary=container_binary
         )
         output = host.check_output(cmd)
         pools = node["vars"].get("rgw_create_pools")
-        if pools == None:
+        if pools is None:
             pytest.skip('rgw_create_pools not defined, nothing to test')
         for pool_name, pg_num in pools.items():
             assert pool_name in output
diff --git a/tests/functional/tests/test_install.py b/tests/functional/tests/test_install.py
index 4300ab2b4fb20766adebe131e1bb14e52ead2576..7d7a2a9bbe74bcacfd20cd8810526b4477c68031 100644
@@ -1,6 +1,7 @@
 import pytest
 import re
 
+
 class TestInstall(object):
 
     def test_ceph_dir_exists(self, host, node):
@@ -26,10 +27,10 @@ class TestCephConf(object):
         assert File(node["conf_path"]).contains("^mon host = .*$")
 
     def test_mon_host_line_has_correct_value(self, node, host):
-        mon_host_line = host.check_output("grep 'mon host = ' /etc/ceph/{cluster}.conf".format(cluster=node['cluster_name']))
-        result=True
+        mon_host_line = host.check_output("grep 'mon host = ' /etc/ceph/{cluster}.conf".format(cluster=node['cluster_name']))  # noqa E501
+        result = True
         for x in range(0, node["num_mons"]):
-            pattern=re.compile(("{}.1{}".format(node["subnet"], x)))
-            if pattern.search(mon_host_line) == None:
-                result=False
+            pattern = re.compile(("{}.1{}".format(node["subnet"], x)))
+            if pattern.search(mon_host_line) is None:
+                result = False
             assert result
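
A side note on the last two files: besides line wrapping, they replace "== None" with "is None", which is what flake8's E711 check asks for. A short illustration of the difference, mirroring the tests' use of re.search():

    import re

    # re.search() returns a Match object or None, so identity is the right
    # test; "is None" cannot be fooled by a class overriding __eq__, which
    # is why flake8 flags "== None" as E711.
    match = re.search(r"mon0", "mon host = 192.168.105.10")
    if match is None:
        print("pattern not found")
    else:
        print("found:", match.group(0))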