qa: Fix problems detected by mypy
author Thomas Bechtold <tbechtold@suse.com>
Thu, 9 Jan 2020 07:27:37 +0000 (08:27 +0100)
committer Thomas Bechtold <tbechtold@suse.com>
Thu, 5 Mar 2020 05:53:31 +0000 (06:53 +0100)
This is a first step to enable mypy on the qa/ directory.

Signed-off-by: Thomas Bechtold <tbechtold@suse.com>
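
The changes below mostly clean up three recurring patterns: str.format() called on a literal that contains no replacement fields (the arguments are silently discarded), format strings built by '+' concatenation so that .format() binds only to the last literal, and Python 2 print statements. A minimal Python sketch of the patterns and their fixes (the example values are illustrative, not taken from the qa/ code):

    # Pattern 1: .format() on a literal with no replacement fields is a no-op;
    # the arguments are ignored and the literal is returned unchanged.
    'ceph-coverage'.format(tdir='/tmp/test')   # -> 'ceph-coverage'
    'ceph-coverage'                            # fix: drop the pointless call

    # Pattern 2: '+' binds .format() only to the right-hand literal, so the
    # placeholders in the left-hand part are never substituted.
    "command '{command}' got response code " + \
        "'{rout}'".format(command='ls', rout=0)
    # -> "command '{command}' got response code '0'"
    # fix: format the whole string at once.
    "command '{command}' got response code '{rout}'".format(command='ls', rout=0)

    # Pattern 3: Python 2 print statements become print() calls, which work
    # the same on Python 2 and 3 for a single argument.
    print('wait for 5sec for the messages...')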
16 files changed:
qa/tasks/cephfs/filesystem.py
qa/tasks/cephfs/test_scrub_checks.py
qa/tasks/cephfs/test_snapshots.py
qa/tasks/mds_thrash.py
qa/tasks/mgr/test_insights.py
qa/tasks/rbd.py
qa/tasks/restart.py
qa/tasks/rgw.py
qa/tasks/util/rgw.py
qa/tasks/vstart_runner.py
qa/tasks/workunit.py
src/test/rgw/rgw_multi/tests.py
src/test/rgw/rgw_multi/tests_ps.py
src/test/rgw/rgw_multi/zone_cloud.py
src/test/rgw/rgw_multi/zone_es.py
src/test/rgw/rgw_multi/zone_ps.py

qa/tasks/cephfs/filesystem.py
index 0110c9c22f47477f38650ec1bb49e462341713d9..7352f12c801be6806a7636e9747e64b39a596259 100644 (file)
@@ -610,8 +610,8 @@ class Filesystem(MDSCluster):
             if pool['pool_name'] == pool_name:
                 if "application_metadata" in pool:
                     if not "cephfs" in pool['application_metadata']:
-                        raise RuntimeError("Pool %p does not name cephfs as application!".\
-                                           format(pool_name))
+                        raise RuntimeError("Pool {pool_name} does not name cephfs as application!".\
+                                           format(pool_name=pool_name))
         
 
     def __del__(self):
qa/tasks/cephfs/test_scrub_checks.py
index c420936afdc82ab7bb29d6bf57d88df0d5427016..e3f5609afe50c6997d1e748990dfb690a422464e 100644 (file)
@@ -169,10 +169,9 @@ class TestScrubChecks(CephFSTestCase):
         log.info("Cloning repo into place")
         repo_path = TestScrubChecks.clone_repo(self.mount_a, client_path)
 
-        log.info("Initiating mds_scrub_checks on mds.{id_}, " +
-                 "test_path {path}, run_seq {seq}".format(
-                     id_=mds_rank, path=abs_test_path, seq=run_seq)
-                 )
+        log.info("Initiating mds_scrub_checks on mds.{id_} test_path {path}, run_seq {seq}".format(
+            id_=mds_rank, path=abs_test_path, seq=run_seq)
+        )
 
 
         success_validator = lambda j, r: self.json_validator(j, r, "return_code", 0)
@@ -329,9 +328,8 @@ class TestScrubChecks(CephFSTestCase):
         else:
             jout = None
 
-        log.info("command '{command}' got response code " +
-                 "'{rout}' and stdout '{sout}'".format(
-                     command=command, rout=rout, sout=sout))
+        log.info("command '{command}' got response code '{rout}' and stdout '{sout}'".format(
+            command=command, rout=rout, sout=sout))
 
         success, errstring = validator(jout, rout)
 
@@ -371,7 +369,5 @@ class AsokCommandFailedError(Exception):
         self.errstring = errstring
 
     def __str__(self):
-        return "Admin socket: {command} failed with rc={rc}," + \
-               "json output={json}, because '{es}'".format(
-                   command=self.command, rc=self.rc,
-                   json=self.json, es=self.errstring)
+        return "Admin socket: {command} failed with rc={rc} json output={json}, because '{es}'".format(
+            command=self.command, rc=self.rc, json=self.json, es=self.errstring)
qa/tasks/cephfs/test_snapshots.py
index 067c7b1fb92d148dd23d1f37f783b69a72e9cfed..c3940897a7ba2e6691a6c8ba333cc75743f45f94 100644 (file)
@@ -189,9 +189,9 @@ class TestSnapshots(CephFSTestCase):
         rank1 = self.fs.get_rank(rank=1, status=status)
         self.fs.rank_freeze(True, rank=0)
         self.fs.rank_freeze(True, rank=1)
-        self.fs.rank_asok(['config', 'set', "mds_kill_mdstable_at", "8".format(i)], rank=0, status=status)
-        self.fs.rank_asok(['config', 'set', "mds_kill_mdstable_at", "3".format(i)], rank=1, status=status)
-        proc = self.mount_a.run_shell(["mkdir", "d1/dir/.snap/s4".format(i)], wait=False)
+        self.fs.rank_asok(['config', 'set', "mds_kill_mdstable_at", "8"], rank=0, status=status)
+        self.fs.rank_asok(['config', 'set', "mds_kill_mdstable_at", "3"], rank=1, status=status)
+        proc = self.mount_a.run_shell(["mkdir", "d1/dir/.snap/s4"], wait=False)
         self.wait_until_true(lambda: "laggy_since" in self.fs.get_rank(rank=1), timeout=grace*2);
         self.delete_mds_coredump(rank1['name']);
 
qa/tasks/mds_thrash.py
index d8c74250dd53590967f893fd0e1714530fedde58..07ec039ed9cb84e3f5b6ace37097fbb0e08c6a1c 100644 (file)
@@ -203,7 +203,8 @@ class MDSThrasher(Thrasher, Greenlet):
                         pass # no rank present
                     if len(actives) >= max_mds:
                         # no replacement can occur!
-                        self.log("cluster has %d actives (max_mds is %d), no MDS can replace rank %d".format(len(actives), max_mds, rank))
+                        self.log("cluster has {actives} actives (max_mds is {max_mds}), no MDS can replace rank {rank}".format(
+                            actives=len(actives), max_mds=max_mds, rank=rank))
                         return status
                 else:
                     if len(actives) == max_mds:
@@ -303,7 +304,7 @@ class MDSThrasher(Thrasher, Greenlet):
                     self.log(
                         '{label} reported laggy/crashed since: {since}'.format(label=label, since=last_laggy_since))
                 else:
-                    self.log('{label} down, removed from mdsmap'.format(label=label, since=last_laggy_since))
+                    self.log('{label} down, removed from mdsmap'.format(label=label))
 
                 # wait for a standby mds to takeover and become active
                 status = self.wait_for_stable(rank, gid)
qa/tasks/mgr/test_insights.py
index 3f5b9768bbeb0e4171baca53f49758bb688678a5..c483e3abf76e02afada4895b6436efa9a0e06aa8 100644 (file)
@@ -117,7 +117,7 @@ class TestInsights(MgrTestCase):
             check_names.add(unique_check_name)
 
             # and also set the same health check to test deduplication
-            dupe_check_name = "insights_health_check".format(hours)
+            dupe_check_name = "insights_health_check"
             health_check = {
                 dupe_check_name: {
                     "severity": "warning",
qa/tasks/rbd.py
index 1962f583f2aebf0630cbc4c65c11bdecbf46438a..faa094dabebbdb7c6aa4d8bd9278b80291f5489a 100644 (file)
@@ -65,7 +65,7 @@ def create_image(ctx, config):
                                                                  size=size))
         args = [
                 'adjust-ulimits',
-                'ceph-coverage'.format(tdir=testdir),
+                'ceph-coverage',
                 '{tdir}/archive/coverage'.format(tdir=testdir),
                 'rbd',
                 '-p', 'rbd',
@@ -140,7 +140,7 @@ def clone_image(ctx, config):
                     ('clone', parent_spec, name)]:
             args = [
                     'adjust-ulimits',
-                    'ceph-coverage'.format(tdir=testdir),
+                    'ceph-coverage',
                     '{tdir}/archive/coverage'.format(tdir=testdir),
                     'rbd', '-p', 'rbd'
                     ]
@@ -165,7 +165,7 @@ def clone_image(ctx, config):
                         ('snap', 'rm', parent_spec)]:
                 args = [
                         'adjust-ulimits',
-                        'ceph-coverage'.format(tdir=testdir),
+                        'ceph-coverage',
                         '{tdir}/archive/coverage'.format(tdir=testdir),
                         'rbd', '-p', 'rbd'
                         ]
qa/tasks/restart.py
index 52b685c9e360a9afd92a8e7ae7629cb802ba0cea..6e33677f002b16b149b8a613d89e1a5f058fb6e7 100644 (file)
@@ -61,7 +61,7 @@ def get_tests(ctx, config, role, remote, testdir):
             run.Raw('&&'),
             'if', 'test', '-e', 'Makefile', run.Raw(';'), 'then', 'make', run.Raw(';'), 'fi',
             run.Raw('&&'),
-            'find', '-executable', '-type', 'f', '-printf', r'%P\0'.format(srcdir=srcdir),
+            'find', '-executable', '-type', 'f', '-printf', r'%P\0',
             run.Raw('>{tdir}/restarts.list'.format(tdir=testdir)),
             ],
         )
qa/tasks/rgw.py
index aceef8894f8d3d5652a09723ff0958111274e350..df900d2c4505b4cedde25bc115130dfad5dd0aaf 100644 (file)
@@ -147,8 +147,7 @@ def start_rgw(ctx, config, clients):
             run.Raw('|'),
             'sudo',
             'tee',
-            '/var/log/ceph/rgw.{client_with_cluster}.stdout'.format(tdir=testdir,
-                                                       client_with_cluster=client_with_cluster),
+            '/var/log/ceph/rgw.{client_with_cluster}.stdout'.format(client_with_cluster=client_with_cluster),
             run.Raw('2>&1'),
             ])
 
qa/tasks/util/rgw.py
index d1ea39d1cd05c203252e0a3fcc166cdba8323584..91652198b8a6f57dc047b503aaf4d859858283c0 100644 (file)
@@ -15,9 +15,9 @@ def rgwadmin(ctx, client, cmd, stdin=StringIO(), check_status=False,
     client_with_id = daemon_type + '.' + client_id
     pre = [
         'adjust-ulimits',
-        'ceph-coverage'.format(tdir=testdir),
+        'ceph-coverage',
         '{tdir}/archive/coverage'.format(tdir=testdir),
-        'radosgw-admin'.format(tdir=testdir),
+        'radosgw-admin',
         '--log-to-stderr',
         '--format', format,
         '-n',  client_with_id,
qa/tasks/vstart_runner.py
index ccc706b922b64f8a8ac0ae7a86d79f21fc8af804..8d48152afd6f7f6204246ba828feff8e19f80eac 100644 (file)
@@ -1476,11 +1476,11 @@ def exec_test():
 
         if hasattr(fn, 'is_for_teuthology') and getattr(fn, 'is_for_teuthology') is True:
             drop_test = True
-            log.warn("Dropping test because long running: ".format(method.id()))
+            log.warn("Dropping test because long running: {method_id}".format(method_id=method.id()))
 
         if getattr(fn, "needs_trimming", False) is True:
             drop_test = (os.getuid() != 0)
-            log.warn("Dropping test because client trim unavailable: ".format(method.id()))
+            log.warn("Dropping test because client trim unavailable: {method_id}".format(method_id=method.id()))
 
         if drop_test:
             # Don't drop the test if it was explicitly requested in arguments
qa/tasks/workunit.py
index 6e555770063e589a36a845a164ab149227da690d..46eeef41f2e089890268059db9418e508fa3cfa5 100644 (file)
@@ -358,7 +358,7 @@ def _run_tests(ctx, refspec, role, tests, env, basedir,
             run.Raw('&&'),
             'if', 'test', '-e', 'Makefile', run.Raw(';'), 'then', 'make', run.Raw(';'), 'fi',
             run.Raw('&&'),
-            'find', '-executable', '-type', 'f', '-printf', r'%P\0'.format(srcdir=srcdir),
+            'find', '-executable', '-type', 'f', '-printf', r'%P\0',
             run.Raw('>{tdir}/workunits.list.{role}'.format(tdir=testdir, role=role)),
         ],
     )
src/test/rgw/rgw_multi/tests.py
index 2ddf88c6170e715b7e9e7ac9dfb0b7410d1a7852..6cada052d3dfcc477552557349cd16104c76d49a 100644 (file)
@@ -80,15 +80,6 @@ def mdlog_list(zone, period = None):
     mdlog_json = mdlog_json.decode('utf-8')
     return json.loads(mdlog_json)
 
-def meta_sync_status(zone):
-    while True:
-        cmd = ['metadata', 'sync', 'status'] + zone.zone_args()
-        meta_sync_status_json, retcode = zone.cluster.admin(cmd, check_retcode=False, read_only=True)
-        if retcode == 0:
-            break
-        assert(retcode == 2) # ENOENT
-        time.sleep(5)
-
 def mdlog_autotrim(zone):
     zone.cluster.admin(['mdlog', 'autotrim'])
 
src/test/rgw/rgw_multi/tests_ps.py
index 9dd037ba320c9fff96d22742877f8a53ff64ed0c..d827ed55f14ed892aba16f416234b749e10876b4 100644 (file)
@@ -159,14 +159,6 @@ class StreamingHTTPServer:
         for worker in self.workers:
             worker.close()
             worker.join()
-    
-    def get_and_reset_events(self):
-        events = []
-        for worker in self.workers:
-            events += worker.get_events()
-            worker.reset_events()
-        return events
-
 
 # AMQP endpoint functions
 
@@ -918,7 +910,7 @@ def test_ps_s3_topic_with_secret_on_master():
     try:
         result = bad_topic_conf.set_config()
     except Exception as err:
-        print 'Error is expected: ' + str(err)
+        print('Error is expected: ' + str(err))
     else:
         assert False, 'user password configuration set allowed only over HTTPS'
     
@@ -1430,7 +1422,7 @@ def test_ps_s3_notification_push_amqp_on_master():
     [thr.join() for thr in client_threads] 
     
     time_diff = time.time() - start_time
-    print 'average time for deletion + amqp notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds'
+    print('average time for deletion + amqp notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds')
 
     print('wait for 5sec for the messages...')
     time.sleep(5)
@@ -1589,9 +1581,9 @@ def test_ps_s3_notification_push_kafka_on_master():
     [thr.join() for thr in client_threads] 
 
     time_diff = time.time() - start_time
-    print 'average time for creation + kafka notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds'
+    print('average time for creation + kafka notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds')
 
-    print 'wait for 5sec for the messages...'
+    print('wait for 5sec for the messages...')
     time.sleep(5)
     keys = list(bucket.list())
     receiver.verify_s3_events(keys, exact_match=True)
@@ -1606,9 +1598,9 @@ def test_ps_s3_notification_push_kafka_on_master():
     [thr.join() for thr in client_threads] 
     
     time_diff = time.time() - start_time
-    print 'average time for deletion + kafka notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds'
+    print('average time for deletion + kafka notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds')
 
-    print 'wait for 5sec for the messages...'
+    print('wait for 5sec for the messages...')
     time.sleep(5)
     receiver.verify_s3_events(keys, exact_match=True, deletions=True)
     
@@ -1684,10 +1676,10 @@ def kafka_security(security_type):
     [thr.join() for thr in client_threads] 
 
     time_diff = time.time() - start_time
-    print 'average time for creation + kafka notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds'
+    print('average time for creation + kafka notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds')
 
     try:
-        print 'wait for 5sec for the messages...'
+        print('wait for 5sec for the messages...')
         time.sleep(5)
         keys = list(bucket.list())
         receiver.verify_s3_events(keys, exact_match=True)
@@ -1702,9 +1694,9 @@ def kafka_security(security_type):
         [thr.join() for thr in client_threads] 
         
         time_diff = time.time() - start_time
-        print 'average time for deletion + kafka notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds'
+        print('average time for deletion + kafka notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds')
 
-        print 'wait for 5sec for the messages...'
+        print('wait for 5sec for the messages...')
         time.sleep(5)
         receiver.verify_s3_events(keys, exact_match=True, deletions=True)
     except Exception as err:
@@ -1780,7 +1772,7 @@ def test_ps_s3_notification_multi_delete_on_master():
     start_time = time.time()
     delete_all_objects(zones[0].conn, bucket_name)
     time_diff = time.time() - start_time
-    print 'average time for deletion + http notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds'
+    print('average time for deletion + http notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds')
 
     print('wait for 5sec for the messages...')
     time.sleep(5)
@@ -1864,7 +1856,7 @@ def test_ps_s3_notification_push_http_on_master():
     [thr.join() for thr in client_threads] 
     
     time_diff = time.time() - start_time
-    print 'average time for deletion + http notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds'
+    print('average time for deletion + http notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds')
 
     print('wait for 5sec for the messages...')
     time.sleep(5)
src/test/rgw/rgw_multi/zone_cloud.py
index b9ff43ca964878deb6f7ad1307ed73c312d6b5bb..fdfca4f1eca12973af9b6780e2d56282588d48c0 100644 (file)
@@ -140,14 +140,6 @@ class CloudKey:
 
         return r
 
-def append_query_arg(s, n, v):
-    if not v:
-        return s
-    nv = '{n}={v}'.format(n=n, v=v)
-    if not s:
-        return nv
-    return '{s}&{nv}'.format(s=s, nv=nv)
-
 
 class CloudZoneBucket:
     def __init__(self, zone_conn, target_path, name):
src/test/rgw/rgw_multi/zone_es.py
index 55edae7292017abf84300ad8b0a9cfae0b5ae7dc..ec9b178fd16889671b01ee9f993a7a75d131a2f6 100644 (file)
@@ -52,13 +52,6 @@ def make_request(conn, method, bucket, key, query_args, headers):
         raise boto.exception.S3ResponseError(result.status, result.reason, result.read())
     return result
 
-def append_query_arg(s, n, v):
-    if not v:
-        return s
-    nv = '{n}={v}'.format(n=n, v=v)
-    if not s:
-        return nv
-    return '{s}&{nv}'.format(s=s, nv=nv)
 
 class MDSearch:
     def __init__(self, conn, bucket_name, query, query_args = None, marker = None):
src/test/rgw/rgw_multi/zone_ps.py
index af1fb8464391eb3ea888abf013d69f5f4fa61980..e22200e2665eadf9be3b227daa5dd6d89cbec9ad 100644 (file)
@@ -66,13 +66,6 @@ class PSZone(Zone):  # pylint: disable=too-many-ancestors
 NO_HTTP_BODY = ''
 
 
-def print_connection_info(conn):
-    """print connection details"""
-    print('Endpoint: ' + conn.host + ':' + str(conn.port))
-    print('AWS Access Key:: ' + conn.aws_access_key_id)
-    print('AWS Secret Key:: ' + conn.aws_secret_access_key)
-
-
 def make_request(conn, method, resource, parameters=None, sign_parameters=False, extra_parameters=None):
     """generic request sending to pubsub radogw
     should cover: topics, notificatios and subscriptions
@@ -171,7 +164,7 @@ def delete_all_s3_topics(zone, region):
             print('topic cleanup, deleting: ' + topic['TopicArn'])
             assert client.delete_topic(TopicArn=topic['TopicArn'])['ResponseMetadata']['HTTPStatusCode'] == 200
     except Exception as err:
-        print 'failed to do topic cleanup: ' + str(err)
+        print('failed to do topic cleanup: ' + str(err))
     
 
 def delete_all_objects(conn, bucket_name):