This is a first step toward enabling mypy on the qa/ directory.
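
Most of the changes fall into a handful of recurring patterns: Python 2
print statements (a SyntaxError when the files are parsed as Python 3,
as mypy does by default) become print() calls; .format() calls on
literals that contain no {} placeholders are dropped as no-ops; messages
assembled as literal + literal.format(...), where only the right-hand
literal is actually formatted, are collapsed into a single .format()
call; stray placeholders such as %p and unused keyword arguments are
cleaned up; and a few duplicated or dead helpers (append_query_arg,
meta_sync_status, get_and_reset_events, print_connection_info) are
deleted outright. The eventual goal is a clean run of something like
"mypy qa/" (the exact invocation and configuration are not part of this
change).
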
Signed-off-by: Thomas Bechtold <tbechtold@suse.com>
if pool['pool_name'] == pool_name:
if "application_metadata" in pool:
if not "cephfs" in pool['application_metadata']:
- raise RuntimeError("Pool %p does not name cephfs as application!".\
- format(pool_name))
+ raise RuntimeError("Pool {pool_name} does not name cephfs as application!".\
+ format(pool_name=pool_name))
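
The old message used %p, which is a C printf conversion, not a Python
one; str.format() substitutes only brace placeholders, so the %p came
through literally and the argument was silently ignored. A minimal
illustration (not from the patched file):

    >>> "Pool %p does not name cephfs as application!".format("foo")
    'Pool %p does not name cephfs as application!'
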
def __del__(self):
log.info("Cloning repo into place")
repo_path = TestScrubChecks.clone_repo(self.mount_a, client_path)
- log.info("Initiating mds_scrub_checks on mds.{id_}, " +
- "test_path {path}, run_seq {seq}".format(
- id_=mds_rank, path=abs_test_path, seq=run_seq)
- )
+ log.info("Initiating mds_scrub_checks on mds.{id_} test_path {path}, run_seq {seq}".format(
+ id_=mds_rank, path=abs_test_path, seq=run_seq)
+ )
success_validator = lambda j, r: self.json_validator(j, r, "return_code", 0)
else:
jout = None
- log.info("command '{command}' got response code " +
- "'{rout}' and stdout '{sout}'".format(
- command=command, rout=rout, sout=sout))
+ log.info("command '{command}' got response code '{rout}' and stdout '{sout}'".format(
+ command=command, rout=rout, sout=sout))
success, errstring = validator(jout, rout)
self.errstring = errstring
def __str__(self):
- return "Admin socket: {command} failed with rc={rc}," + \
- "json output={json}, because '{es}'".format(
- command=self.command, rc=self.rc,
- json=self.json, es=self.errstring)
+ return "Admin socket: {command} failed with rc={rc} json output={json}, because '{es}'".format(
+ command=self.command, rc=self.rc, json=self.json, es=self.errstring)
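
The log.info() and __str__ hunks above all fix the same pitfall: a
method call binds tighter than +, so in "a {x}, " + "b {y}".format(...)
only the right-hand literal is formatted and any placeholder in the
left-hand part leaks into the output verbatim (str.format silently
ignores the extra keyword arguments rather than raising). A minimal
illustration, not from the patched files:

    >>> "rc={rc}, " + "es='{es}'".format(rc=1, es="bad")
    "rc={rc}, es='bad'"

Collapsing everything into one literal, as the fixes do, makes every
placeholder reachable by the single .format() call.
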
rank1 = self.fs.get_rank(rank=1, status=status)
self.fs.rank_freeze(True, rank=0)
self.fs.rank_freeze(True, rank=1)
- self.fs.rank_asok(['config', 'set', "mds_kill_mdstable_at", "8".format(i)], rank=0, status=status)
- self.fs.rank_asok(['config', 'set', "mds_kill_mdstable_at", "3".format(i)], rank=1, status=status)
- proc = self.mount_a.run_shell(["mkdir", "d1/dir/.snap/s4".format(i)], wait=False)
+ self.fs.rank_asok(['config', 'set', "mds_kill_mdstable_at", "8"], rank=0, status=status)
+ self.fs.rank_asok(['config', 'set', "mds_kill_mdstable_at", "3"], rank=1, status=status)
+ proc = self.mount_a.run_shell(["mkdir", "d1/dir/.snap/s4"], wait=False)
self.wait_until_true(lambda: "laggy_since" in self.fs.get_rank(rank=1), timeout=grace*2);
self.delete_mds_coredump(rank1['name']);
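
In the hunk above, "8".format(i), "3".format(i) and
"d1/dir/.snap/s4".format(i) contain no placeholders, so each .format(i)
was a pure no-op (extra arguments to str.format are silently ignored)
and dropping it is behavior-preserving; the same reasoning applies to
the 'ceph-coverage'.format(tdir=testdir) and
"insights_health_check".format(hours) hunks further down. For
illustration:

    >>> "8".format(42)
    '8'
    >>> "d1/dir/.snap/s4".format(42)
    'd1/dir/.snap/s4'
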
pass # no rank present
if len(actives) >= max_mds:
# no replacement can occur!
- self.log("cluster has %d actives (max_mds is %d), no MDS can replace rank %d".format(len(actives), max_mds, rank))
+ self.log("cluster has {actives} actives (max_mds is {max_mds}), no MDS can replace rank {rank}".format(
+ actives=len(actives), max_mds=max_mds, rank=rank))
return status
else:
if len(actives) == max_mds:
self.log(
'{label} reported laggy/crashed since: {since}'.format(label=label, since=last_laggy_since))
else:
- self.log('{label} down, removed from mdsmap'.format(label=label, since=last_laggy_since))
+ self.log('{label} down, removed from mdsmap'.format(label=label))
# wait for a standby mds to takeover and become active
status = self.wait_for_stable(rank, gid)
check_names.add(unique_check_name)
# and also set the same health check to test deduplication
- dupe_check_name = "insights_health_check".format(hours)
+ dupe_check_name = "insights_health_check"
health_check = {
dupe_check_name: {
"severity": "warning",
size=size))
args = [
'adjust-ulimits',
- 'ceph-coverage'.format(tdir=testdir),
+ 'ceph-coverage',
'{tdir}/archive/coverage'.format(tdir=testdir),
'rbd',
'-p', 'rbd',
('clone', parent_spec, name)]:
args = [
'adjust-ulimits',
- 'ceph-coverage'.format(tdir=testdir),
+ 'ceph-coverage',
'{tdir}/archive/coverage'.format(tdir=testdir),
'rbd', '-p', 'rbd'
]
('snap', 'rm', parent_spec)]:
args = [
'adjust-ulimits',
- 'ceph-coverage'.format(tdir=testdir),
+ 'ceph-coverage',
'{tdir}/archive/coverage'.format(tdir=testdir),
'rbd', '-p', 'rbd'
]
run.Raw('&&'),
'if', 'test', '-e', 'Makefile', run.Raw(';'), 'then', 'make', run.Raw(';'), 'fi',
run.Raw('&&'),
- 'find', '-executable', '-type', 'f', '-printf', r'%P\0'.format(srcdir=srcdir),
+ 'find', '-executable', '-type', 'f', '-printf', r'%P\0',
run.Raw('>{tdir}/restarts.list'.format(tdir=testdir)),
],
)
run.Raw('|'),
'sudo',
'tee',
- '/var/log/ceph/rgw.{client_with_cluster}.stdout'.format(tdir=testdir,
- client_with_cluster=client_with_cluster),
+ '/var/log/ceph/rgw.{client_with_cluster}.stdout'.format(client_with_cluster=client_with_cluster),
run.Raw('2>&1'),
])
client_with_id = daemon_type + '.' + client_id
pre = [
'adjust-ulimits',
- 'ceph-coverage'.format(tdir=testdir),
+ 'ceph-coverage',
'{tdir}/archive/coverage'.format(tdir=testdir),
- 'radosgw-admin'.format(tdir=testdir),
+ 'radosgw-admin',
'--log-to-stderr',
'--format', format,
'-n', client_with_id,
if hasattr(fn, 'is_for_teuthology') and getattr(fn, 'is_for_teuthology') is True:
drop_test = True
- log.warn("Dropping test because long running: ".format(method.id()))
+ log.warn("Dropping test because long running: {method_id}".format(method_id=method.id()))
if getattr(fn, "needs_trimming", False) is True:
drop_test = (os.getuid() != 0)
- log.warn("Dropping test because client trim unavailable: ".format(method.id()))
+ log.warn("Dropping test because client trim unavailable: {method_id}".format(method_id=method.id()))
if drop_test:
# Don't drop the test if it was explicitly requested in arguments
run.Raw('&&'),
'if', 'test', '-e', 'Makefile', run.Raw(';'), 'then', 'make', run.Raw(';'), 'fi',
run.Raw('&&'),
- 'find', '-executable', '-type', 'f', '-printf', r'%P\0'.format(srcdir=srcdir),
+ 'find', '-executable', '-type', 'f', '-printf', r'%P\0',
run.Raw('>{tdir}/workunits.list.{role}'.format(tdir=testdir, role=role)),
],
)
mdlog_json = mdlog_json.decode('utf-8')
return json.loads(mdlog_json)
-def meta_sync_status(zone):
- while True:
- cmd = ['metadata', 'sync', 'status'] + zone.zone_args()
- meta_sync_status_json, retcode = zone.cluster.admin(cmd, check_retcode=False, read_only=True)
- if retcode == 0:
- break
- assert(retcode == 2) # ENOENT
- time.sleep(5)
-
def mdlog_autotrim(zone):
zone.cluster.admin(['mdlog', 'autotrim'])
for worker in self.workers:
worker.close()
worker.join()
-
- def get_and_reset_events(self):
- events = []
- for worker in self.workers:
- events += worker.get_events()
- worker.reset_events()
- return events
-
# AMQP endpoint functions
try:
result = bad_topic_conf.set_config()
except Exception as err:
- print 'Error is expected: ' + str(err)
+ print('Error is expected: ' + str(err))
else:
assert False, 'user password configuration set allowed only over HTTPS'
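
This and the remaining print hunks are the same mechanical conversion:
the Python 2 print statement is a SyntaxError to a Python 3 parser, so
mypy (which parses source as Python 3 by default) cannot even load these
files until it is rewritten as a call. With a single string argument the
call form behaves identically on both interpreters:

    # Python 2 statement form - rejected outright by a Python 3 parser:
    #   print 'Error is expected: ' + str(err)
    # Function form - works on Python 2 and 3 alike:
    print('Error is expected: ' + str(err))
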
[thr.join() for thr in client_threads]
time_diff = time.time() - start_time
- print 'average time for deletion + amqp notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds'
+ print('average time for deletion + amqp notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds')
print('wait for 5sec for the messages...')
time.sleep(5)
[thr.join() for thr in client_threads]
time_diff = time.time() - start_time
- print 'average time for creation + kafka notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds'
+ print('average time for creation + kafka notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds')
- print 'wait for 5sec for the messages...'
+ print('wait for 5sec for the messages...')
time.sleep(5)
keys = list(bucket.list())
receiver.verify_s3_events(keys, exact_match=True)
[thr.join() for thr in client_threads]
time_diff = time.time() - start_time
- print 'average time for deletion + kafka notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds'
+ print('average time for deletion + kafka notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds')
- print 'wait for 5sec for the messages...'
+ print('wait for 5sec for the messages...')
time.sleep(5)
receiver.verify_s3_events(keys, exact_match=True, deletions=True)
[thr.join() for thr in client_threads]
time_diff = time.time() - start_time
- print 'average time for creation + kafka notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds'
+ print('average time for creation + kafka notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds')
try:
- print 'wait for 5sec for the messages...'
+ print('wait for 5sec for the messages...')
time.sleep(5)
keys = list(bucket.list())
receiver.verify_s3_events(keys, exact_match=True)
[thr.join() for thr in client_threads]
time_diff = time.time() - start_time
- print 'average time for deletion + kafka notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds'
+ print('average time for deletion + kafka notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds')
- print 'wait for 5sec for the messages...'
+ print('wait for 5sec for the messages...')
time.sleep(5)
receiver.verify_s3_events(keys, exact_match=True, deletions=True)
except Exception as err:
start_time = time.time()
delete_all_objects(zones[0].conn, bucket_name)
time_diff = time.time() - start_time
- print 'average time for deletion + http notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds'
+ print('average time for deletion + http notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds')
print('wait for 5sec for the messages...')
time.sleep(5)
[thr.join() for thr in client_threads]
time_diff = time.time() - start_time
- print 'average time for deletion + http notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds'
+ print('average time for deletion + http notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds')
print('wait for 5sec for the messages...')
time.sleep(5)
return r
-def append_query_arg(s, n, v):
- if not v:
- return s
- nv = '{n}={v}'.format(n=n, v=v)
- if not s:
- return nv
- return '{s}&{nv}'.format(s=s, nv=nv)
-
class CloudZoneBucket:
def __init__(self, zone_conn, target_path, name):
raise boto.exception.S3ResponseError(result.status, result.reason, result.read())
return result
-def append_query_arg(s, n, v):
- if not v:
- return s
- nv = '{n}={v}'.format(n=n, v=v)
- if not s:
- return nv
- return '{s}&{nv}'.format(s=s, nv=nv)
class MDSearch:
def __init__(self, conn, bucket_name, query, query_args = None, marker = None):
NO_HTTP_BODY = ''
-def print_connection_info(conn):
- """print connection details"""
- print('Endpoint: ' + conn.host + ':' + str(conn.port))
- print('AWS Access Key:: ' + conn.aws_access_key_id)
- print('AWS Secret Key:: ' + conn.aws_secret_access_key)
-
-
def make_request(conn, method, resource, parameters=None, sign_parameters=False, extra_parameters=None):
"""generic request sending to pubsub radogw
should cover: topics, notificatios and subscriptions
print('topic cleanup, deleting: ' + topic['TopicArn'])
assert client.delete_topic(TopicArn=topic['TopicArn'])['ResponseMetadata']['HTTPStatusCode'] == 200
except Exception as err:
- print 'failed to do topic cleanup: ' + str(err)
+ print('failed to do topic cleanup: ' + str(err))
def delete_all_objects(conn, bucket_name):