From: Kyr Shatskyy
Date: Mon, 7 Oct 2019 14:09:05 +0000 (+0200)
Subject: tests: use python3 compatible print
X-Git-Tag: v14.2.10~17^2~109
X-Git-Url: http://git-server-git.apps.pok.os.sepia.ceph.com/?a=commitdiff_plain;h=45bdf72dd7af022ac44b23e31ed801b5460bc567;p=ceph.git

tests: use python3 compatible print

Fixes: https://tracker.ceph.com/issues/42210
Signed-off-by: Kyr Shatskyy
(cherry picked from commit 7e87f80a8754c93c10ff937844ad456d4a236f57)

Conflicts:
	qa/tasks/cephfs/mount.py
	qa/tasks/cephfs/test_journal_migration.py
	qa/workunits/mon/caps.py
	src/test/rgw/rgw_multi/tests_ps.py
	src/test/rgw/rgw_multi/zone_ps.py
	src/test/rgw/test_multi.py:
		trivial resolutions
---
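The conversion applied throughout the diff below follows two patterns: the Python 2 print statement becomes a print() call, and stream redirection via "print >> sys.stderr" becomes the file= keyword argument. A minimal sketch of both patterns (illustrative only, not part of the patch; the __future__ import is an assumption, useful only where a file must also keep running under Python 2):

    from __future__ import print_function  # assumption: only for files that must still run on Python 2
    import sys

    status = "HEALTH_OK"
    print(status)                      # was: print status
    print("error", file=sys.stderr)    # was: print >> sys.stderr, "error"
    print("a", "b")                    # prints "a b"; as a py2 statement this would print a tuple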
diff --git a/qa/tasks/ceph_manager.py b/qa/tasks/ceph_manager.py
index 72da3c606135a..d2301a5fe2fa6 100644
--- a/qa/tasks/ceph_manager.py
+++ b/qa/tasks/ceph_manager.py
@@ -138,7 +138,7 @@ class Thrasher:
                 """
                 Implement log behavior
                 """
-                print x
+                print(x)
             self.log = tmp
         if self.config is None:
             self.config = dict()
@@ -1121,7 +1121,7 @@ class CephManager:
                 """
                 implement log behavior.
                 """
-                print x
+                print(x)
             self.log = tmp
         if self.config is None:
             self.config = dict()
diff --git a/qa/tasks/cephfs/fuse_mount.py b/qa/tasks/cephfs/fuse_mount.py
index 56a39790759c2..61d28e38ee52b 100644
--- a/qa/tasks/cephfs/fuse_mount.py
+++ b/qa/tasks/cephfs/fuse_mount.py
@@ -427,7 +427,7 @@ def find_socket(client_name):
                     return f
     raise RuntimeError("Client socket {{0}} not found".format(client_name))

-print find_socket("{client_name}")
+print(find_socket("{client_name}"))
 """.format(
             asok_path=self._asok_path(),
             client_name="client.{0}".format(self.client_id))
diff --git a/qa/tasks/cephfs/kernel_mount.py b/qa/tasks/cephfs/kernel_mount.py
index f79c1a27e5a77..c5f6dab8aed86 100644
--- a/qa/tasks/cephfs/kernel_mount.py
+++ b/qa/tasks/cephfs/kernel_mount.py
@@ -208,7 +208,7 @@ class KernelMount(CephFSMount):
                     result[client_id] = dir
                 return result

-            print json.dumps(get_id_to_dir())
+            print(json.dumps(get_id_to_dir()))
             """)

         p = self.client_remote.run(args=[
@@ -230,7 +230,7 @@ class KernelMount(CephFSMount):
         pyscript = dedent("""
             import os

-            print open(os.path.join("{debug_dir}", "{filename}")).read()
+            print(open(os.path.join("{debug_dir}", "{filename}")).read())
             """).format(debug_dir=debug_dir, filename=filename)

         p = self.client_remote.run(args=[
diff --git a/qa/tasks/cephfs/mount.py b/qa/tasks/cephfs/mount.py
index bbaf0e510d949..339e028016201 100644
--- a/qa/tasks/cephfs/mount.py
+++ b/qa/tasks/cephfs/mount.py
@@ -554,9 +554,9 @@ class CephFSMount(object):
                 sys.exit(e.errno)

             attrs = ["st_mode", "st_ino", "st_dev", "st_nlink", "st_uid", "st_gid", "st_size", "st_atime", "st_mtime", "st_ctime"]
-            print json.dumps(
+            print(json.dumps(
                 dict([(a, getattr(s, a)) for a in attrs]),
-                indent=2)
+                indent=2))
             """).format(path=abs_path)
         proc = self._run_python(pyscript)
         if wait:
@@ -596,14 +596,14 @@ class CephFSMount(object):
                 import os
                 import stat

-                print os.stat("{path}").st_ino
+                print(os.stat("{path}").st_ino)
                 """).format(path=abs_path)
         else:
             pyscript = dedent("""
                 import os
                 import stat

-                print os.lstat("{path}").st_ino
+                print(os.lstat("{path}").st_ino)
                 """).format(path=abs_path)

         proc = self._run_python(pyscript)
@@ -617,7 +617,7 @@ class CephFSMount(object):
             import os
             import stat

-            print os.stat("{path}").st_nlink
+            print(os.stat("{path}").st_nlink)
             """).format(path=abs_path)

         proc = self._run_python(pyscript)
diff --git a/qa/tasks/cephfs/test_client_recovery.py b/qa/tasks/cephfs/test_client_recovery.py
index 73bd815c4f70a..bdd8e1388d2ad 100644
--- a/qa/tasks/cephfs/test_client_recovery.py
+++ b/qa/tasks/cephfs/test_client_recovery.py
@@ -472,22 +472,22 @@ class TestClientRecovery(CephFSTestCase):

             path = "{path}"

-            print "Starting creation..."
+            print("Starting creation...")
             start = time.time()

             os.mkdir(path)
             dfd = os.open(path, os.O_DIRECTORY)

             fd = open(os.path.join(path, "childfile"), "w")
-            print "Finished creation in {{0}}s".format(time.time() - start)
+            print("Finished creation in {{0}}s".format(time.time() - start))

-            print "Starting fsync..."
+            print("Starting fsync...")
             start = time.time()
             if {dirfsync}:
                 os.fsync(dfd)
             else:
                 os.fsync(fd)
-            print "Finished fsync in {{0}}s".format(time.time() - start)
+            print("Finished fsync in {{0}}s".format(time.time() - start))
             """.format(path=path,dirfsync=str(dirfsync)))
         )
@@ -570,7 +570,7 @@ class TestClientRecovery(CephFSTestCase):
             cephfs.mount()
             client_id = cephfs.get_instance_id()
             cephfs.abort_conn()
-            print client_id
+            print(client_id)
             """)
         )
         gid = int(gid_str);
diff --git a/qa/tasks/cephfs/test_forward_scrub.py b/qa/tasks/cephfs/test_forward_scrub.py
index b0f85e3213f62..20a079d72d7de 100644
--- a/qa/tasks/cephfs/test_forward_scrub.py
+++ b/qa/tasks/cephfs/test_forward_scrub.py
@@ -202,7 +202,7 @@ class TestForwardScrub(CephFSTestCase):
         inotable_dict = {}
         for rank in ranks:
             inotable_oid = "mds{rank:d}_".format(rank=rank) + "inotable"
-            print "Trying to fetch inotable object: " + inotable_oid
+            print("Trying to fetch inotable object: " + inotable_oid)

             #self.fs.get_metadata_object("InoTable", "mds0_inotable")
             inotable_raw = self.fs.get_metadata_object_raw(inotable_oid)
diff --git a/qa/tasks/cephfs/test_full.py b/qa/tasks/cephfs/test_full.py
index ea63dfc6f94d9..d51e24794e029 100644
--- a/qa/tasks/cephfs/test_full.py
+++ b/qa/tasks/cephfs/test_full.py
@@ -229,12 +229,12 @@ class FullnessTestCase(CephFSTestCase):
             import os

             # Write some buffered data through before going full, all should be well
-            print "writing some data through which we expect to succeed"
+            print("writing some data through which we expect to succeed")
             bytes = 0
             f = os.open("{file_path}", os.O_WRONLY | os.O_CREAT)
             bytes += os.write(f, 'a' * 512 * 1024)
             os.fsync(f)
-            print "fsync'ed data successfully, will now attempt to fill fs"
+            print("fsync'ed data successfully, will now attempt to fill fs")

             # Okay, now we're going to fill up the filesystem, and then keep
             # writing until we see an error from fsync.  As long as we're doing
@@ -244,26 +244,26 @@ class FullnessTestCase(CephFSTestCase):
             for n in range(0, int({fill_mb} * 0.9)):
                 bytes += os.write(f, 'x' * 1024 * 1024)
-                print "wrote {{0}} bytes via buffered write, may repeat".format(bytes)
-            print "done writing {{0}} bytes".format(bytes)
+                print("wrote {{0}} bytes via buffered write, may repeat".format(bytes))
+            print("done writing {{0}} bytes".format(bytes))

             # OK, now we should sneak in under the full condition
             # due to the time it takes the OSDs to report to the
             # mons, and get a successful fsync on our full-making data
             os.fsync(f)
-            print "successfully fsync'ed prior to getting full state reported"
+            print("successfully fsync'ed prior to getting full state reported")

             # buffered write, add more dirty data to the buffer
-            print "starting buffered write"
+            print("starting buffered write")
             try:
                 for n in range(0, int({fill_mb} * 0.2)):
                     bytes += os.write(f, 'x' * 1024 * 1024)
-                    print "sleeping a bit as we've exceeded 90% of our expected full ratio"
+                    print("sleeping a bit as we've exceeded 90% of our expected full ratio")
                     time.sleep({full_wait})
             except OSError:
                 pass;

-            print "wrote, now waiting 30s and then doing a close we expect to fail"
+            print("wrote, now waiting 30s and then doing a close we expect to fail")

             # Wait long enough for a background flush that should fail
             time.sleep(30)
@@ -273,7 +273,7 @@ class FullnessTestCase(CephFSTestCase):
                 try:
                     os.close(f)
                 except OSError:
-                    print "close() returned an error as expected"
+                    print("close() returned an error as expected")
                 else:
                     raise RuntimeError("close() failed to raise error")
             else:
@@ -300,12 +300,12 @@ class FullnessTestCase(CephFSTestCase):
             import os

             # Write some buffered data through before going full, all should be well
-            print "writing some data through which we expect to succeed"
+            print("writing some data through which we expect to succeed")
             bytes = 0
             f = os.open("{file_path}", os.O_WRONLY | os.O_CREAT)
             bytes += os.write(f, 'a' * 4096)
             os.fsync(f)
-            print "fsync'ed data successfully, will now attempt to fill fs"
+            print("fsync'ed data successfully, will now attempt to fill fs")

             # Okay, now we're going to fill up the filesystem, and then keep
             # writing until we see an error from fsync.  As long as we're doing
@@ -316,25 +316,25 @@ class FullnessTestCase(CephFSTestCase):
             for n in range(0, int({fill_mb} * 1.1)):
                 try:
                     bytes += os.write(f, 'x' * 1024 * 1024)
-                    print "wrote bytes via buffered write, moving on to fsync"
+                    print("wrote bytes via buffered write, moving on to fsync")
                 except OSError as e:
-                    print "Unexpected error %s from write() instead of fsync()" % e
+                    print("Unexpected error %s from write() instead of fsync()" % e)
                     raise

                 try:
                     os.fsync(f)
-                    print "fsync'ed successfully"
+                    print("fsync'ed successfully")
                 except OSError as e:
-                    print "Reached fullness after %.2f MB" % (bytes / (1024.0 * 1024.0))
+                    print("Reached fullness after %.2f MB" % (bytes / (1024.0 * 1024.0)))
                     full = True
                     break
                 else:
-                    print "Not full yet after %.2f MB" % (bytes / (1024.0 * 1024.0))
+                    print("Not full yet after %.2f MB" % (bytes / (1024.0 * 1024.0)))

                 if n > {fill_mb} * 0.9:
                     # Be cautious in the last region where we expect to hit
                     # the full condition, so that we don't overshoot too dramatically
-                    print "sleeping a bit as we've exceeded 90% of our expected full ratio"
+                    print("sleeping a bit as we've exceeded 90% of our expected full ratio")
                     time.sleep({full_wait})

             if not full:
@@ -343,9 +343,9 @@ class FullnessTestCase(CephFSTestCase):
             # close() should not raise an error because we already caught it in
             # fsync.  There shouldn't have been any more writeback errors
             # since then because all IOs got cancelled on the full flag.
-            print "calling close"
+            print("calling close")
             os.close(f)
-            print "close() did not raise error"
+            print("close() did not raise error")

             os.unlink("{file_path}")
             """)
diff --git a/qa/tasks/radosgw_admin.py b/qa/tasks/radosgw_admin.py
index d465fcb471760..d6faa03e0ed37 100644
--- a/qa/tasks/radosgw_admin.py
+++ b/qa/tasks/radosgw_admin.py
@@ -131,7 +131,7 @@ class usage_acc:
         for b in e['buckets']:
             c = b['categories']
             if b['bucket'] == 'nosuchbucket':
-                print "got here"
+                print("got here")
             try:
                 b2 = self.e2b(e2, b['bucket'], False)
                 if b2 != None:
@@ -232,7 +232,7 @@ def create_presigned_url(conn, method, bucket_name, key_name, expiration):

 def send_raw_http_request(conn, method, bucket_name, key_name, follow_redirects = False):
     url = create_presigned_url(conn, method, bucket_name, key_name, 3600)
-    print url
+    print(url)
     h = httplib2.Http()
     h.follow_redirects = follow_redirects
     return h.request(url, method)
diff --git a/qa/tasks/resolve_stuck_peering.py b/qa/tasks/resolve_stuck_peering.py
index 9b31343f2784b..d140544c4945a 100644
--- a/qa/tasks/resolve_stuck_peering.py
+++ b/qa/tasks/resolve_stuck_peering.py
@@ -82,7 +82,7 @@ def task(ctx, config):
     pgnum=0
     pgstr = manager.get_pgid(pool, pgnum)
     stats = manager.get_single_pg_stats(pgstr)
-    print stats['state']
+    print(stats['state'])

     timeout=60
     start=time.time()
@@ -100,7 +100,7 @@ def task(ctx, config):
     #expect the pg status to be active+undersized+degraded
     #pg should recover and become active+clean within timeout
     stats = manager.get_single_pg_stats(pgstr)
-    print stats['state']
+    print(stats['state'])

     timeout=10
     start=time.time()
diff --git a/qa/tasks/s3a_hadoop.py b/qa/tasks/s3a_hadoop.py
index b0c4ede60028b..239be7cb9971d 100644
--- a/qa/tasks/s3a_hadoop.py
+++ b/qa/tasks/s3a_hadoop.py
@@ -165,7 +165,7 @@ conn = boto.connect_s3(
 )
 bucket = conn.create_bucket('{bucket_name}')
 for bucket in conn.get_all_buckets():
-        print bucket.name + "\t" + bucket.creation_date
+        print(bucket.name + "\t" + bucket.creation_date)
 """.format(access_key=access_key, secret_key=secret_key, dns_name=dns_name, bucket_name=bucket_name)
     py_bucket_file = '{testdir}/create_bucket.py'.format(testdir=testdir)
     misc.sudo_write_file(
diff --git a/qa/tasks/scrub.py b/qa/tasks/scrub.py
index 14d5103e19f23..a8525b73bbda6 100644
--- a/qa/tasks/scrub.py
+++ b/qa/tasks/scrub.py
@@ -82,7 +82,7 @@ class Scrubber:
         else:
             def tmp(x):
                 """Local display"""
-                print x
+                print(x)
             self.log = tmp

         self.stopping = False
diff --git a/qa/workunits/mon/caps.py b/qa/workunits/mon/caps.py
index e83bcad9e7e60..454bea37da6fe 100644
--- a/qa/workunits/mon/caps.py
+++ b/qa/workunits/mon/caps.py
@@ -40,7 +40,7 @@ def call(cmd):
   else:
     assert False, 'cmd is not a string/unicode nor a list!'

-  print 'call: {0}'.format(args)
+  print('call: {0}'.format(args))
   proc = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
   ret = proc.wait()

@@ -51,8 +51,8 @@ def expect(cmd, expected_ret):
   try:
     (r, p) = call(cmd)
   except ValueError as e:
-    print >> sys.stderr, \
-          'unable to run {c}: {err}'.format(c=repr(cmd), err=e.message)
+    print('unable to run {c}: {err}'.format(c=repr(cmd), err=e.message),
+          file=sys.stderr)
     return errno.EINVAL

   assert r == p.returncode, \
@@ -280,7 +280,7 @@ def test_all():
       if len(cmd_args) > 0:
         (cmd_args_key, cmd_args_val) = cmd_args.split('=')

-      print 'generating keyring for {m}/{c}'.format(m=module,c=cmd_cmd)
+      print('generating keyring for {m}/{c}'.format(m=module,c=cmd_cmd))
       # gen keyring
       for (good_or_bad,kind_map) in perms.iteritems():
         for (kind,lst) in kind_map.iteritems():
@@ -303,7 +303,7 @@ def test_all():
               'ceph auth get-or-create {n} {c}'.format(
                 n=cname,c=run_cap), 0, k)
       # keyring generated
-      print 'testing {m}/{c}'.format(m=module,c=cmd_cmd)
+      print('testing {m}/{c}'.format(m=module,c=cmd_cmd))

       # test
       for good_bad in perms.keys():
@@ -353,7 +353,7 @@ def main():
   test_all()
   test_misc()

-  print 'OK'
+  print('OK')

   return 0
diff --git a/src/objsync/boto_del.py b/src/objsync/boto_del.py
index ba512e1ca33a9..f738896dd9059 100755
--- a/src/objsync/boto_del.py
+++ b/src/objsync/boto_del.py
@@ -32,10 +32,10 @@ conn = S3Connection(calling_format=OrdinaryCallingFormat(), is_secure=False,
                     aws_secret_access_key=os.environ["SKEY"])
 bucket = conn.lookup(bucket_name)
 if (bucket == None):
-    print "bucket '%s' no longer exists" % bucket_name
+    print("bucket '%s' no longer exists" % bucket_name)
     sys.exit(0)
-print "deleting bucket '%s' ..." % bucket_name
+print("deleting bucket '%s' ..." % bucket_name)
 bucket.delete()
-print "done."
+print("done.")
 sys.exit(0)
diff --git a/src/powerdns/pdns-backend-rgw.py b/src/powerdns/pdns-backend-rgw.py
index 20a86c0dbb6fb..db409b32a1f54 100755
--- a/src/powerdns/pdns-backend-rgw.py
+++ b/src/powerdns/pdns-backend-rgw.py
@@ -176,7 +176,7 @@ def init_config():
         cfg.read(config_locations)
     else:
         if not os.path.isfile(args.config):
-            print "Could not open configuration file %s" % args.config
+            print("Could not open configuration file %s" % args.config)
             sys.exit(1)
         cfg.read(args.config)

@@ -268,7 +268,8 @@ def generate_app(config):
 # Initialize the configuration and generate the Application
 config = init_config()
 if config == None:
-    print "Could not parse configuration file. Tried to parse %s" % config_locations
+    print("Could not parse configuration file. "
+          "Tried to parse %s" % config_locations)
" + "Tried to parse %s" % config_locations) sys.exit(1) app = generate_app(config) diff --git a/src/test/rgw/rgw_multi/tests_ps.py b/src/test/rgw/rgw_multi/tests_ps.py index 3461bc07a75ba..02fc7419a1e67 100644 --- a/src/test/rgw/rgw_multi/tests_ps.py +++ b/src/test/rgw/rgw_multi/tests_ps.py @@ -49,7 +49,7 @@ def set_contents_from_string(key, content): try: key.set_contents_from_string(content) except Exception as e: - print 'Error: ' + str(e) + print('Error: ' + str(e)) # HTTP endpoint functions @@ -184,7 +184,8 @@ class AMQPReceiver(object): break except Exception as error: remaining_retries -= 1 - print 'failed to connect to rabbitmq (remaining retries ' + str(remaining_retries) + '): ' + str(error) + print('failed to connect to rabbitmq (remaining retries ' + + str(remaining_retries) + '): ' + str(error)) time.sleep(0.5) if remaining_retries == 0: @@ -378,7 +379,7 @@ def init_rabbitmq(): proc = subprocess.Popen('rabbitmq-server') except Exception as error: log.info('failed to execute rabbitmq-server: %s', str(error)) - print 'failed to execute rabbitmq-server: %s' % str(error) + print('failed to execute rabbitmq-server: %s' % str(error)) return None # TODO add rabbitmq checkpoint instead of sleep time.sleep(5) @@ -628,14 +629,14 @@ def test_ps_info(): for i in range(number_of_objects): key = bucket.new_key(str(i)) key.set_contents_from_string('bar') - print 'Zonegroup: ' + zonegroup.name - print 'user: ' + get_user() - print 'tenant: ' + get_tenant() - print 'Master Zone' + print('Zonegroup: ' + zonegroup.name) + print('user: ' + get_user()) + print('tenant: ' + get_tenant()) + print('Master Zone') print_connection_info(zones[0].conn) - print 'PubSub Zone' + print('PubSub Zone') print_connection_info(ps_zones[0].conn) - print 'Bucket: ' + bucket_name + print('Bucket: ' + bucket_name) def test_ps_s3_notification_low_level(): @@ -918,7 +919,7 @@ def test_ps_s3_topic_with_secret_on_master(): try: result = bad_topic_conf.set_config() except Exception as err: - print 'Error is expected: ' + str(err) + print('Error is expected: ' + str(err)) else: assert False, 'user password configuration set allowed only over HTTPS' @@ -1105,10 +1106,10 @@ def ps_s3_notification_filter(on_master): assert_equal(status/100, 2) skip_notif4 = False except Exception as error: - print 'note: metadata filter is not supported by boto3 - skipping test' + print('note: metadata filter is not supported by boto3 - skipping test') skip_notif4 = True else: - print 'filtering by attributes only supported on master zone' + print('filtering by attributes only supported on master zone') skip_notif4 = True @@ -1159,7 +1160,7 @@ def ps_s3_notification_filter(on_master): key.set_contents_from_string('bar') if on_master: - print 'wait for 5sec for the messages...' 
+        print('wait for 5sec for the messages...')
         time.sleep(5)
     else:
         zone_bucket_checkpoint(ps_zone.zone, zones[0].zone, bucket_name)
@@ -1235,7 +1236,7 @@ def test_ps_s3_notification_errors_on_master():
     try:
       result, status = s3_notification_conf.set_config()
     except Exception as error:
-      print str(error) + ' - is expected'
+      print(str(error) + ' - is expected')
     else:
       assert False, 'invalid event name is expected to fail'
@@ -1248,7 +1249,7 @@ def test_ps_s3_notification_errors_on_master():
     try:
       _, _ = s3_notification_conf.set_config()
     except Exception as error:
-      print str(error) + ' - is expected'
+      print(str(error) + ' - is expected')
     else:
       assert False, 'missing notification name is expected to fail'
@@ -1262,7 +1263,7 @@ def test_ps_s3_notification_errors_on_master():
     try:
       _, _ = s3_notification_conf.set_config()
     except Exception as error:
-      print str(error) + ' - is expected'
+      print(str(error) + ' - is expected')
     else:
       assert False, 'invalid ARN is expected to fail'
@@ -1276,7 +1277,7 @@ def test_ps_s3_notification_errors_on_master():
     try:
       _, _ = s3_notification_conf.set_config()
     except Exception as error:
-      print str(error) + ' - is expected'
+      print(str(error) + ' - is expected')
     else:
       assert False, 'unknown topic is expected to fail'
@@ -1289,7 +1290,7 @@ def test_ps_s3_notification_errors_on_master():
     try:
       _, _ = s3_notification_conf.set_config()
     except Exception as error:
-      print str(error) + ' - is expected'
+      print(str(error) + ' - is expected')
     else:
       assert False, 'unknown bucket is expected to fail'
@@ -1315,7 +1316,7 @@ def test_objcet_timing():
     bucket_name = gen_bucket_name()
     bucket = zones[0].create_bucket(bucket_name)
     # create objects in the bucket (async)
-    print 'creating objects...'
+    print('creating objects...')
     number_of_objects = 1000
     client_threads = []
     start_time = time.time()
@@ -1328,11 +1329,11 @@ def test_objcet_timing():
     [thr.join() for thr in client_threads]

     time_diff = time.time() - start_time
-    print 'average time for object creation: ' + str(time_diff*1000/number_of_objects) + ' milliseconds'
+    print('average time for object creation: ' + str(time_diff*1000/number_of_objects) + ' milliseconds')

-    print 'total number of objects: ' + str(len(list(bucket.list())))
+    print('total number of objects: ' + str(len(list(bucket.list()))))

-    print 'deleting objects...'
+    print('deleting objects...')
     client_threads = []
     start_time = time.time()
     for key in bucket.list():
@@ -1342,7 +1343,7 @@ def test_objcet_timing():
     [thr.join() for thr in client_threads]

     time_diff = time.time() - start_time
-    print 'average time for object deletion: ' + str(time_diff*1000/number_of_objects) + ' milliseconds'
+    print('average time for object deletion: ' + str(time_diff*1000/number_of_objects) + ' milliseconds')

     # cleanup
     zones[0].delete_bucket(bucket_name)
@@ -1409,14 +1410,14 @@ def test_ps_s3_notification_push_amqp_on_master():
     [thr.join() for thr in client_threads]

     time_diff = time.time() - start_time
-    print 'average time for creation + qmqp notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds'
+    print('average time for creation + qmqp notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds')

-    print 'wait for 5sec for the messages...'
+    print('wait for 5sec for the messages...')
     time.sleep(5)

     # check amqp receiver
     keys = list(bucket.list())
-    print 'total number of objects: ' + str(len(keys))
+    print('total number of objects: ' + str(len(keys)))
     receiver1.verify_s3_events(keys, exact_match=True)
     receiver2.verify_s3_events(keys, exact_match=True)
@@ -1430,9 +1431,9 @@ def test_ps_s3_notification_push_amqp_on_master():
     [thr.join() for thr in client_threads]

     time_diff = time.time() - start_time
-    print 'average time for deletion + amqp notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds'
+    print('average time for deletion + amqp notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds')

-    print 'wait for 5sec for the messages...'
+    print('wait for 5sec for the messages...')
     time.sleep(5)

     # check amqp receiver 1 for deletions
@@ -1589,9 +1590,9 @@ def test_ps_s3_notification_push_kafka_on_master():
     [thr.join() for thr in client_threads]

     time_diff = time.time() - start_time
-    print 'average time for creation + kafka notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds'
+    print('average time for creation + kafka notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds')

-    print 'wait for 5sec for the messages...'
+    print('wait for 5sec for the messages...')
     time.sleep(5)
     keys = list(bucket.list())
     receiver.verify_s3_events(keys, exact_match=True)
@@ -1606,9 +1607,9 @@ def test_ps_s3_notification_push_kafka_on_master():
     [thr.join() for thr in client_threads]

     time_diff = time.time() - start_time
-    print 'average time for deletion + kafka notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds'
+    print('average time for deletion + kafka notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds')

-    print 'wait for 5sec for the messages...'
+    print('wait for 5sec for the messages...')
     time.sleep(5)
     receiver.verify_s3_events(keys, exact_match=True, deletions=True)
@@ -1684,10 +1685,10 @@ def kafka_security(security_type):
     [thr.join() for thr in client_threads]

     time_diff = time.time() - start_time
-    print 'average time for creation + kafka notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds'
+    print('average time for creation + kafka notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds')

     try:
-        print 'wait for 5sec for the messages...'
+        print('wait for 5sec for the messages...')
         time.sleep(5)
         keys = list(bucket.list())
         receiver.verify_s3_events(keys, exact_match=True)
@@ -1702,9 +1703,9 @@ def kafka_security(security_type):
         [thr.join() for thr in client_threads]

         time_diff = time.time() - start_time
-        print 'average time for deletion + kafka notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds'
+        print('average time for deletion + kafka notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds')

-        print 'wait for 5sec for the messages...'
+        print('wait for 5sec for the messages...')
         time.sleep(5)
         receiver.verify_s3_events(keys, exact_match=True, deletions=True)
     except Exception as err:
@@ -1780,7 +1781,7 @@ def test_ps_s3_notification_multi_delete_on_master():
     start_time = time.time()
     delete_all_objects(zones[0].conn, bucket_name)
     time_diff = time.time() - start_time
-    print 'average time for deletion + http notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds'
+    print('average time for deletion + http notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds')

     print('wait for 5sec for the messages...')
     time.sleep(5)
@@ -1844,14 +1845,14 @@ def test_ps_s3_notification_push_http_on_master():
     [thr.join() for thr in client_threads]

     time_diff = time.time() - start_time
-    print 'average time for creation + http notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds'
+    print('average time for creation + http notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds')

-    print 'wait for 5sec for the messages...'
+    print('wait for 5sec for the messages...')
     time.sleep(5)

     # check http receiver
     keys = list(bucket.list())
-    print 'total number of objects: ' + str(len(keys))
+    print('total number of objects: ' + str(len(keys)))
     http_server.verify_s3_events(keys, exact_match=True)

     # delete objects from the bucket
@@ -1864,9 +1865,9 @@ def test_ps_s3_notification_push_http_on_master():
     [thr.join() for thr in client_threads]

     time_diff = time.time() - start_time
-    print 'average time for deletion + http notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds'
+    print('average time for deletion + http notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds')

-    print 'wait for 5sec for the messages...'
+    print('wait for 5sec for the messages...')
     time.sleep(5)

     # check http receiver
@@ -2595,7 +2596,7 @@ def test_ps_s3_creation_triggers_on_master():
     uploader.complete_upload()
     fp.close()

-    print 'wait for 5sec for the messages...'
+    print('wait for 5sec for the messages...')
     time.sleep(5)

     # check amqp receiver
@@ -2675,7 +2676,7 @@ def test_ps_s3_multipart_on_master():
     uploader.complete_upload()
     fp.close()

-    print 'wait for 5sec for the messages...'
+    print('wait for 5sec for the messages...')
     time.sleep(5)

     # check amqp receiver
@@ -3121,7 +3122,7 @@ def test_ps_s3_versioned_deletion_on_master():
     bucket.delete_key(key.name, version_id=v1)
     delete_marker_key.delete()

-    print 'wait for 5sec for the messages...'
+    print('wait for 5sec for the messages...')
     time.sleep(5)

     # check amqp receiver
diff --git a/src/test/rgw/rgw_multi/zone_ps.py b/src/test/rgw/rgw_multi/zone_ps.py
index 0d4bb37ea7c88..9c512b16498b5 100644
--- a/src/test/rgw/rgw_multi/zone_ps.py
+++ b/src/test/rgw/rgw_multi/zone_ps.py
@@ -165,10 +165,10 @@ def delete_all_s3_topics(zone, region):

         topics = client.list_topics()['Topics']
         for topic in topics:
-            print 'topic cleanup, deleting: ' + topic['TopicArn']
+            print('topic cleanup, deleting: ' + topic['TopicArn'])
             assert client.delete_topic(TopicArn=topic['TopicArn'])['ResponseMetadata']['HTTPStatusCode'] == 200
     except Exception as err:
-        print 'failed to do topic cleanup: ' + str(err)
+        print('failed to do topic cleanup: ' + str(err))


 def delete_all_objects(conn, bucket_name):
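
A quick way to verify that a conversion like this caught every Python-2-only print statement is to parse the touched files with a Python 3 interpreter, since the old statement form is a syntax error there. An illustrative sketch, not part of this change (the script name and the scanned directories are assumptions):

    # check_py3_syntax.py -- hypothetical helper, not shipped with this patch
    import ast
    import sys
    from pathlib import Path

    failures = []
    for root in ("qa", "src/test/rgw"):      # assumption: the trees touched by this patch
        for path in Path(root).rglob("*.py"):
            try:
                ast.parse(path.read_text(), filename=str(path))
            except SyntaxError as err:
                # catches py2-only constructs such as `print x`
                failures.append("{}:{}".format(path, err.lineno))

    for failure in failures:
        print(failure + ": not valid Python 3 syntax")
    sys.exit(1 if failures else 0)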