git.apps.os.sepia.ceph.com Git - ceph.git/commitdiff
tests: use python3 compatible print
author Kyr Shatskyy <kyrylo.shatskyy@suse.com>
Mon, 7 Oct 2019 14:09:05 +0000 (16:09 +0200)
committer Kefu Chai <kchai@redhat.com>
Tue, 2 Jun 2020 02:32:22 +0000 (10:32 +0800)
Fixes: https://tracker.ceph.com/issues/42210
Signed-off-by: Kyr Shatskyy <kyrylo.shatskyy@suse.com>
(cherry picked from commit 7e87f80a8754c93c10ff937844ad456d4a236f57)

Conflicts:
qa/tasks/cephfs/mount.py
qa/tasks/cephfs/test_journal_migration.py
qa/workunits/mon/caps.py
src/test/rgw/rgw_multi/tests_ps.py
src/test/rgw/rgw_multi/zone_ps.py
src/test/rgw/test_multi.py: trivial resolutions
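
The conversion itself is mechanical: Python 3 removed the print statement, so "print x" is a SyntaxError there, while the single-argument call form "print(x)" produces identical output on both interpreters. A minimal sketch, not taken from the diff, of the three print forms this commit touches:

    from __future__ import print_function  # no-op on Python 3; on Python 2 it
                                            # turns print into a function
    import sys

    x = 42
    print(x)                        # one argument: same output on 2 and 3,
                                    # even without the future import
    print("a", "b")                 # "a b" on both; without the import,
                                    # Python 2 prints the tuple ('a', 'b')
    print("oops", file=sys.stderr)  # replaces py2's "print >> sys.stderr";
                                    # needs Python 3 or the future import

Because the diff almost always wraps a single expression, the converted files remain valid Python 2 as well; the file= form in qa/workunits/mon/caps.py below is the main exception.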

16 files changed:
qa/tasks/ceph_manager.py
qa/tasks/cephfs/fuse_mount.py
qa/tasks/cephfs/kernel_mount.py
qa/tasks/cephfs/mount.py
qa/tasks/cephfs/test_client_recovery.py
qa/tasks/cephfs/test_forward_scrub.py
qa/tasks/cephfs/test_full.py
qa/tasks/radosgw_admin.py
qa/tasks/resolve_stuck_peering.py
qa/tasks/s3a_hadoop.py
qa/tasks/scrub.py
qa/workunits/mon/caps.py
src/objsync/boto_del.py
src/powerdns/pdns-backend-rgw.py
src/test/rgw/rgw_multi/tests_ps.py
src/test/rgw/rgw_multi/zone_ps.py

index 72da3c606135a4f4756dc0d94f5f63036a25346f..d2301a5fe2fa6eefc9777e818ce9059ed71f4268 100644 (file)
@@ -138,7 +138,7 @@ class Thrasher:
                 """
                 Implement log behavior
                 """
-                print x
+                print(x)
             self.log = tmp
         if self.config is None:
             self.config = dict()
@@ -1121,7 +1121,7 @@ class CephManager:
                 """
                 implement log behavior.
                 """
-                print x
+                print(x)
             self.log = tmp
         if self.config is None:
             self.config = dict()
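
Both hunks above touch the same fallback-logger idiom: when no external logger is wired in, a local tmp function that simply prints is installed as self.log. A condensed sketch of that pattern; make_logger is a hypothetical name, not from the diff:

    def make_logger(logger=None):
        """Return a log callable, falling back to print()."""
        if logger is not None:
            return logger.info
        def tmp(x):
            """Implement log behavior locally."""
            print(x)
        return tmp

    log = make_logger()
    log("thrashing osd.0")  # goes to stdout when no logger was supplied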
index 56a39790759c2579599c788921f11b383d5f483f..61d28e38ee52bbb4dd83465cbaee793f27404020 100644 (file)
@@ -427,7 +427,7 @@ def find_socket(client_name):
                         return f
         raise RuntimeError("Client socket {{0}} not found".format(client_name))
 
-print find_socket("{client_name}")
+print(find_socket("{client_name}"))
 """.format(
             asok_path=self._asok_path(),
             client_name="client.{0}".format(self.client_id))
index f79c1a27e5a7722ce402eb85b49f3e4fd204bc72..c5f6dab8aed86a2befecf547dd9c9eaf2d48280d 100644 (file)
@@ -208,7 +208,7 @@ class KernelMount(CephFSMount):
                     result[client_id] = dir
                 return result
 
-            print json.dumps(get_id_to_dir())
+            print(json.dumps(get_id_to_dir()))
             """)
 
         p = self.client_remote.run(args=[
@@ -230,7 +230,7 @@ class KernelMount(CephFSMount):
         pyscript = dedent("""
             import os
 
-            print open(os.path.join("{debug_dir}", "{filename}")).read()
+            print(open(os.path.join("{debug_dir}", "{filename}")).read())
             """).format(debug_dir=debug_dir, filename=filename)
 
         p = self.client_remote.run(args=[
index bbaf0e510d9493eace949f4adbd37d829164628b..339e028016201d2692354209b9f1b14c74180d49 100644 (file)
@@ -554,9 +554,9 @@ class CephFSMount(object):
                 sys.exit(e.errno)
 
             attrs = ["st_mode", "st_ino", "st_dev", "st_nlink", "st_uid", "st_gid", "st_size", "st_atime", "st_mtime", "st_ctime"]
-            print json.dumps(
+            print(json.dumps(
                 dict([(a, getattr(s, a)) for a in attrs]),
-                indent=2)
+                indent=2))
             """).format(path=abs_path)
         proc = self._run_python(pyscript)
         if wait:
@@ -596,14 +596,14 @@ class CephFSMount(object):
                 import os
                 import stat
 
-                print os.stat("{path}").st_ino
+                print(os.stat("{path}").st_ino)
                 """).format(path=abs_path)
         else:
             pyscript = dedent("""
                 import os
                 import stat
 
-                print os.lstat("{path}").st_ino
+                print(os.lstat("{path}").st_ino)
                 """).format(path=abs_path)
 
         proc = self._run_python(pyscript)
@@ -617,7 +617,7 @@ class CephFSMount(object):
             import os
             import stat
 
-            print os.stat("{path}").st_nlink
+            print(os.stat("{path}").st_nlink)
             """).format(path=abs_path)
 
         proc = self._run_python(pyscript)
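
The reshuffled parentheses in the first hunk simply make the entire json.dumps(...) call the argument of print(). A condensed sketch of the stat-to-JSON idiom these pyscripts use, with the attrs list shortened:

    import json
    import os

    s = os.stat(".")
    attrs = ["st_mode", "st_ino", "st_size"]
    print(json.dumps(dict([(a, getattr(s, a)) for a in attrs]), indent=2))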
index 73bd815c4f70a239f00e45a53d30cda903dab154..bdd8e1388d2adec1ba8306165acdd89d659d00d6 100644 (file)
@@ -472,22 +472,22 @@ class TestClientRecovery(CephFSTestCase):
 
                 path = "{path}"
 
-                print "Starting creation..."
+                print("Starting creation...")
                 start = time.time()
 
                 os.mkdir(path)
                 dfd = os.open(path, os.O_DIRECTORY)
 
                 fd = open(os.path.join(path, "childfile"), "w")
-                print "Finished creation in {{0}}s".format(time.time() - start)
+                print("Finished creation in {{0}}s".format(time.time() - start))
 
-                print "Starting fsync..."
+                print("Starting fsync...")
                 start = time.time()
                 if {dirfsync}:
                     os.fsync(dfd)
                 else:
                     os.fsync(fd)
-                print "Finished fsync in {{0}}s".format(time.time() - start)
+                print("Finished fsync in {{0}}s".format(time.time() - start))
             """.format(path=path,dirfsync=str(dirfsync)))
         )
 
@@ -570,7 +570,7 @@ class TestClientRecovery(CephFSTestCase):
             cephfs.mount()
             client_id = cephfs.get_instance_id()
             cephfs.abort_conn()
-            print client_id
+            print(client_id)
             """)
         )
         gid = int(gid_str);
index b0f85e3213f6233e8c137308cf880afb23032e4b..20a079d72d7deb2bb1e2b6e3693b8d858818aee2 100644 (file)
@@ -202,7 +202,7 @@ class TestForwardScrub(CephFSTestCase):
         inotable_dict = {}
         for rank in ranks:
             inotable_oid = "mds{rank:d}_".format(rank=rank) + "inotable"
-            print "Trying to fetch inotable object: " + inotable_oid
+            print("Trying to fetch inotable object: " + inotable_oid)
 
             #self.fs.get_metadata_object("InoTable", "mds0_inotable")
             inotable_raw = self.fs.get_metadata_object_raw(inotable_oid)
index ea63dfc6f94d99e166e13a2cb68467cf1299908c..d51e24794e029d3dc2ecb696a35ed3b1cf9864fb 100644 (file)
@@ -229,12 +229,12 @@ class FullnessTestCase(CephFSTestCase):
             import os
 
             # Write some buffered data through before going full, all should be well
-            print "writing some data through which we expect to succeed"
+            print("writing some data through which we expect to succeed")
             bytes = 0
             f = os.open("{file_path}", os.O_WRONLY | os.O_CREAT)
             bytes += os.write(f, 'a' * 512 * 1024)
             os.fsync(f)
-            print "fsync'ed data successfully, will now attempt to fill fs"
+            print("fsync'ed data successfully, will now attempt to fill fs")
 
             # Okay, now we're going to fill up the filesystem, and then keep
             # writing until we see an error from fsync.  As long as we're doing
@@ -244,26 +244,26 @@ class FullnessTestCase(CephFSTestCase):
 
             for n in range(0, int({fill_mb} * 0.9)):
                 bytes += os.write(f, 'x' * 1024 * 1024)
-                print "wrote {{0}} bytes via buffered write, may repeat".format(bytes)
-            print "done writing {{0}} bytes".format(bytes)
+                print("wrote {{0}} bytes via buffered write, may repeat".format(bytes))
+            print("done writing {{0}} bytes".format(bytes))
 
             # OK, now we should sneak in under the full condition
             # due to the time it takes the OSDs to report to the
             # mons, and get a successful fsync on our full-making data
             os.fsync(f)
-            print "successfully fsync'ed prior to getting full state reported"
+            print("successfully fsync'ed prior to getting full state reported")
 
             # buffered write, add more dirty data to the buffer
-            print "starting buffered write"
+            print("starting buffered write")
             try:
                 for n in range(0, int({fill_mb} * 0.2)):
                     bytes += os.write(f, 'x' * 1024 * 1024)
-                    print "sleeping a bit as we've exceeded 90% of our expected full ratio"
+                    print("sleeping a bit as we've exceeded 90% of our expected full ratio")
                     time.sleep({full_wait})
             except OSError:
                 pass;
 
-            print "wrote, now waiting 30s and then doing a close we expect to fail"
+            print("wrote, now waiting 30s and then doing a close we expect to fail")
 
             # Wait long enough for a background flush that should fail
             time.sleep(30)
@@ -273,7 +273,7 @@ class FullnessTestCase(CephFSTestCase):
                 try:
                     os.close(f)
                 except OSError:
-                    print "close() returned an error as expected"
+                    print("close() returned an error as expected")
                 else:
                     raise RuntimeError("close() failed to raise error")
             else:
@@ -300,12 +300,12 @@ class FullnessTestCase(CephFSTestCase):
             import os
 
             # Write some buffered data through before going full, all should be well
-            print "writing some data through which we expect to succeed"
+            print("writing some data through which we expect to succeed")
             bytes = 0
             f = os.open("{file_path}", os.O_WRONLY | os.O_CREAT)
             bytes += os.write(f, 'a' * 4096)
             os.fsync(f)
-            print "fsync'ed data successfully, will now attempt to fill fs"
+            print("fsync'ed data successfully, will now attempt to fill fs")
 
             # Okay, now we're going to fill up the filesystem, and then keep
             # writing until we see an error from fsync.  As long as we're doing
@@ -316,25 +316,25 @@ class FullnessTestCase(CephFSTestCase):
             for n in range(0, int({fill_mb} * 1.1)):
                 try:
                     bytes += os.write(f, 'x' * 1024 * 1024)
-                    print "wrote bytes via buffered write, moving on to fsync"
+                    print("wrote bytes via buffered write, moving on to fsync")
                 except OSError as e:
-                    print "Unexpected error %s from write() instead of fsync()" % e
+                    print("Unexpected error %s from write() instead of fsync()" % e)
                     raise
 
                 try:
                     os.fsync(f)
-                    print "fsync'ed successfully"
+                    print("fsync'ed successfully")
                 except OSError as e:
-                    print "Reached fullness after %.2f MB" % (bytes / (1024.0 * 1024.0))
+                    print("Reached fullness after %.2f MB" % (bytes / (1024.0 * 1024.0)))
                     full = True
                     break
                 else:
-                    print "Not full yet after %.2f MB" % (bytes / (1024.0 * 1024.0))
+                    print("Not full yet after %.2f MB" % (bytes / (1024.0 * 1024.0)))
 
                 if n > {fill_mb} * 0.9:
                     # Be cautious in the last region where we expect to hit
                     # the full condition, so that we don't overshoot too dramatically
-                    print "sleeping a bit as we've exceeded 90% of our expected full ratio"
+                    print("sleeping a bit as we've exceeded 90% of our expected full ratio")
                     time.sleep({full_wait})
 
             if not full:
@@ -343,9 +343,9 @@ class FullnessTestCase(CephFSTestCase):
             # close() should not raise an error because we already caught it in
             # fsync.  There shouldn't have been any more writeback errors
             # since then because all IOs got cancelled on the full flag.
-            print "calling close"
+            print("calling close")
             os.close(f)
-            print "close() did not raise error"
+            print("close() did not raise error")
 
             os.unlink("{file_path}")
             """)
index d465fcb471760112a04214f5fd76712badf1d4e3..d6faa03e0ed37feb6e30051af72a4f467adb1fed 100644 (file)
@@ -131,7 +131,7 @@ class usage_acc:
             for b in e['buckets']:
                 c = b['categories']
                 if b['bucket'] == 'nosuchbucket':
-                    print "got here"
+                    print("got here")
                 try:
                     b2 = self.e2b(e2, b['bucket'], False)
                     if b2 != None:
@@ -232,7 +232,7 @@ def create_presigned_url(conn, method, bucket_name, key_name, expiration):
 
 def send_raw_http_request(conn, method, bucket_name, key_name, follow_redirects = False):
     url = create_presigned_url(conn, method, bucket_name, key_name, 3600)
-    print url
+    print(url)
     h = httplib2.Http()
     h.follow_redirects = follow_redirects
     return h.request(url, method)
index 9b31343f2784b37dfbeec3cad436272344ef3eb2..d140544c4945a022efcf8aa4041767c54e6681d0 100644 (file)
@@ -82,7 +82,7 @@ def task(ctx, config):
     pgnum=0
     pgstr = manager.get_pgid(pool, pgnum)
     stats = manager.get_single_pg_stats(pgstr)
-    print stats['state']
+    print(stats['state'])
 
     timeout=60
     start=time.time()
@@ -100,7 +100,7 @@ def task(ctx, config):
     #expect the pg status to be active+undersized+degraded
     #pg should recover and become active+clean within timeout
     stats = manager.get_single_pg_stats(pgstr)
-    print stats['state']
+    print(stats['state'])
 
     timeout=10
     start=time.time()
index b0c4ede60028bb71d959de62af21fe3a2b9dd8a6..239be7cb9971d4ba734e0e3163e92cbce6501536 100644 (file)
@@ -165,7 +165,7 @@ conn = boto.connect_s3(
         )
 bucket = conn.create_bucket('{bucket_name}')
 for bucket in conn.get_all_buckets():
-        print bucket.name + "\t" + bucket.creation_date
+        print(bucket.name + "\t" + bucket.creation_date)
 """.format(access_key=access_key, secret_key=secret_key, dns_name=dns_name, bucket_name=bucket_name)
     py_bucket_file = '{testdir}/create_bucket.py'.format(testdir=testdir)
     misc.sudo_write_file(
index 14d5103e19f232f1f35ab654a632653cda1d3afe..a8525b73bbda6d32ef744967016e15c9bf39219c 100644 (file)
@@ -82,7 +82,7 @@ class Scrubber:
         else:
             def tmp(x):
                 """Local display"""
-                print x
+                print(x)
             self.log = tmp
 
         self.stopping = False
index e83bcad9e7e6054d8fb6f81fa421baa0657f2f95..454bea37da6fe9b18763ceb2d4c1e91bd993bfee 100644 (file)
@@ -40,7 +40,7 @@ def call(cmd):
   else:
     assert False, 'cmd is not a string/unicode nor a list!'
 
-  print 'call: {0}'.format(args)
+  print('call: {0}'.format(args))
   proc = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
   ret = proc.wait()
 
@@ -51,8 +51,8 @@ def expect(cmd, expected_ret):
   try:
     (r, p) = call(cmd)
   except ValueError as e:
-    print >> sys.stderr, \
-             'unable to run {c}: {err}'.format(c=repr(cmd), err=e.message)
+    print('unable to run {c}: {err}'.format(c=repr(cmd), err=e),
+          file=sys.stderr)
     return errno.EINVAL
 
   assert r == p.returncode, \
@@ -280,7 +280,7 @@ def test_all():
       if len(cmd_args) > 0:
         (cmd_args_key, cmd_args_val) = cmd_args.split('=')
 
-      print 'generating keyring for {m}/{c}'.format(m=module,c=cmd_cmd)
+      print('generating keyring for {m}/{c}'.format(m=module,c=cmd_cmd))
       # gen keyring
       for (good_or_bad,kind_map) in perms.iteritems():
         for (kind,lst) in kind_map.iteritems():
@@ -303,7 +303,7 @@ def test_all():
                 'ceph auth get-or-create {n} {c}'.format(
                   n=cname,c=run_cap), 0, k)
       # keyring generated
-      print 'testing {m}/{c}'.format(m=module,c=cmd_cmd)
+      print('testing {m}/{c}'.format(m=module,c=cmd_cmd))
 
       # test
       for good_bad in perms.keys():
@@ -353,7 +353,7 @@ def main():
   test_all()
   test_misc()
 
-  print 'OK'
+  print('OK')
 
   return 0
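
The stderr hunk above swaps py2's "print >> sys.stderr" for the keyword form, which Python 2 only parses with the print_function future import; this assumes the script runs under Python 3 or carries that import near its top, outside these hunks. A sketch of the converted error path, with str(e) standing in for the py3-removed e.message attribute:

    from __future__ import print_function  # required for file= on Python 2

    import sys

    try:
        raise ValueError("boom")  # stand-in for a failing call()
    except ValueError as e:
        print('unable to run cmd: {err}'.format(err=e), file=sys.stderr)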
 
index ba512e1ca33a975817586a949c6901e178d696ed..f738896dd9059afa690e83fe4c90b80f66af61ef 100755 (executable)
@@ -32,10 +32,10 @@ conn = S3Connection(calling_format=OrdinaryCallingFormat(), is_secure=False,
                 aws_secret_access_key=os.environ["SKEY"])
 bucket = conn.lookup(bucket_name)
 if (bucket == None):
-    print "bucket '%s' no longer exists" % bucket_name
+    print("bucket '%s' no longer exists" % bucket_name)
     sys.exit(0)
 
-print "deleting bucket '%s' ..." % bucket_name
+print("deleting bucket '%s' ..." % bucket_name)
 bucket.delete()
-print "done."
+print("done.")
 sys.exit(0)
index 20a86c0dbb6fb8e127658b738e821d79b393a2e2..db409b32a1f544313aa9aaec47b813d989b560b3 100755 (executable)
@@ -176,7 +176,7 @@ def init_config():
         cfg.read(config_locations)
     else:
         if not os.path.isfile(args.config):
-            print "Could not open configuration file %s" % args.config
+            print("Could not open configuration file %s" % args.config)
             sys.exit(1)
 
         cfg.read(args.config)
@@ -268,7 +268,8 @@ def generate_app(config):
 # Initialize the configuration and generate the Application
 config = init_config()
 if config == None:
-    print "Could not parse configuration file. Tried to parse %s" % config_locations
+    print("Could not parse configuration file. "
+          "Tried to parse %s" % config_locations)
     sys.exit(1)
 
 app = generate_app(config)
index 3461bc07a75ba57851d88d5743890ca84869566b..02fc7419a1e672d0672ae64d4a72e856fe969410 100644 (file)
@@ -49,7 +49,7 @@ def set_contents_from_string(key, content):
     try:
         key.set_contents_from_string(content)
     except Exception as e:
-        print 'Error: ' + str(e) 
+        print('Error: ' + str(e))
 
 
 # HTTP endpoint functions
@@ -184,7 +184,8 @@ class AMQPReceiver(object):
                 break
             except Exception as error:
                 remaining_retries -= 1
-                print 'failed to connect to rabbitmq (remaining retries ' + str(remaining_retries) + '): ' + str(error)
+                print('failed to connect to rabbitmq (remaining retries '
+                    + str(remaining_retries) + '): ' + str(error))
                 time.sleep(0.5)
 
         if remaining_retries == 0:
@@ -378,7 +379,7 @@ def init_rabbitmq():
         proc = subprocess.Popen('rabbitmq-server')
     except Exception as error:
         log.info('failed to execute rabbitmq-server: %s', str(error))
-        print 'failed to execute rabbitmq-server: %s' % str(error)
+        print('failed to execute rabbitmq-server: %s' % str(error))
         return None
     # TODO add rabbitmq checkpoint instead of sleep
     time.sleep(5)
@@ -628,14 +629,14 @@ def test_ps_info():
     for i in range(number_of_objects):
         key = bucket.new_key(str(i))
         key.set_contents_from_string('bar')
-    print 'Zonegroup: ' + zonegroup.name
-    print 'user: ' + get_user()
-    print 'tenant: ' + get_tenant()
-    print 'Master Zone'
+    print('Zonegroup: ' + zonegroup.name)
+    print('user: ' + get_user())
+    print('tenant: ' + get_tenant())
+    print('Master Zone')
     print_connection_info(zones[0].conn)
-    print 'PubSub Zone'
+    print('PubSub Zone')
     print_connection_info(ps_zones[0].conn)
-    print 'Bucket: ' + bucket_name
+    print('Bucket: ' + bucket_name)
 
 
 def test_ps_s3_notification_low_level():
@@ -918,7 +919,7 @@ def test_ps_s3_topic_with_secret_on_master():
     try:
         result = bad_topic_conf.set_config()
     except Exception as err:
-        print 'Error is expected: ' + str(err)
+        print('Error is expected: ' + str(err))
     else:
         assert False, 'user password configuration set allowed only over HTTPS'
     
@@ -1105,10 +1106,10 @@ def ps_s3_notification_filter(on_master):
             assert_equal(status/100, 2)
             skip_notif4 = False
         except Exception as error:
-            print 'note: metadata filter is not supported by boto3 - skipping test'
+            print('note: metadata filter is not supported by boto3 - skipping test')
             skip_notif4 = True
     else:
-        print 'filtering by attributes only supported on master zone'
+        print('filtering by attributes only supported on master zone')
         skip_notif4 = True
 
 
@@ -1159,7 +1160,7 @@ def ps_s3_notification_filter(on_master):
         key.set_contents_from_string('bar')
 
     if on_master:
-        print 'wait for 5sec for the messages...'
+        print('wait for 5sec for the messages...')
         time.sleep(5)
     else:
         zone_bucket_checkpoint(ps_zone.zone, zones[0].zone, bucket_name)
@@ -1235,7 +1236,7 @@ def test_ps_s3_notification_errors_on_master():
     try:
       result, status = s3_notification_conf.set_config()
     except Exception as error:
-      print str(error) + ' - is expected'
+      print(str(error) + ' - is expected')
     else:
       assert False, 'invalid event name is expected to fail'
 
@@ -1248,7 +1249,7 @@ def test_ps_s3_notification_errors_on_master():
     try:
       _, _ = s3_notification_conf.set_config()
     except Exception as error:
-      print str(error) + ' - is expected'
+      print(str(error) + ' - is expected')
     else:
       assert False, 'missing notification name is expected to fail'
 
@@ -1262,7 +1263,7 @@ def test_ps_s3_notification_errors_on_master():
     try:
       _, _ = s3_notification_conf.set_config()
     except Exception as error:
-      print str(error) + ' - is expected'
+      print(str(error) + ' - is expected')
     else:
       assert False, 'invalid ARN is expected to fail'
 
@@ -1276,7 +1277,7 @@ def test_ps_s3_notification_errors_on_master():
     try:
       _, _ = s3_notification_conf.set_config()
     except Exception as error:
-      print str(error) + ' - is expected'
+      print(str(error) + ' - is expected')
     else:
       assert False, 'unknown topic is expected to fail'
 
@@ -1289,7 +1290,7 @@ def test_ps_s3_notification_errors_on_master():
     try:
       _, _ = s3_notification_conf.set_config()
     except Exception as error:
-      print str(error) + ' - is expected'
+      print(str(error) + ' - is expected')
     else:
       assert False, 'unknown bucket is expected to fail'
 
@@ -1315,7 +1316,7 @@ def test_objcet_timing():
     bucket_name = gen_bucket_name()
     bucket = zones[0].create_bucket(bucket_name)
     # create objects in the bucket (async)
-    print 'creating objects...'
+    print('creating objects...')
     number_of_objects = 1000
     client_threads = []
     start_time = time.time()
@@ -1328,11 +1329,11 @@ def test_objcet_timing():
     [thr.join() for thr in client_threads] 
 
     time_diff = time.time() - start_time
-    print 'average time for object creation: ' + str(time_diff*1000/number_of_objects) + ' milliseconds'
+    print('average time for object creation: ' + str(time_diff*1000/number_of_objects) + ' milliseconds')
     
-    print 'total number of objects: ' + str(len(list(bucket.list())))
+    print('total number of objects: ' + str(len(list(bucket.list()))))
 
-    print 'deleting objects...'
+    print('deleting objects...')
     client_threads = []
     start_time = time.time()
     for key in bucket.list():
@@ -1342,7 +1343,7 @@ def test_objcet_timing():
     [thr.join() for thr in client_threads] 
     
     time_diff = time.time() - start_time
-    print 'average time for object deletion: ' + str(time_diff*1000/number_of_objects) + ' milliseconds'
+    print('average time for object deletion: ' + str(time_diff*1000/number_of_objects) + ' milliseconds')
     
     # cleanup
     zones[0].delete_bucket(bucket_name)
@@ -1409,14 +1410,14 @@ def test_ps_s3_notification_push_amqp_on_master():
     [thr.join() for thr in client_threads] 
 
     time_diff = time.time() - start_time
-    print 'average time for creation + qmqp notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds'
+    print('average time for creation + amqp notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds')
 
-    print 'wait for 5sec for the messages...'
+    print('wait for 5sec for the messages...')
     time.sleep(5)
 
     # check amqp receiver
     keys = list(bucket.list())
-    print 'total number of objects: ' + str(len(keys))
+    print('total number of objects: ' + str(len(keys)))
     receiver1.verify_s3_events(keys, exact_match=True)
     receiver2.verify_s3_events(keys, exact_match=True)
     
@@ -1430,9 +1431,9 @@ def test_ps_s3_notification_push_amqp_on_master():
     [thr.join() for thr in client_threads] 
     
     time_diff = time.time() - start_time
-    print 'average time for deletion + amqp notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds'
+    print('average time for deletion + amqp notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds')
 
-    print 'wait for 5sec for the messages...'
+    print('wait for 5sec for the messages...')
     time.sleep(5)
     
     # check amqp receiver 1 for deletions
@@ -1589,9 +1590,9 @@ def test_ps_s3_notification_push_kafka_on_master():
     [thr.join() for thr in client_threads] 
 
     time_diff = time.time() - start_time
-    print 'average time for creation + kafka notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds'
+    print('average time for creation + kafka notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds')
 
-    print 'wait for 5sec for the messages...'
+    print('wait for 5sec for the messages...')
     time.sleep(5)
     keys = list(bucket.list())
     receiver.verify_s3_events(keys, exact_match=True)
@@ -1606,9 +1607,9 @@ def test_ps_s3_notification_push_kafka_on_master():
     [thr.join() for thr in client_threads] 
     
     time_diff = time.time() - start_time
-    print 'average time for deletion + kafka notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds'
+    print('average time for deletion + kafka notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds')
 
-    print 'wait for 5sec for the messages...'
+    print('wait for 5sec for the messages...')
     time.sleep(5)
     receiver.verify_s3_events(keys, exact_match=True, deletions=True)
     
@@ -1684,10 +1685,10 @@ def kafka_security(security_type):
     [thr.join() for thr in client_threads] 
 
     time_diff = time.time() - start_time
-    print 'average time for creation + kafka notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds'
+    print('average time for creation + kafka notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds')
 
     try:
-        print 'wait for 5sec for the messages...'
+        print('wait for 5sec for the messages...')
         time.sleep(5)
         keys = list(bucket.list())
         receiver.verify_s3_events(keys, exact_match=True)
@@ -1702,9 +1703,9 @@ def kafka_security(security_type):
         [thr.join() for thr in client_threads] 
         
         time_diff = time.time() - start_time
-        print 'average time for deletion + kafka notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds'
+        print('average time for deletion + kafka notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds')
 
-        print 'wait for 5sec for the messages...'
+        print('wait for 5sec for the messages...')
         time.sleep(5)
         receiver.verify_s3_events(keys, exact_match=True, deletions=True)
     except Exception as err:
@@ -1780,7 +1781,7 @@ def test_ps_s3_notification_multi_delete_on_master():
     start_time = time.time()
     delete_all_objects(zones[0].conn, bucket_name)
     time_diff = time.time() - start_time
-    print 'average time for deletion + http notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds'
+    print('average time for deletion + http notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds')
 
     print('wait for 5sec for the messages...')
     time.sleep(5)
@@ -1844,14 +1845,14 @@ def test_ps_s3_notification_push_http_on_master():
     [thr.join() for thr in client_threads] 
 
     time_diff = time.time() - start_time
-    print 'average time for creation + http notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds'
+    print('average time for creation + http notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds')
 
-    print 'wait for 5sec for the messages...'
+    print('wait for 5sec for the messages...')
     time.sleep(5)
     
     # check http receiver
     keys = list(bucket.list())
-    print 'total number of objects: ' + str(len(keys))
+    print('total number of objects: ' + str(len(keys)))
     http_server.verify_s3_events(keys, exact_match=True)
     
     # delete objects from the bucket
@@ -1864,9 +1865,9 @@ def test_ps_s3_notification_push_http_on_master():
     [thr.join() for thr in client_threads] 
     
     time_diff = time.time() - start_time
-    print 'average time for deletion + http notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds'
+    print('average time for deletion + http notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds')
 
-    print 'wait for 5sec for the messages...'
+    print('wait for 5sec for the messages...')
     time.sleep(5)
     
     # check http receiver
@@ -2595,7 +2596,7 @@ def test_ps_s3_creation_triggers_on_master():
     uploader.complete_upload()
     fp.close()
 
-    print 'wait for 5sec for the messages...'
+    print('wait for 5sec for the messages...')
     time.sleep(5)
 
     # check amqp receiver
@@ -2675,7 +2676,7 @@ def test_ps_s3_multipart_on_master():
     uploader.complete_upload()
     fp.close()
 
-    print 'wait for 5sec for the messages...'
+    print('wait for 5sec for the messages...')
     time.sleep(5)
 
     # check amqp receiver
@@ -3121,7 +3122,7 @@ def test_ps_s3_versioned_deletion_on_master():
     bucket.delete_key(key.name, version_id=v1)
     delete_marker_key.delete()
 
-    print 'wait for 5sec for the messages...'
+    print('wait for 5sec for the messages...')
     time.sleep(5)
 
     # check amqp receiver
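
Most of the conversions in this file belong to one repeated timing idiom: start N client threads, join them, then print the mean per-object latency. A condensed, self-contained sketch; the sleep stands in for the S3 calls:

    import threading
    import time

    number_of_objects = 100

    def upload(i):
        time.sleep(0.01)  # stand-in for key.set_contents_from_string('bar')

    start_time = time.time()
    client_threads = [threading.Thread(target=upload, args=(i,))
                      for i in range(number_of_objects)]
    [thr.start() for thr in client_threads]
    [thr.join() for thr in client_threads]
    time_diff = time.time() - start_time
    print('average time for object creation: '
          + str(time_diff*1000/number_of_objects) + ' milliseconds')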
index 0d4bb37ea7c88c25c513e030227068e4c6aed56a..9c512b16498b53006a503476b0ab7b84864548ec 100644 (file)
@@ -165,10 +165,10 @@ def delete_all_s3_topics(zone, region):
 
         topics = client.list_topics()['Topics']
         for topic in topics:
-            print 'topic cleanup, deleting: ' + topic['TopicArn']
+            print('topic cleanup, deleting: ' + topic['TopicArn'])
             assert client.delete_topic(TopicArn=topic['TopicArn'])['ResponseMetadata']['HTTPStatusCode'] == 200
     except Exception as err:
-        print 'failed to do topic cleanup: ' + str(err)
+        print('failed to do topic cleanup: ' + str(err))
     
 
 def delete_all_objects(conn, bucket_name):