tests: use python3 compatible print (30758/head)
author     Kyr Shatskyy <kyrylo.shatskyy@suse.com>
           Mon, 7 Oct 2019 14:09:05 +0000 (16:09 +0200)
committer  Kyr Shatskyy <kyrylo.shatskyy@suse.com>
           Fri, 11 Oct 2019 17:02:04 +0000 (19:02 +0200)
Fixes: https://tracker.ceph.com/issues/42210
Signed-off-by: Kyr Shatskyy <kyrylo.shatskyy@suse.com>
18 files changed:
qa/tasks/ceph_manager.py
qa/tasks/cephfs/fuse_mount.py
qa/tasks/cephfs/kernel_mount.py
qa/tasks/cephfs/mount.py
qa/tasks/cephfs/test_client_recovery.py
qa/tasks/cephfs/test_forward_scrub.py
qa/tasks/cephfs/test_full.py
qa/tasks/cephfs/test_journal_migration.py
qa/tasks/radosgw_admin.py
qa/tasks/resolve_stuck_peering.py
qa/tasks/s3a_hadoop.py
qa/tasks/scrub.py
qa/workunits/mon/caps.py
src/objsync/boto_del.py
src/powerdns/pdns-backend-rgw.py
src/test/rgw/rgw_multi/tests_ps.py
src/test/rgw/rgw_multi/zone_ps.py
src/test/rgw/test_multi.py

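The change itself is mechanical: Python 2's print statement becomes a call to the print() function, the only form Python 3 accepts. Single-argument calls such as print(x) behave identically on both interpreters, because Python 2 simply parses the parentheses as a grouped expression; that is why nearly all of the files below need no other changes, and why multi-part messages are built into a single argument with +, % or .format() rather than passed as separate arguments. A minimal sketch of the compatibility rules this relies on (illustrative only, not part of the commit):

    x = 42
    print(x)                 # py2: statement printing the expression (x); py3: function call
    print("%d items" % x)    # one argument built with % -- same output on 2 and 3
    print("a" + "\t" + "b")  # one argument built with + -- same output on 2 and 3
    print("a", "b")          # the trap: py2 prints the tuple ('a', 'b'); py3 prints: a b
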
qa/tasks/ceph_manager.py
index a635af3de5083726eb9d96ce88662dc71876e6bc..0d5691945cc557bc5627a4324132ecc5678ef9f6 100644 (file)
@@ -1213,7 +1213,7 @@ class CephManager:
                 """
                 implement log behavior.
                 """
-                print x
+                print(x)
             self.log = tmp
         if self.config is None:
             self.config = dict()
qa/tasks/cephfs/fuse_mount.py
index bbd56b3c57c0c7446ccfe480ad44cd92f796ee82..71f7eb8062904a8c48d69709dd28e960189f7ae4 100644 (file)
@@ -429,7 +429,7 @@ def find_socket(client_name):
                         return f
         raise RuntimeError("Client socket {{0}} not found".format(client_name))
 
-print find_socket("{client_name}")
+print(find_socket("{client_name}"))
 """.format(
             asok_path=self._asok_path(),
             client_name="client.{0}".format(self.client_id))
qa/tasks/cephfs/kernel_mount.py
index 3c33cc83c1c435f1e02a9838961b48b31e7c0e50..f5265b22c488a4ab4f0509a81f8d83d8939028a7 100644 (file)
@@ -208,7 +208,7 @@ class KernelMount(CephFSMount):
                     result[client_id] = dir
                 return result
 
-            print json.dumps(get_id_to_dir())
+            print(json.dumps(get_id_to_dir()))
             """)
 
         p = self.client_remote.run(args=[
@@ -230,7 +230,7 @@ class KernelMount(CephFSMount):
         pyscript = dedent("""
             import os
 
-            print open(os.path.join("{debug_dir}", "{filename}")).read()
+            print(open(os.path.join("{debug_dir}", "{filename}")).read())
             """).format(debug_dir=debug_dir, filename=filename)
 
         p = self.client_remote.run(args=[
qa/tasks/cephfs/mount.py
index 9ac3a3b7b17b9fa2eedd065cd62ecb5c4862e7c3..557b50a661d432f3dac25761e064cda9b53cb7c3 100644 (file)
@@ -594,9 +594,9 @@ class CephFSMount(object):
                 sys.exit(e.errno)
 
             attrs = ["st_mode", "st_ino", "st_dev", "st_nlink", "st_uid", "st_gid", "st_size", "st_atime", "st_mtime", "st_ctime"]
-            print json.dumps(
+            print(json.dumps(
                 dict([(a, getattr(s, a)) for a in attrs]),
-                indent=2)
+                indent=2))
             """).format(stat_call=stat_call)
         proc = self._run_python(pyscript)
         if wait:
@@ -636,14 +636,14 @@ class CephFSMount(object):
                 import os
                 import stat
 
-                print os.stat("{path}").st_ino
+                print(os.stat("{path}").st_ino)
                 """).format(path=abs_path)
         else:
             pyscript = dedent("""
                 import os
                 import stat
 
-                print os.lstat("{path}").st_ino
+                print(os.lstat("{path}").st_ino)
                 """).format(path=abs_path)
 
         proc = self._run_python(pyscript)
@@ -657,7 +657,7 @@ class CephFSMount(object):
             import os
             import stat
 
-            print os.stat("{path}").st_nlink
+            print(os.stat("{path}").st_nlink)
             """).format(path=abs_path)
 
         proc = self._run_python(pyscript)
qa/tasks/cephfs/test_client_recovery.py
index 4cdaf7f2959a273133de487d01be27453dc04c46..f92bbb3cc5376e6f04563a0e3e8116b1d85b166c 100644 (file)
@@ -467,22 +467,22 @@ class TestClientRecovery(CephFSTestCase):
 
                 path = "{path}"
 
-                print "Starting creation..."
+                print("Starting creation...")
                 start = time.time()
 
                 os.mkdir(path)
                 dfd = os.open(path, os.O_DIRECTORY)
 
                 fd = open(os.path.join(path, "childfile"), "w")
-                print "Finished creation in {{0}}s".format(time.time() - start)
+                print("Finished creation in {{0}}s".format(time.time() - start))
 
-                print "Starting fsync..."
+                print("Starting fsync...")
                 start = time.time()
                 if {dirfsync}:
                     os.fsync(dfd)
                 else:
                     os.fsync(fd)
-                print "Finished fsync in {{0}}s".format(time.time() - start)
+                print("Finished fsync in {{0}}s".format(time.time() - start))
             """.format(path=path,dirfsync=str(dirfsync)))
         )
 
@@ -565,7 +565,7 @@ class TestClientRecovery(CephFSTestCase):
             cephfs.mount()
             client_id = cephfs.get_instance_id()
             cephfs.abort_conn()
-            print client_id
+            print(client_id)
             """)
         )
         gid = int(gid_str);
qa/tasks/cephfs/test_forward_scrub.py
index b0f85e3213f6233e8c137308cf880afb23032e4b..20a079d72d7deb2bb1e2b6e3693b8d858818aee2 100644 (file)
@@ -202,7 +202,7 @@ class TestForwardScrub(CephFSTestCase):
         inotable_dict = {}
         for rank in ranks:
             inotable_oid = "mds{rank:d}_".format(rank=rank) + "inotable"
-            print "Trying to fetch inotable object: " + inotable_oid
+            print("Trying to fetch inotable object: " + inotable_oid)
 
             #self.fs.get_metadata_object("InoTable", "mds0_inotable")
             inotable_raw = self.fs.get_metadata_object_raw(inotable_oid)
qa/tasks/cephfs/test_full.py
index 02ffadd96f52bb67ab0f989340d169e99bb887a1..9ebab53549cb2c22508facd023a54f15583001ec 100644 (file)
@@ -247,12 +247,12 @@ class FullnessTestCase(CephFSTestCase):
             import os
 
             # Write some buffered data through before going full, all should be well
-            print "writing some data through which we expect to succeed"
+            print("writing some data through which we expect to succeed")
             bytes = 0
             f = os.open("{file_path}", os.O_WRONLY | os.O_CREAT)
             bytes += os.write(f, 'a' * 512 * 1024)
             os.fsync(f)
-            print "fsync'ed data successfully, will now attempt to fill fs"
+            print("fsync'ed data successfully, will now attempt to fill fs")
 
             # Okay, now we're going to fill up the filesystem, and then keep
             # writing until we see an error from fsync.  As long as we're doing
@@ -262,26 +262,26 @@ class FullnessTestCase(CephFSTestCase):
 
             for n in range(0, int({fill_mb} * 0.9)):
                 bytes += os.write(f, 'x' * 1024 * 1024)
-                print "wrote {{0}} bytes via buffered write, may repeat".format(bytes)
-            print "done writing {{0}} bytes".format(bytes)
+                print("wrote {{0}} bytes via buffered write, may repeat".format(bytes))
+            print("done writing {{0}} bytes".format(bytes))
 
             # OK, now we should sneak in under the full condition
             # due to the time it takes the OSDs to report to the
             # mons, and get a successful fsync on our full-making data
             os.fsync(f)
-            print "successfully fsync'ed prior to getting full state reported"
+            print("successfully fsync'ed prior to getting full state reported")
 
             # buffered write, add more dirty data to the buffer
-            print "starting buffered write"
+            print("starting buffered write")
             try:
                 for n in range(0, int({fill_mb} * 0.2)):
                     bytes += os.write(f, 'x' * 1024 * 1024)
-                    print "sleeping a bit as we've exceeded 90% of our expected full ratio"
+                    print("sleeping a bit as we've exceeded 90% of our expected full ratio")
                     time.sleep({full_wait})
             except OSError:
                 pass;
 
-            print "wrote, now waiting 30s and then doing a close we expect to fail"
+            print("wrote, now waiting 30s and then doing a close we expect to fail")
 
             # Wait long enough for a background flush that should fail
             time.sleep(30)
@@ -291,7 +291,7 @@ class FullnessTestCase(CephFSTestCase):
                 try:
                     os.close(f)
                 except OSError:
-                    print "close() returned an error as expected"
+                    print("close() returned an error as expected")
                 else:
                     raise RuntimeError("close() failed to raise error")
             else:
@@ -318,12 +318,12 @@ class FullnessTestCase(CephFSTestCase):
             import os
 
             # Write some buffered data through before going full, all should be well
-            print "writing some data through which we expect to succeed"
+            print("writing some data through which we expect to succeed")
             bytes = 0
             f = os.open("{file_path}", os.O_WRONLY | os.O_CREAT)
             bytes += os.write(f, 'a' * 4096)
             os.fsync(f)
-            print "fsync'ed data successfully, will now attempt to fill fs"
+            print("fsync'ed data successfully, will now attempt to fill fs")
 
             # Okay, now we're going to fill up the filesystem, and then keep
             # writing until we see an error from fsync.  As long as we're doing
@@ -334,25 +334,25 @@ class FullnessTestCase(CephFSTestCase):
             for n in range(0, int({fill_mb} * 1.1)):
                 try:
                     bytes += os.write(f, 'x' * 1024 * 1024)
-                    print "wrote bytes via buffered write, moving on to fsync"
+                    print("wrote bytes via buffered write, moving on to fsync")
                 except OSError as e:
-                    print "Unexpected error %s from write() instead of fsync()" % e
+                    print("Unexpected error %s from write() instead of fsync()" % e)
                     raise
 
                 try:
                     os.fsync(f)
-                    print "fsync'ed successfully"
+                    print("fsync'ed successfully")
                 except OSError as e:
-                    print "Reached fullness after %.2f MB" % (bytes / (1024.0 * 1024.0))
+                    print("Reached fullness after %.2f MB" % (bytes / (1024.0 * 1024.0)))
                     full = True
                     break
                 else:
-                    print "Not full yet after %.2f MB" % (bytes / (1024.0 * 1024.0))
+                    print("Not full yet after %.2f MB" % (bytes / (1024.0 * 1024.0)))
 
                 if n > {fill_mb} * 0.9:
                     # Be cautious in the last region where we expect to hit
                     # the full condition, so that we don't overshoot too dramatically
-                    print "sleeping a bit as we've exceeded 90% of our expected full ratio"
+                    print("sleeping a bit as we've exceeded 90% of our expected full ratio")
                     time.sleep({full_wait})
 
             if not full:
@@ -361,9 +361,9 @@ class FullnessTestCase(CephFSTestCase):
             # close() should not raise an error because we already caught it in
             # fsync.  There shouldn't have been any more writeback errors
             # since then because all IOs got cancelled on the full flag.
-            print "calling close"
+            print("calling close")
             os.close(f)
-            print "close() did not raise error"
+            print("close() did not raise error")
 
             os.unlink("{file_path}")
             """)
qa/tasks/cephfs/test_journal_migration.py
index bd431dd3d978922a2dc6811bb6eab48abdeef40c..de4867ef0a58073281ac6ba85f216d9c6aa7cf73 100644 (file)
@@ -80,7 +80,7 @@ class TestJournalMigration(CephFSTestCase):
             args=[
                 "python",
                 "-c",
-                "import json; print len(json.load(open('/tmp/journal.json')))"
+                "import json; print(len(json.load(open('/tmp/journal.json'))))"
             ],
             stdout=StringIO())
         event_count = int(p.stdout.getvalue().strip())
qa/tasks/radosgw_admin.py
index 524afe0d0c94c93be7aee610ac1b4ab168964cb3..ce551e5c298a7dae17fc732c9c834543f666eaf2 100644 (file)
@@ -135,7 +135,7 @@ class usage_acc:
             for b in e['buckets']:
                 c = b['categories']
                 if b['bucket'] == 'nosuchbucket':
-                    print "got here"
+                    print("got here")
                 try:
                     b2 = self.e2b(e2, b['bucket'], False)
                     if b2 != None:
@@ -236,7 +236,7 @@ def create_presigned_url(conn, method, bucket_name, key_name, expiration):
 
 def send_raw_http_request(conn, method, bucket_name, key_name, follow_redirects = False):
     url = create_presigned_url(conn, method, bucket_name, key_name, 3600)
-    print url
+    print(url)
     h = httplib2.Http()
     h.follow_redirects = follow_redirects
     return h.request(url, method)
qa/tasks/resolve_stuck_peering.py
index bdf86e9242e4928dc30aa6cc17438bee95a8b737..8e82ab0c1112998b2cb17662d71a623e60001702 100644 (file)
@@ -82,7 +82,7 @@ def task(ctx, config):
     pgnum=0
     pgstr = manager.get_pgid(pool, pgnum)
     stats = manager.get_single_pg_stats(pgstr)
-    print stats['state']
+    print(stats['state'])
 
     timeout=60
     start=time.time()
@@ -100,7 +100,7 @@ def task(ctx, config):
     #expect the pg status to be active+undersized+degraded
     #pg should recover and become active+clean within timeout
     stats = manager.get_single_pg_stats(pgstr)
-    print stats['state']
+    print(stats['state'])
 
     timeout=10
     start=time.time()
qa/tasks/s3a_hadoop.py
index ffc52ea7ab4786accdfe636b6c0aff1614820a53..d3c503b0d07eb17956e2cb7aadb4c1d705b00058 100644 (file)
@@ -169,7 +169,7 @@ conn = boto.connect_s3(
         )
 bucket = conn.create_bucket('{bucket_name}')
 for bucket in conn.get_all_buckets():
-        print bucket.name + "\t" + bucket.creation_date
+        print(bucket.name + "\t" + bucket.creation_date)
 """.format(access_key=access_key, secret_key=secret_key, dns_name=dns_name, bucket_name=bucket_name)
     py_bucket_file = '{testdir}/create_bucket.py'.format(testdir=testdir)
     misc.sudo_write_file(
qa/tasks/scrub.py
index a6194c2c7ce534705cba42730e9783c99077a996..798ff7cb0437f51680a16e7bd8715cf89c75d67e 100644 (file)
@@ -82,7 +82,7 @@ class Scrubber:
         else:
             def tmp(x):
                 """Local display"""
-                print x
+                print(x)
             self.log = tmp
 
         self.stopping = False
qa/workunits/mon/caps.py
index 7bc8c923920fced0b4f399139c6dbe88480e1d51..62de70e687e4828902692011faa1c83de3c8360c 100644 (file)
@@ -1,5 +1,7 @@
 #!/usr/bin/python
 
+from __future__ import print_function
+
 import json
 import subprocess
 import shlex
@@ -41,7 +43,7 @@ def call(cmd):
   else:
     assert False, 'cmd is not a string/unicode nor a list!'
 
-  print 'call: {0}'.format(args)
+  print('call: {0}'.format(args))
   proc = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
   ret = proc.wait()
 
@@ -52,8 +54,8 @@ def expect(cmd, expected_ret):
   try:
     (r, p) = call(cmd)
   except ValueError as e:
-    print >> sys.stderr, \
-             'unable to run {c}: {err}'.format(c=repr(cmd), err=e.message)
+    print('unable to run {c}: {err}'.format(c=repr(cmd), err=e),
+          file=sys.stderr)
     return errno.EINVAL
 
   assert r == p.returncode, \
@@ -281,7 +283,7 @@ def test_all():
       if len(cmd_args) > 0:
         (cmd_args_key, cmd_args_val) = cmd_args.split('=')
 
-      print 'generating keyring for {m}/{c}'.format(m=module,c=cmd_cmd)
+      print('generating keyring for {m}/{c}'.format(m=module,c=cmd_cmd))
       # gen keyring
       for (good_or_bad,kind_map) in perms.iteritems():
         for (kind,lst) in kind_map.iteritems():
@@ -304,7 +306,7 @@ def test_all():
                 'ceph auth get-or-create {n} {c}'.format(
                   n=cname,c=run_cap), 0, k)
       # keyring generated
-      print 'testing {m}/{c}'.format(m=module,c=cmd_cmd)
+      print('testing {m}/{c}'.format(m=module,c=cmd_cmd))
 
       # test
       for good_bad in perms.iterkeys():
@@ -354,7 +356,7 @@ def main():
   test_all()
   test_misc()
 
-  print 'OK'
+  print('OK')
 
   return 0
 
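Note that qa/workunits/mon/caps.py above is the only file in this commit that also gains "from __future__ import print_function": it writes to stderr with the keyword form print(..., file=sys.stderr), replacing Python 2's chevron syntax "print >> sys.stderr, msg". Without the future import, the file= keyword is a syntax error under Python 2's print statement. A short sketch of why that one file needs the import (illustrative only):

    from __future__ import print_function  # makes print() a real function on Python 2

    import sys

    print("plain message")                   # fine on 2 and 3 even without the import
    print("error message", file=sys.stderr)  # SyntaxError on Python 2 without the import
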
src/objsync/boto_del.py
index ba512e1ca33a975817586a949c6901e178d696ed..f738896dd9059afa690e83fe4c90b80f66af61ef 100755 (executable)
@@ -32,10 +32,10 @@ conn = S3Connection(calling_format=OrdinaryCallingFormat(), is_secure=False,
                 aws_secret_access_key=os.environ["SKEY"])
 bucket = conn.lookup(bucket_name)
 if (bucket == None):
-    print "bucket '%s' no longer exists" % bucket_name
+    print("bucket '%s' no longer exists" % bucket_name)
     sys.exit(0)
 
-print "deleting bucket '%s' ..." % bucket_name
+print("deleting bucket '%s' ..." % bucket_name)
 bucket.delete()
-print "done."
+print("done.")
 sys.exit(0)
src/powerdns/pdns-backend-rgw.py
index 6be92bab21526de091b93e11a242d6522f38c6da..fb60ac2ee5bd27c7b97dfed5d559bbd0c07840f8 100755 (executable)
@@ -176,7 +176,7 @@ def init_config():
         cfg.read(config_locations)
     else:
         if not os.path.isfile(args.config):
-            print "Could not open configuration file %s" % args.config
+            print("Could not open configuration file %s" % args.config)
             sys.exit(1)
 
         cfg.read(args.config)
@@ -268,7 +268,8 @@ def generate_app(config):
 # Initialize the configuration and generate the Application
 config = init_config()
 if config == None:
-    print "Could not parse configuration file. Tried to parse %s" % config_locations
+    print("Could not parse configuration file. "
+          "Tried to parse %s" % config_locations)
     sys.exit(1)
 
 app = generate_app(config)
src/test/rgw/rgw_multi/tests_ps.py
index 73fb3ea24ecd4bf2be2c78a1ff46010b03165970..b2bdf3287c41953fa58a3d3422cde5b07a9d5580 100644 (file)
@@ -38,7 +38,7 @@ def set_contents_from_string(key, content):
     try:
         key.set_contents_from_string(content)
     except Exception as e:
-        print 'Error: ' + str(e) 
+        print('Error: ' + str(e))
 
 
 # HTTP endpoint functions
@@ -159,7 +159,8 @@ class AMQPReceiver(object):
                 break
             except Exception as error:
                 remaining_retries -= 1
-                print 'failed to connect to rabbitmq (remaining retries ' + str(remaining_retries) + '): ' + str(error)
+                print('failed to connect to rabbitmq (remaining retries '
+                    + str(remaining_retries) + '): ' + str(error))
 
         if remaining_retries == 0:
             raise Exception('failed to connect to rabbitmq - no retries left')
@@ -320,7 +321,7 @@ def init_rabbitmq():
         proc = subprocess.Popen('rabbitmq-server')
     except Exception as error:
         log.info('failed to execute rabbitmq-server: %s', str(error))
-        print 'failed to execute rabbitmq-server: %s' % str(error)
+        print('failed to execute rabbitmq-server: %s' % str(error))
         return None
     # TODO add rabbitmq checkpoint instead of sleep
     time.sleep(5)
@@ -405,14 +406,14 @@ def test_ps_info():
     for i in range(number_of_objects):
         key = bucket.new_key(str(i))
         key.set_contents_from_string('bar')
-    print 'Zonegroup: ' + zonegroup.name
-    print 'user: ' + get_user()
-    print 'tenant: ' + get_tenant()
-    print 'Master Zone'
+    print('Zonegroup: ' + zonegroup.name)
+    print('user: ' + get_user())
+    print('tenant: ' + get_tenant())
+    print('Master Zone')
     print_connection_info(zones[0].conn)
-    print 'PubSub Zone'
+    print('PubSub Zone')
     print_connection_info(ps_zones[0].conn)
-    print 'Bucket: ' + bucket_name
+    print('Bucket: ' + bucket_name)
 
 
 def test_ps_s3_notification_low_level():
@@ -830,10 +831,10 @@ def ps_s3_notification_filter(on_master):
             assert_equal(status/100, 2)
             skip_notif4 = False
         except Exception as error:
-            print 'note: metadata filter is not supported by boto3 - skipping test'
+            print('note: metadata filter is not supported by boto3 - skipping test')
             skip_notif4 = True
     else:
-        print 'filtering by attributes only supported on master zone'
+        print('filtering by attributes only supported on master zone')
         skip_notif4 = True
 
 
@@ -884,7 +885,7 @@ def ps_s3_notification_filter(on_master):
         key.set_contents_from_string('bar')
 
     if on_master:
-        print 'wait for 5sec for the messages...'
+        print('wait for 5sec for the messages...')
         time.sleep(5)
     else:
         zone_bucket_checkpoint(ps_zone.zone, zones[0].zone, bucket_name)
@@ -960,7 +961,7 @@ def test_ps_s3_notification_errors_on_master():
     try:
       result, status = s3_notification_conf.set_config()
     except Exception as error:
-      print str(error) + ' - is expected'
+      print(str(error) + ' - is expected')
     else:
       assert False, 'invalid event name is expected to fail'
 
@@ -973,7 +974,7 @@ def test_ps_s3_notification_errors_on_master():
     try:
       _, _ = s3_notification_conf.set_config()
     except Exception as error:
-      print str(error) + ' - is expected'
+      print(str(error) + ' - is expected')
     else:
       assert False, 'missing notification name is expected to fail'
 
@@ -987,7 +988,7 @@ def test_ps_s3_notification_errors_on_master():
     try:
       _, _ = s3_notification_conf.set_config()
     except Exception as error:
-      print str(error) + ' - is expected'
+      print(str(error) + ' - is expected')
     else:
       assert False, 'invalid ARN is expected to fail'
 
@@ -1001,7 +1002,7 @@ def test_ps_s3_notification_errors_on_master():
     try:
       _, _ = s3_notification_conf.set_config()
     except Exception as error:
-      print str(error) + ' - is expected'
+      print(str(error) + ' - is expected')
     else:
       assert False, 'unknown topic is expected to fail'
 
@@ -1014,7 +1015,7 @@ def test_ps_s3_notification_errors_on_master():
     try:
       _, _ = s3_notification_conf.set_config()
     except Exception as error:
-      print str(error) + ' - is expected'
+      print(str(error) + ' - is expected')
     else:
       assert False, 'unknown bucket is expected to fail'
 
@@ -1040,7 +1041,7 @@ def test_objcet_timing():
     bucket_name = gen_bucket_name()
     bucket = zones[0].create_bucket(bucket_name)
     # create objects in the bucket (async)
-    print 'creating objects...'
+    print('creating objects...')
     number_of_objects = 1000
     client_threads = []
     start_time = time.time()
@@ -1053,11 +1054,11 @@ def test_objcet_timing():
     [thr.join() for thr in client_threads] 
 
     time_diff = time.time() - start_time
-    print 'average time for object creation: ' + str(time_diff*1000/number_of_objects) + ' milliseconds'
+    print('average time for object creation: ' + str(time_diff*1000/number_of_objects) + ' milliseconds')
     
-    print 'total number of objects: ' + str(len(list(bucket.list())))
+    print('total number of objects: ' + str(len(list(bucket.list()))))
 
-    print 'deleting objects...'
+    print('deleting objects...')
     client_threads = []
     start_time = time.time()
     for key in bucket.list():
@@ -1067,7 +1068,7 @@ def test_objcet_timing():
     [thr.join() for thr in client_threads] 
     
     time_diff = time.time() - start_time
-    print 'average time for object deletion: ' + str(time_diff*1000/number_of_objects) + ' milliseconds'
+    print('average time for object deletion: ' + str(time_diff*1000/number_of_objects) + ' milliseconds')
     
     # cleanup
     zones[0].delete_bucket(bucket_name)
@@ -1134,14 +1135,14 @@ def test_ps_s3_notification_push_amqp_on_master():
     [thr.join() for thr in client_threads] 
 
     time_diff = time.time() - start_time
-    print 'average time for creation + qmqp notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds'
+    print('average time for creation + amqp notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds')
 
-    print 'wait for 5sec for the messages...'
+    print('wait for 5sec for the messages...')
     time.sleep(5)
 
     # check amqp receiver
     keys = list(bucket.list())
-    print 'total number of objects: ' + str(len(keys))
+    print('total number of objects: ' + str(len(keys)))
     receiver1.verify_s3_events(keys, exact_match=True)
     receiver2.verify_s3_events(keys, exact_match=True)
     
@@ -1155,9 +1156,9 @@ def test_ps_s3_notification_push_amqp_on_master():
     [thr.join() for thr in client_threads] 
     
     time_diff = time.time() - start_time
-    print 'average time for creation + http notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds'
+    print('average time for creation + http notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds')
 
-    print 'wait for 5sec for the messages...'
+    print('wait for 5sec for the messages...')
     time.sleep(5)
     
     # check amqp receiver 1 for deletions
@@ -1231,14 +1232,14 @@ def test_ps_s3_notification_push_http_on_master():
     [thr.join() for thr in client_threads] 
 
     time_diff = time.time() - start_time
-    print 'average time for creation + http notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds'
+    print('average time for creation + http notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds')
 
-    print 'wait for 5sec for the messages...'
+    print('wait for 5sec for the messages...')
     time.sleep(5)
     
     # check http receiver
     keys = list(bucket.list())
-    print 'total number of objects: ' + str(len(keys))
+    print('total number of objects: ' + str(len(keys)))
     http_server.verify_s3_events(keys, exact_match=True)
     
     # delete objects from the bucket
@@ -1251,9 +1252,9 @@ def test_ps_s3_notification_push_http_on_master():
     [thr.join() for thr in client_threads] 
     
     time_diff = time.time() - start_time
-    print 'average time for creation + http notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds'
+    print('average time for creation + http notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds')
 
-    print 'wait for 5sec for the messages...'
+    print('wait for 5sec for the messages...')
     time.sleep(5)
     
     # check http receiver
@@ -1840,7 +1841,7 @@ def test_ps_s3_creation_triggers_on_master():
     uploader.complete_upload()
     fp.close()
 
-    print 'wait for 5sec for the messages...'
+    print('wait for 5sec for the messages...')
     time.sleep(5)
 
     # check amqp receiver
@@ -1920,7 +1921,7 @@ def test_ps_s3_multipart_on_master():
     uploader.complete_upload()
     fp.close()
 
-    print 'wait for 5sec for the messages...'
+    print('wait for 5sec for the messages...')
     time.sleep(5)
 
     # check amqp receiver
@@ -2092,7 +2093,7 @@ def test_ps_s3_metadata_on_master():
     key.set_metadata('meta1', 'This is my metadata value')
     key.set_contents_from_string('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa')
     keys = list(bucket.list())
-    print 'wait for 5sec for the messages...'
+    print('wait for 5sec for the messages...')
     time.sleep(5)
     # check amqp receiver
     receiver.verify_s3_events(keys, exact_match=True)
@@ -2168,7 +2169,7 @@ def test_ps_s3_versioned_deletion_on_master():
     bucket.delete_key(key.name, version_id=v1)
     delete_marker_key.delete()
 
-    print 'wait for 5sec for the messages...'
+    print('wait for 5sec for the messages...')
     time.sleep(5)
 
     # check amqp receiver
src/test/rgw/rgw_multi/zone_ps.py
index 8ee49f2036b164d1bda7b7a5c968908a84d5db2a..cfdf480ee0b5d1d5f52bfb3f2a9b3b1663522495 100644 (file)
@@ -141,10 +141,11 @@ def delete_all_s3_topics(conn, region):
 
         topics = client.list_topics()['Topics']
         for topic in topics:
-            print 'topic cleanup, deleting: ' + topic['TopicArn']
+            print('topic cleanup, deleting: ' + topic['TopicArn'])
             assert client.delete_topic(TopicArn=topic['TopicArn'])['ResponseMetadata']['HTTPStatusCode'] == 200
     except:
-        print 'failed to do topic cleanup. if there are topics they may need to be manually deleted'
+        print('failed to do topic cleanup. if there are topics '
+              'they may need to be manually deleted')
     
 
 class PSTopicS3:
src/test/rgw/test_multi.py
index d2ccbe505c86b765d9c896c0f582b214765b45ca..53ac815d942350a79b7537b1839dbea66add1cc7 100644 (file)
@@ -269,7 +269,7 @@ def init(parse_args):
     num_az_zones = cfg.getint(section, 'num_az_zones')
 
     num_ps_zones = args.num_ps_zones if num_ps_zones_from_conf == 0 else num_ps_zones_from_conf 
-    print 'num_ps_zones = ' + str(num_ps_zones)
+    print('num_ps_zones = ' + str(num_ps_zones))
 
     num_zones = args.num_zones + num_es_zones + num_cloud_zones + num_ps_zones + num_az_zones