"""
implement log behavior.
"""
- print x
+ print(x)
self.log = tmp
if self.config is None:
self.config = dict()
return f
raise RuntimeError("Client socket {{0}} not found".format(client_name))
-print find_socket("{client_name}")
+print(find_socket("{client_name}"))
""".format(
asok_path=self._asok_path(),
client_name="client.{0}".format(self.client_id))
result[client_id] = dir
return result
- print json.dumps(get_id_to_dir())
+ print(json.dumps(get_id_to_dir()))
""")
p = self.client_remote.run(args=[
pyscript = dedent("""
import os
- print open(os.path.join("{debug_dir}", "{filename}")).read()
+ print(open(os.path.join("{debug_dir}", "{filename}")).read())
""").format(debug_dir=debug_dir, filename=filename)
p = self.client_remote.run(args=[
sys.exit(e.errno)
attrs = ["st_mode", "st_ino", "st_dev", "st_nlink", "st_uid", "st_gid", "st_size", "st_atime", "st_mtime", "st_ctime"]
- print json.dumps(
+ print(json.dumps(
dict([(a, getattr(s, a)) for a in attrs]),
- indent=2)
+ indent=2))
""").format(stat_call=stat_call)
proc = self._run_python(pyscript)
if wait:
import os
import stat
- print os.stat("{path}").st_ino
+ print(os.stat("{path}").st_ino)
""").format(path=abs_path)
else:
pyscript = dedent("""
import os
import stat
- print os.lstat("{path}").st_ino
+ print(os.lstat("{path}").st_ino)
""").format(path=abs_path)
proc = self._run_python(pyscript)
import os
import stat
- print os.stat("{path}").st_nlink
+ print(os.stat("{path}").st_nlink)
""").format(path=abs_path)
proc = self._run_python(pyscript)
path = "{path}"
- print "Starting creation..."
+ print("Starting creation...")
start = time.time()
os.mkdir(path)
dfd = os.open(path, os.O_DIRECTORY)
fd = open(os.path.join(path, "childfile"), "w")
- print "Finished creation in {{0}}s".format(time.time() - start)
+ print("Finished creation in {{0}}s".format(time.time() - start))
- print "Starting fsync..."
+ print("Starting fsync...")
start = time.time()
if {dirfsync}:
os.fsync(dfd)
else:
os.fsync(fd)
- print "Finished fsync in {{0}}s".format(time.time() - start)
+ print("Finished fsync in {{0}}s".format(time.time() - start))
""".format(path=path,dirfsync=str(dirfsync)))
)
cephfs.mount()
client_id = cephfs.get_instance_id()
cephfs.abort_conn()
- print client_id
+ print(client_id)
""")
)
gid = int(gid_str);
inotable_dict = {}
for rank in ranks:
inotable_oid = "mds{rank:d}_".format(rank=rank) + "inotable"
- print "Trying to fetch inotable object: " + inotable_oid
+ print("Trying to fetch inotable object: " + inotable_oid)
#self.fs.get_metadata_object("InoTable", "mds0_inotable")
inotable_raw = self.fs.get_metadata_object_raw(inotable_oid)
import os
# Write some buffered data through before going full, all should be well
- print "writing some data through which we expect to succeed"
+ print("writing some data through which we expect to succeed")
bytes = 0
f = os.open("{file_path}", os.O_WRONLY | os.O_CREAT)
bytes += os.write(f, 'a' * 512 * 1024)
os.fsync(f)
- print "fsync'ed data successfully, will now attempt to fill fs"
+ print("fsync'ed data successfully, will now attempt to fill fs")
# Okay, now we're going to fill up the filesystem, and then keep
# writing until we see an error from fsync. As long as we're doing
for n in range(0, int({fill_mb} * 0.9)):
bytes += os.write(f, 'x' * 1024 * 1024)
- print "wrote {{0}} bytes via buffered write, may repeat".format(bytes)
- print "done writing {{0}} bytes".format(bytes)
+ print("wrote {{0}} bytes via buffered write, may repeat".format(bytes))
+ print("done writing {{0}} bytes".format(bytes))
# OK, now we should sneak in under the full condition
# due to the time it takes the OSDs to report to the
# mons, and get a successful fsync on our full-making data
os.fsync(f)
- print "successfully fsync'ed prior to getting full state reported"
+ print("successfully fsync'ed prior to getting full state reported")
# buffered write, add more dirty data to the buffer
- print "starting buffered write"
+ print("starting buffered write")
try:
for n in range(0, int({fill_mb} * 0.2)):
bytes += os.write(f, 'x' * 1024 * 1024)
- print "sleeping a bit as we've exceeded 90% of our expected full ratio"
+ print("sleeping a bit as we've exceeded 90% of our expected full ratio")
time.sleep({full_wait})
except OSError:
pass;
- print "wrote, now waiting 30s and then doing a close we expect to fail"
+ print("wrote, now waiting 30s and then doing a close we expect to fail")
# Wait long enough for a background flush that should fail
time.sleep(30)
try:
os.close(f)
except OSError:
- print "close() returned an error as expected"
+ print("close() returned an error as expected")
else:
raise RuntimeError("close() failed to raise error")
else:
import os
# Write some buffered data through before going full, all should be well
- print "writing some data through which we expect to succeed"
+ print("writing some data through which we expect to succeed")
bytes = 0
f = os.open("{file_path}", os.O_WRONLY | os.O_CREAT)
bytes += os.write(f, 'a' * 4096)
os.fsync(f)
- print "fsync'ed data successfully, will now attempt to fill fs"
+ print("fsync'ed data successfully, will now attempt to fill fs")
# Okay, now we're going to fill up the filesystem, and then keep
# writing until we see an error from fsync. As long as we're doing
for n in range(0, int({fill_mb} * 1.1)):
try:
bytes += os.write(f, 'x' * 1024 * 1024)
- print "wrote bytes via buffered write, moving on to fsync"
+ print("wrote bytes via buffered write, moving on to fsync")
except OSError as e:
- print "Unexpected error %s from write() instead of fsync()" % e
+ print("Unexpected error %s from write() instead of fsync()" % e)
raise
try:
os.fsync(f)
- print "fsync'ed successfully"
+ print("fsync'ed successfully")
except OSError as e:
- print "Reached fullness after %.2f MB" % (bytes / (1024.0 * 1024.0))
+ print("Reached fullness after %.2f MB" % (bytes / (1024.0 * 1024.0)))
full = True
break
else:
- print "Not full yet after %.2f MB" % (bytes / (1024.0 * 1024.0))
+ print("Not full yet after %.2f MB" % (bytes / (1024.0 * 1024.0)))
if n > {fill_mb} * 0.9:
# Be cautious in the last region where we expect to hit
# the full condition, so that we don't overshoot too dramatically
- print "sleeping a bit as we've exceeded 90% of our expected full ratio"
+ print("sleeping a bit as we've exceeded 90% of our expected full ratio")
time.sleep({full_wait})
if not full:
# close() should not raise an error because we already caught it in
# fsync. There shouldn't have been any more writeback errors
# since then because all IOs got cancelled on the full flag.
- print "calling close"
+ print("calling close")
os.close(f)
- print "close() did not raise error"
+ print("close() did not raise error")
os.unlink("{file_path}")
""")
args=[
"python",
"-c",
- "import json; print len(json.load(open('/tmp/journal.json')))"
+ "import json; print(len(json.load(open('/tmp/journal.json'))))"
],
stdout=StringIO())
event_count = int(p.stdout.getvalue().strip())
for b in e['buckets']:
c = b['categories']
if b['bucket'] == 'nosuchbucket':
- print "got here"
+ print("got here")
try:
b2 = self.e2b(e2, b['bucket'], False)
if b2 != None:
def send_raw_http_request(conn, method, bucket_name, key_name, follow_redirects = False):
url = create_presigned_url(conn, method, bucket_name, key_name, 3600)
- print url
+ print(url)
h = httplib2.Http()
h.follow_redirects = follow_redirects
return h.request(url, method)
pgnum=0
pgstr = manager.get_pgid(pool, pgnum)
stats = manager.get_single_pg_stats(pgstr)
- print stats['state']
+ print(stats['state'])
timeout=60
start=time.time()
#expect the pg status to be active+undersized+degraded
#pg should recover and become active+clean within timeout
stats = manager.get_single_pg_stats(pgstr)
- print stats['state']
+ print(stats['state'])
timeout=10
start=time.time()
)
bucket = conn.create_bucket('{bucket_name}')
for bucket in conn.get_all_buckets():
- print bucket.name + "\t" + bucket.creation_date
+ print(bucket.name + "\t" + bucket.creation_date)
""".format(access_key=access_key, secret_key=secret_key, dns_name=dns_name, bucket_name=bucket_name)
py_bucket_file = '{testdir}/create_bucket.py'.format(testdir=testdir)
misc.sudo_write_file(
else:
def tmp(x):
"""Local display"""
- print x
+ print(x)
self.log = tmp
self.stopping = False
#!/usr/bin/python
+from __future__ import print_function
+
import json
import subprocess
import shlex
else:
assert False, 'cmd is not a string/unicode nor a list!'
- print 'call: {0}'.format(args)
+ print('call: {0}'.format(args))
proc = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
ret = proc.wait()
try:
(r, p) = call(cmd)
except ValueError as e:
- print >> sys.stderr, \
- 'unable to run {c}: {err}'.format(c=repr(cmd), err=e.message)
+ print('unable to run {c}: {err}'.format(c=repr(cmd), err=e),
+ file=sys.stderr)
return errno.EINVAL
assert r == p.returncode, \
if len(cmd_args) > 0:
(cmd_args_key, cmd_args_val) = cmd_args.split('=')
- print 'generating keyring for {m}/{c}'.format(m=module,c=cmd_cmd)
+ print('generating keyring for {m}/{c}'.format(m=module,c=cmd_cmd))
# gen keyring
for (good_or_bad,kind_map) in perms.iteritems():
for (kind,lst) in kind_map.iteritems():
'ceph auth get-or-create {n} {c}'.format(
n=cname,c=run_cap), 0, k)
# keyring generated
- print 'testing {m}/{c}'.format(m=module,c=cmd_cmd)
+ print('testing {m}/{c}'.format(m=module,c=cmd_cmd))
# test
for good_bad in perms.iterkeys():
test_all()
test_misc()
- print 'OK'
+ print('OK')
return 0
aws_secret_access_key=os.environ["SKEY"])
bucket = conn.lookup(bucket_name)
if (bucket == None):
- print "bucket '%s' no longer exists" % bucket_name
+ print("bucket '%s' no longer exists" % bucket_name)
sys.exit(0)
-print "deleting bucket '%s' ..." % bucket_name
+print("deleting bucket '%s' ..." % bucket_name)
bucket.delete()
-print "done."
+print("done.")
sys.exit(0)
cfg.read(config_locations)
else:
if not os.path.isfile(args.config):
- print "Could not open configuration file %s" % args.config
+ print("Could not open configuration file %s" % args.config)
sys.exit(1)
cfg.read(args.config)
# Initialize the configuration and generate the Application
config = init_config()
if config == None:
- print "Could not parse configuration file. Tried to parse %s" % config_locations
+ print("Could not parse configuration file. "
+ "Tried to parse %s" % config_locations)
sys.exit(1)
app = generate_app(config)
try:
key.set_contents_from_string(content)
except Exception as e:
- print 'Error: ' + str(e)
+ print('Error: ' + str(e))
# HTTP endpoint functions
break
except Exception as error:
remaining_retries -= 1
- print 'failed to connect to rabbitmq (remaining retries ' + str(remaining_retries) + '): ' + str(error)
+ print('failed to connect to rabbitmq (remaining retries '
+ + str(remaining_retries) + '): ' + str(error))
if remaining_retries == 0:
raise Exception('failed to connect to rabbitmq - no retries left')
proc = subprocess.Popen('rabbitmq-server')
except Exception as error:
log.info('failed to execute rabbitmq-server: %s', str(error))
- print 'failed to execute rabbitmq-server: %s' % str(error)
+ print('failed to execute rabbitmq-server: %s' % str(error))
return None
# TODO add rabbitmq checkpoint instead of sleep
time.sleep(5)
for i in range(number_of_objects):
key = bucket.new_key(str(i))
key.set_contents_from_string('bar')
- print 'Zonegroup: ' + zonegroup.name
- print 'user: ' + get_user()
- print 'tenant: ' + get_tenant()
- print 'Master Zone'
+ print('Zonegroup: ' + zonegroup.name)
+ print('user: ' + get_user())
+ print('tenant: ' + get_tenant())
+ print('Master Zone')
print_connection_info(zones[0].conn)
- print 'PubSub Zone'
+ print('PubSub Zone')
print_connection_info(ps_zones[0].conn)
- print 'Bucket: ' + bucket_name
+ print('Bucket: ' + bucket_name)
def test_ps_s3_notification_low_level():
assert_equal(status/100, 2)
skip_notif4 = False
except Exception as error:
- print 'note: metadata filter is not supported by boto3 - skipping test'
+ print('note: metadata filter is not supported by boto3 - skipping test')
skip_notif4 = True
else:
- print 'filtering by attributes only supported on master zone'
+ print('filtering by attributes only supported on master zone')
skip_notif4 = True
key.set_contents_from_string('bar')
if on_master:
- print 'wait for 5sec for the messages...'
+ print('wait for 5sec for the messages...')
time.sleep(5)
else:
zone_bucket_checkpoint(ps_zone.zone, zones[0].zone, bucket_name)
try:
result, status = s3_notification_conf.set_config()
except Exception as error:
- print str(error) + ' - is expected'
+ print(str(error) + ' - is expected')
else:
assert False, 'invalid event name is expected to fail'
try:
_, _ = s3_notification_conf.set_config()
except Exception as error:
- print str(error) + ' - is expected'
+ print(str(error) + ' - is expected')
else:
assert False, 'missing notification name is expected to fail'
try:
_, _ = s3_notification_conf.set_config()
except Exception as error:
- print str(error) + ' - is expected'
+ print(str(error) + ' - is expected')
else:
assert False, 'invalid ARN is expected to fail'
try:
_, _ = s3_notification_conf.set_config()
except Exception as error:
- print str(error) + ' - is expected'
+ print(str(error) + ' - is expected')
else:
assert False, 'unknown topic is expected to fail'
try:
_, _ = s3_notification_conf.set_config()
except Exception as error:
- print str(error) + ' - is expected'
+ print(str(error) + ' - is expected')
else:
assert False, 'unknown bucket is expected to fail'
bucket_name = gen_bucket_name()
bucket = zones[0].create_bucket(bucket_name)
# create objects in the bucket (async)
- print 'creating objects...'
+ print('creating objects...')
number_of_objects = 1000
client_threads = []
start_time = time.time()
[thr.join() for thr in client_threads]
time_diff = time.time() - start_time
- print 'average time for object creation: ' + str(time_diff*1000/number_of_objects) + ' milliseconds'
+ print('average time for object creation: ' + str(time_diff*1000/number_of_objects) + ' milliseconds')
- print 'total number of objects: ' + str(len(list(bucket.list())))
+ print('total number of objects: ' + str(len(list(bucket.list()))))
- print 'deleting objects...'
+ print('deleting objects...')
client_threads = []
start_time = time.time()
for key in bucket.list():
[thr.join() for thr in client_threads]
time_diff = time.time() - start_time
- print 'average time for object deletion: ' + str(time_diff*1000/number_of_objects) + ' milliseconds'
+ print('average time for object deletion: ' + str(time_diff*1000/number_of_objects) + ' milliseconds')
# cleanup
zones[0].delete_bucket(bucket_name)
[thr.join() for thr in client_threads]
time_diff = time.time() - start_time
- print 'average time for creation + qmqp notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds'
+ print('average time for creation + amqp notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds')
- print 'wait for 5sec for the messages...'
+ print('wait for 5sec for the messages...')
time.sleep(5)
# check amqp receiver
keys = list(bucket.list())
- print 'total number of objects: ' + str(len(keys))
+ print('total number of objects: ' + str(len(keys)))
receiver1.verify_s3_events(keys, exact_match=True)
receiver2.verify_s3_events(keys, exact_match=True)
[thr.join() for thr in client_threads]
time_diff = time.time() - start_time
- print 'average time for creation + http notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds'
+ print('average time for creation + http notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds')
- print 'wait for 5sec for the messages...'
+ print('wait for 5sec for the messages...')
time.sleep(5)
# check amqp receiver 1 for deletions
[thr.join() for thr in client_threads]
time_diff = time.time() - start_time
- print 'average time for creation + http notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds'
+ print('average time for creation + http notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds')
- print 'wait for 5sec for the messages...'
+ print('wait for 5sec for the messages...')
time.sleep(5)
# check http receiver
keys = list(bucket.list())
- print 'total number of objects: ' + str(len(keys))
+ print('total number of objects: ' + str(len(keys)))
http_server.verify_s3_events(keys, exact_match=True)
# delete objects from the bucket
[thr.join() for thr in client_threads]
time_diff = time.time() - start_time
- print 'average time for creation + http notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds'
+ print('average time for creation + http notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds')
- print 'wait for 5sec for the messages...'
+ print('wait for 5sec for the messages...')
time.sleep(5)
# check http receiver
uploader.complete_upload()
fp.close()
- print 'wait for 5sec for the messages...'
+ print('wait for 5sec for the messages...')
time.sleep(5)
# check amqp receiver
uploader.complete_upload()
fp.close()
- print 'wait for 5sec for the messages...'
+ print('wait for 5sec for the messages...')
time.sleep(5)
# check amqp receiver
key.set_metadata('meta1', 'This is my metadata value')
key.set_contents_from_string('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa')
keys = list(bucket.list())
- print 'wait for 5sec for the messages...'
+ print('wait for 5sec for the messages...')
time.sleep(5)
# check amqp receiver
receiver.verify_s3_events(keys, exact_match=True)
bucket.delete_key(key.name, version_id=v1)
delete_marker_key.delete()
- print 'wait for 5sec for the messages...'
+ print('wait for 5sec for the messages...')
time.sleep(5)
# check amqp receiver
topics = client.list_topics()['Topics']
for topic in topics:
- print 'topic cleanup, deleting: ' + topic['TopicArn']
+ print('topic cleanup, deleting: ' + topic['TopicArn'])
assert client.delete_topic(TopicArn=topic['TopicArn'])['ResponseMetadata']['HTTPStatusCode'] == 200
except:
- print 'failed to do topic cleanup. if there are topics they may need to be manually deleted'
+ print('failed to do topic cleanup. if there are topics '
+ 'they may need to be manually deleted')
class PSTopicS3:
num_az_zones = cfg.getint(section, 'num_az_zones')
num_ps_zones = args.num_ps_zones if num_ps_zones_from_conf == 0 else num_ps_zones_from_conf
- print 'num_ps_zones = ' + str(num_ps_zones)
+ print('num_ps_zones = ' + str(num_ps_zones))
num_zones = args.num_zones + num_es_zones + num_cloud_zones + num_ps_zones + num_az_zones