import json
import sys
-from StringIO import StringIO
-
from boto.s3.connection import S3Connection, OrdinaryCallingFormat
# XXX once we're done, break out the common code into a library module
raise TestException("failed command: user create --uid %s" % uid)
try:
- outj = json.loads(out)
+ outj = json.loads(out.decode('utf-8'))
except ValueError:
raise TestException("invalid json after: user create --uid %s" % uid)
if not isinstance(outj, dict):
raise TestException("failed command: user create --uid %s" % uid)
try:
- outj = json.loads(out)
+ outj = json.loads(out.decode('utf-8'))
except ValueError:
raise TestException("invalid json after: user create --uid %s" % uid)
if not isinstance(outj, dict):
raise TestException("failed command: user create --uid %s" % uid)
try:
- outj = json.loads(out)
+ outj = json.loads(out.decode('utf-8'))
except ValueError:
raise TestException("invalid json after: user create --uid %s" % uid)
if not isinstance(outj, dict):
raise TestException("failed command: key create --uid %s" % uid)
try:
- outj = json.loads(out)
+ outj = json.loads(out.decode('utf-8'))
except ValueError:
raise TestException("invalid json after: key create --uid %s" % uid)
if not isinstance(outj, dict):
raise TestException("failed command: key create --uid %s" % uid)
try:
- outj = json.loads(out)
+ outj = json.loads(out.decode('utf-8'))
except ValueError:
raise TestException("invalid json after: key create --uid %s" % uid)
if not isinstance(outj, dict):
raise TestException("failed command: key create --uid %s" % uid)
try:
- outj = json.loads(out)
+ outj = json.loads(out.decode('utf-8'))
except ValueError:
raise TestException("invalid json after: key create --uid %s" % uid)
if not isinstance(outj, dict):
if ret != 0:
raise TestException("failed command: user create --uid %s" % uid)
try:
- outj = json.loads(out)
+ outj = json.loads(out.decode('utf-8'))
except ValueError:
raise TestException("invalid json after: user create --uid %s" % uid)
if not isinstance(outj, dict):
key = bucket.new_key(objname)
headers = { "Content-Type": "text/plain" }
- key.set_contents_from_string("Test5A\n", headers)
+ key.set_contents_from_string(b"Test5A\n", headers)
key.set_acl('public-read')
#
key = bucket.new_key(objname)
headers = { "Content-Type": "text/plain" }
- key.set_contents_from_string("Test5B\n", headers)
+ key.set_contents_from_string(b"Test5B\n", headers)
key.set_acl('public-read')
#
key = bucket.get_key(objname)
body = key.get_contents_as_string()
- if body != "Test5A\n":
+ if body != b"Test5A\n":
raise TestException("failed body check, bucket %s object %s" %
(bucketname, objname))
bucket = c.get_bucket("test5b:"+bucketname)
key = bucket.get_key(objname)
body = key.get_contents_as_string()
- if body != "Test5B\n":
+ if body != b"Test5B\n":
raise TestException(
"failed body check, tenant %s bucket %s object %s" %
("test5b", bucketname, objname))
import argparse
import sys
import time
-import itertools
-
-import ConfigParser
+try:
+ from itertools import izip_longest as zip_longest
+except ImportError:
+ from itertools import zip_longest
+try:
+ import configparser
+except ImportError:
+ import ConfigParser as configparser
import boto
import boto.s3.connection
if p:
s += str(p)
- print s
+ print(s)
sys.stdout.flush()
def build_cmd(*params):
log(5, 'running cmd: ', cmd)
process = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE)
s = process.communicate()[0]
- log(20, 'command returned status=', process.returncode, ' stdout=', s)
+ log(20, 'command returned status=', process.returncode, ' stdout=', s.decode('utf-8'))
if check_retcode:
assert(process.returncode == 0)
return (s, process.returncode)
return self.zones[zone_name]
def get_zones(self):
    """Generator over all zone objects in this realm (dict keys ignored)."""
    for zone in self.zones.values():
        yield zone
def meta_sync_status(self, zone):
assert(retcode == 2) # ENOENT
+ meta_sync_status_json = meta_sync_status_json.decode('utf-8')
log(20, 'current meta sync status=', meta_sync_status_json)
sync_status = json.loads(meta_sync_status_json)
assert(num_shards == len(sync_markers))
markers={}
- for i in xrange(num_shards):
+ for i in range(num_shards):
markers[i] = sync_markers[i]['val']['marker']
return (num_shards, markers)
def meta_master_log_status(self, master_zone):
(mdlog_status_json, retcode) = master_zone.cluster.rgw_admin_ro('--rgw-realm=' + self.realm + ' mdlog status')
- mdlog_status = json.loads(mdlog_status_json)
+ mdlog_status = json.loads(mdlog_status_json.decode('utf-8'))
markers={}
i = 0
return False
msg = ''
- for i, l, s in zip(log_status, log_status.itervalues(), sync_status.itervalues()):
+ for i, l, s in zip(log_status, log_status.values(), sync_status.values()):
if l > s:
if len(s) != 0:
msg += ', '
assert(retcode == 2) # ENOENT
+ data_sync_status_json = data_sync_status_json.decode('utf-8')
log(20, 'current data sync status=', data_sync_status_json)
sync_status = json.loads(data_sync_status_json)
assert(num_shards == len(sync_markers))
markers={}
- for i in xrange(num_shards):
+ for i in range(num_shards):
markers[i] = sync_markers[i]['val']['marker']
return (num_shards, markers)
assert(retcode == 2) # ENOENT
+ bucket_sync_status_json = bucket_sync_status_json.decode('utf-8')
log(20, 'current bucket sync status=', bucket_sync_status_json)
sync_status = json.loads(bucket_sync_status_json)
def data_source_log_status(self, source_zone):
source_cluster = source_zone.cluster
(datalog_status_json, retcode) = source_cluster.rgw_admin_ro('--rgw-realm=' + self.realm + ' datalog status')
- datalog_status = json.loads(datalog_status_json)
+ datalog_status = json.loads(datalog_status_json.decode('utf-8'))
markers={}
i = 0
cmd += ' --tenant=' + user.tenant + ' --uid=' + user.uid
source_cluster = source_zone.cluster
(bilog_status_json, retcode) = source_cluster.rgw_admin_ro(cmd)
- bilog_status = json.loads(bilog_status_json)
+ bilog_status = json.loads(bilog_status_json.decode('utf-8'))
m={}
markers={}
return False
msg = ''
- for i, l, s in zip(log_status, log_status.itervalues(), sync_status.itervalues()):
+ for i, l, s in zip(log_status, log_status.values(), sync_status.values()):
if l > s:
if len(s) != 0:
msg += ', '
return False
msg = ''
- for i, l, s in zip(log_status, log_status.itervalues(), sync_status.itervalues()):
+ for i, l, s in zip(log_status, log_status.values(), sync_status.values()):
if l > s:
if len(s) != 0:
msg += ', '
def set_master_zone(self, zone):
    """Promote *zone* to master of its zonegroup and commit a new period.

    Issues 'zone modify --master=1' followed by 'period update --commit'
    against the zone's cluster, then records the zone as this realm's
    master_zone.
    """
    admin = zone.cluster.rgw_admin
    realm_arg = '--rgw-realm=' + self.realm
    modify_cmd = realm_arg + ' --rgw-zonegroup=' + zone.zg + ' --rgw-zone=' + zone.zone_name + ' zone modify --master=1'
    (zg_json, retcode) = admin(modify_cmd)
    (period_json, retcode) = admin(realm_arg + ' period update --commit')
    self.master_zone = zone
class RGWUser:
self.base_port = 8000
self.clusters = {}
- for i in xrange(num_clusters):
+ for i in range(num_clusters):
self.clusters[i] = RGWCluster(i + 1, self.base_port + i)
def setup(self, bootstrap, tenant):
self.clusters[0].start()
realm.init_zone(self.clusters[0], 'us', 'us-1', self.base_port)
- for i in xrange(1, self.num_clusters):
+ for i in range(1, self.num_clusters):
self.clusters[i].start()
realm.init_zone(self.clusters[i], 'us', 'us-' + str(i + 1), self.base_port)
else:
- for i in xrange(0, self.num_clusters):
+ for i in range(0, self.num_clusters):
realm.add_zone(self.clusters[i], 'us', 'us-' + str(i + 1), (i == 0))
realm.meta_checkpoint()
for zone in realm.get_zones():
assert check_all_buckets_exist(zone, buckets)
- for zone, bucket_name in zone_bucket.iteritems():
+ for zone, bucket_name in zone_bucket.items():
conn = zone.get_connection(user)
conn.delete_bucket(bucket_name)
for o in b2.get_all_versions():
log(20, 'o=', o.name)
- for k1, k2 in itertools.izip_longest(b1.get_all_versions(), b2.get_all_versions()):
+ for k1, k2 in zip_longest(b1.get_all_versions(), b2.get_all_versions()):
if k1 is None:
log(0, 'failure: key=', k2.name, ' is missing from zone=', zone1.zone_name)
assert False
content = 'asdasd'
# don't wait for meta sync just yet
- for zone, bucket_name in zone_bucket.iteritems():
+ for zone, bucket_name in zone_bucket.items():
for objname in objnames:
k = new_key(zone, bucket_name, objname)
k.set_contents_from_string(content)
realm.meta_checkpoint()
- for source_zone, bucket in zone_bucket.iteritems():
+ for source_zone, bucket in zone_bucket.items():
for target_zone in all_zones:
if source_zone.zone_name == target_zone.zone_name:
continue
content = 'asdasd'
# don't wait for meta sync just yet
- for zone, bucket in zone_bucket.iteritems():
+ for zone, bucket in zone_bucket.items():
k = new_key(zone, bucket, objname)
k.set_contents_from_string(content)
realm.meta_checkpoint()
# check object exists
- for source_zone, bucket in zone_bucket.iteritems():
+ for source_zone, bucket in zone_bucket.items():
for target_zone in all_zones:
if source_zone.zone_name == target_zone.zone_name:
continue
check_bucket_eq(source_zone, target_zone, bucket)
# check object removal
- for source_zone, bucket in zone_bucket.iteritems():
+ for source_zone, bucket in zone_bucket.items():
k = get_key(source_zone, bucket, objname)
k.delete()
for target_zone in all_zones:
for z in zone_bucket:
all_zones.append(z)
- for zone, bucket_name in zone_bucket.iteritems():
+ for zone, bucket_name in zone_bucket.items():
for objname in [ 'p1', '_p1' ]:
k = new_key(zone, bucket_name, objname)
k.set_contents_from_string('asdasd')
# change master to zone 2 -> period 2
realm.set_master_zone(realm.get_zone('us-2'))
- for zone, bucket_name in zone_bucket.iteritems():
+ for zone, bucket_name in zone_bucket.items():
if zone == z3:
continue
for objname in [ 'p2', '_p2' ]:
# change master back to zone 1 -> period 3
realm.set_master_zone(realm.get_zone('us-1'))
- for zone, bucket_name in zone_bucket.iteritems():
+ for zone, bucket_name in zone_bucket.items():
if zone == z3:
continue
for objname in [ 'p3', '_p3' ]:
realm.meta_checkpoint()
# verify that we end up with the same objects
- for source_zone, bucket in zone_bucket.iteritems():
+ for source_zone, bucket in zone_bucket.items():
for target_zone in all_zones:
if source_zone.zone_name == target_zone.zone_name:
continue
realm.remove_zone('us-2')
def init(parse_args):
- cfg = ConfigParser.RawConfigParser({
+ cfg = configparser.RawConfigParser({
'num_zones': 3,
'no_bootstrap': 'false',
'log_level': 20,
path = tpath('test_multi.conf')
try:
- with file(path) as f:
+ with open(path) as f:
cfg.readfp(f)
except:
- print 'WARNING: error reading test config. Path can be set through the RGW_MULTI_TEST_CONF env variable'
+ print('WARNING: error reading test config. Path can be set through the RGW_MULTI_TEST_CONF env variable')
pass
parser = argparse.ArgumentParser(