return True
-def create_bucket_per_zone(zonegroup_conns):
+def create_bucket_per_zone(zonegroup_conns, buckets_per_zone = 1):
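+ # zone_bucket is now a list of (zone, bucket) tuples instead of a dict,
+ # so a single zone can own more than one bucket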
buckets = []
- zone_bucket = {}
+ zone_bucket = []
for zone in zonegroup_conns.rw_zones:
- bucket_name = gen_bucket_name()
- log.info('create bucket zone=%s name=%s', zone.name, bucket_name)
- bucket = zone.create_bucket(bucket_name)
- buckets.append(bucket_name)
- zone_bucket[zone] = bucket
+ for i in xrange(buckets_per_zone):
+ bucket_name = gen_bucket_name()
+ log.info('create bucket zone=%s name=%s', zone.name, bucket_name)
+ bucket = zone.create_bucket(bucket_name)
+ buckets.append(bucket_name)
+ zone_bucket.append((zone, bucket))
return buckets, zone_bucket
def create_bucket_per_zone_in_realm():
buckets = []
- zone_bucket = {}
+ zone_bucket = []
for zonegroup in realm.current_period.zonegroups:
zg_conn = ZonegroupConns(zonegroup)
b, z = create_bucket_per_zone(zg_conn)
buckets.extend(b)
- zone_bucket.update(z)
+ zone_bucket.extend(z)
return buckets, zone_bucket
def test_bucket_create():
for zone in zonegroup_conns.zones:
assert check_all_buckets_exist(zone, buckets)
- for zone, bucket_name in zone_bucket.items():
+ for zone, bucket_name in zone_bucket:
zone.conn.delete_bucket(bucket_name)
zonegroup_meta_checkpoint(zonegroup)
content = 'asdasd'
# don't wait for meta sync just yet
- for zone, bucket_name in zone_bucket.items():
+ for zone, bucket_name in zone_bucket:
for objname in objnames:
k = new_key(zone, bucket_name, objname)
k.set_contents_from_string(content)
zonegroup_meta_checkpoint(zonegroup)
- for source_conn, bucket in zone_bucket.items():
+ for source_conn, bucket in zone_bucket:
for target_conn in zonegroup_conns.zones:
if source_conn.zone == target_conn.zone:
continue
content = 'asdasd'
# don't wait for meta sync just yet
- for zone, bucket in zone_bucket.items():
+ for zone, bucket in zone_bucket:
k = new_key(zone, bucket, objname)
k.set_contents_from_string(content)
zonegroup_meta_checkpoint(zonegroup)
# check object exists
- for source_conn, bucket in zone_bucket.items():
+ for source_conn, bucket in zone_bucket:
for target_conn in zonegroup_conns.zones:
if source_conn.zone == target_conn.zone:
continue
check_bucket_eq(source_conn, target_conn, bucket)
# check object removal
- for source_conn, bucket in zone_bucket.items():
+ for source_conn, bucket in zone_bucket:
k = get_key(source_conn, bucket, objname)
k.delete()
for target_conn in zonegroup_conns.zones:
buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns)
# enable versioning
- for _, bucket in zone_bucket.items():
+ for _, bucket in zone_bucket:
bucket.configure_versioning(True)
zonegroup_meta_checkpoint(zonegroup)
# upload a dummy object to each bucket and wait for sync. this forces each
# bucket to finish a full sync and switch to incremental
- for source_conn, bucket in zone_bucket.items():
+ for source_conn, bucket in zone_bucket:
new_key(source_conn, bucket, 'dummy').set_contents_from_string('')
for target_conn in zonegroup_conns.zones:
if source_conn.zone == target_conn.zone:
continue
zone_bucket_checkpoint(target_conn.zone, source_conn.zone, bucket.name)
- for _, bucket in zone_bucket.items():
+ for _, bucket in zone_bucket:
# create and delete multiple versions of an object from each zone
for zone_conn in zonegroup_conns.rw_zones:
obj = 'obj-' + zone_conn.name
log.debug('version3 id=%s', v.version_id)
k.bucket.delete_key(obj, version_id=v.version_id)
- for source_conn, bucket in zone_bucket.items():
+ for source_conn, bucket in zone_bucket:
for target_conn in zonegroup_conns.zones:
if source_conn.zone == target_conn.zone:
continue
def test_bucket_versioning():
buckets, zone_bucket = create_bucket_per_zone_in_realm()
- for _, bucket in zone_bucket.items():
+ for _, bucket in zone_bucket:
bucket.configure_versioning(True)
res = bucket.get_versioning_status()
key = 'Versioning'
def test_bucket_acl():
buckets, zone_bucket = create_bucket_per_zone_in_realm()
- for _, bucket in zone_bucket.items():
+ for _, bucket in zone_bucket:
assert(len(bucket.get_acl().acl.grants) == 1) # single grant on owner
bucket.set_acl('public-read')
assert(len(bucket.get_acl().acl.grants) == 2) # new grant on AllUsers
buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns)
zonegroup_meta_checkpoint(zonegroup)
- for zone_conn, bucket_name in zone_bucket.items():
+ for zone_conn, bucket_name in zone_bucket:
# upload an object to each bucket on its own zone
conn = zone_conn.get_connection()
bucket = conn.get_bucket(bucket_name)
# assert that each bucket still exists on the master
c1 = zonegroup_conns.master_zone.conn
- for _, bucket_name in zone_bucket.items():
+ for _, bucket_name in zone_bucket:
assert c1.get_bucket(bucket_name)
def test_multi_period_incremental_sync():
set_master_zone(z2)
mdlog_periods += [realm.current_period.id]
- for zone_conn, _ in zone_bucket.items():
+ for zone_conn, _ in zone_bucket:
if zone_conn.zone == z3:
continue
bucket_name = gen_bucket_name()
set_master_zone(z1)
mdlog_periods += [realm.current_period.id]
- for zone_conn, bucket_name in zone_bucket.items():
+ for zone_conn, bucket_name in zone_bucket:
if zone_conn.zone == z3:
continue
bucket_name = gen_bucket_name()
zonegroup_meta_checkpoint(zonegroup)
# verify that we end up with the same objects
- for source_conn, _ in zone_bucket.items():
- for bucket_name in buckets:
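+ # compare every bucket between each pair of zones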
+ for bucket_name in buckets:
+ for source_conn, _ in zone_bucket:
for target_conn in zonegroup_conns.zones:
if source_conn.zone == target_conn.zone:
continue
- check_bucket_eq(source_conn, target_conn, bucket)
+ target_conn.check_bucket_eq(source_conn, bucket_name)
# verify that mdlogs are not empty and match for each period
for period in mdlog_periods:
return zone_conn.zone.tier_type() == "elasticsearch"
-def verify_search(src_keys, result_keys, f):
+def verify_search(bucket_name, src_keys, result_keys, f):
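+ # an empty bucket_name means the search was not scoped to a bucket,
+ # so expected keys come from all of src_keys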
check_keys = []
for k in src_keys:
- log.debug('ZZZ ' + k.bucket.name)
+ if bucket_name and bucket_name != k.bucket.name:
+ continue
if f(k):
check_keys.append(k)
- check_keys.sort(key = lambda l: (l.name, l.version_id))
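+ # object names can repeat across buckets, so sort on bucket name as well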
+ check_keys.sort(key = lambda l: (l.bucket.name, l.name, l.version_id))
log.debug('check keys:' + dump_json(check_keys))
log.debug('result keys:' + dump_json(result_keys))
else:
bucket_name = ''
req = MDSearch(conn, bucket_name, req_str)
- result_keys = req.search(sort_key = lambda k: (k.name, k.version_id))
- verify_search(src_keys, result_keys, src_filter)
+ result_keys = req.search(sort_key = lambda k: (k.bucket.name, k.name, k.version_id))
+ verify_search(bucket_name, src_keys, result_keys, src_filter)
def test_es_object_search():
check_es_configured()
realm = get_realm()
zonegroup = realm.master_zonegroup()
zonegroup_conns = ZonegroupConns(zonegroup)
- buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns)
+ buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns, buckets_per_zone = 2)
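+ # two buckets per zone, so 'bucket ==' queries have other buckets to exclude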
min_size = 10
content = 'a' * min_size
etags = []
names = []
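+ # random object name prefix, so searches don't match leftover objects
+ # from other runs / tests in the same elasticsearch index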
+ obj_prefix = ''.join(random.choice(string.ascii_lowercase) for _ in range(6))
+
# don't wait for meta sync just yet
- for zone, bucket in zone_bucket.items():
+ for zone, bucket in zone_bucket:
for count in xrange(0, max_keys):
- objname = 'foo' + str(count)
+ objname = obj_prefix + str(count)
k = new_key(zone, bucket.name, objname)
k.set_contents_from_string(content + 'x' * count)
zonegroup_meta_checkpoint(zonegroup)
- for source_conn, bucket in zone_bucket.items():
- for target_conn in zonegroup_conns.zones:
- if source_conn.zone == target_conn.zone:
- continue
- if not is_es_zone(target_conn):
- continue
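+ # collect the elasticsearch target zones once up front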
+ targets = []
+ for target_conn in zonegroup_conns.zones:
+ if not is_es_zone(target_conn):
+ continue
+ targets.append(target_conn)
+
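+ # rebuild 'buckets' with bucket objects (until now it held bucket names)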
+ buckets = []
+ # make sure all targets are synced
+ for source_conn, bucket in zone_bucket:
+ buckets.append(bucket)
+ for target_conn in targets:
zone_bucket_checkpoint(target_conn.zone, source_conn.zone, bucket.name)
+ for target_conn in targets:
+
+ # bucket checks
+ for bucket in buckets:
# check name
- do_check_mdsearch(target_conn.conn, None, src_keys , 'bucket == ' + bucket.name, lambda k: True)
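+ # a 'bucket ==' search should only expect keys from that bucket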
+ do_check_mdsearch(target_conn.conn, None, src_keys , 'bucket == ' + bucket.name, lambda k: k.bucket.name == bucket.name)
do_check_mdsearch(target_conn.conn, bucket, src_keys , 'bucket == ' + bucket.name, lambda k: k.bucket.name == bucket.name)
+ # check across all buckets
+ for key in src_keys:
+ # limit the check to a specific key name, otherwise we could get results
+ # from other runs / tests
+ do_check_mdsearch(target_conn.conn, None, src_keys , 'name == ' + key.name, lambda k: k.name == key.name)
+
+ # check on specific bucket
+ for bucket in buckets:
for key in src_keys:
do_check_mdsearch(target_conn.conn, bucket, src_keys , 'name < ' + key.name, lambda k: k.name < key.name)
do_check_mdsearch(target_conn.conn, bucket, src_keys , 'name <= ' + key.name, lambda k: k.name <= key.name)