from .multisite import Zone
+class Config:
+    """ test configuration """
+    def __init__(self, **kwargs):
+        # by default, wait up to 5 minutes before giving up on a sync checkpoint
+        self.checkpoint_retries = kwargs.get('checkpoint_retries', 60)
+        self.checkpoint_delay = kwargs.get('checkpoint_delay', 5)
+        # allow some time for realm reconfiguration after changing master zone
+        self.reconfigure_delay = kwargs.get('reconfigure_delay', 5)
+
# rgw multisite tests, written against the interfaces provided in rgw_multi.
# these tests must be initialized and run by another module that provides
# implementations of these interfaces by calling init_multi()
realm = None
user = None
-def init_multi(_realm, _user):
+config = None
+def init_multi(_realm, _user, _config=None):
    global realm
    realm = _realm
    global user
    user = _user
+    global config
+    config = _config or Config()
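# Illustrative sketch, not part of this change or of the module: a driver
# module that supplies the realm/user implementations can tune the sync
# timeouts by passing a Config; omitting it keeps the defaults above
# (60 retries x 5s = 5 minutes). 'my_realm' and 'my_user' stand in for the
# driver's own objects.
def _example_init_with_custom_timing(my_realm, my_user):
    # double the sync window for slow clusters and allow 30s for reconfiguration
    cfg = Config(checkpoint_retries=120, checkpoint_delay=10, reconfigure_delay=30)
    init_multi(my_realm, my_user, cfg)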
log = logging.getLogger(__name__)
    return period, realm_epoch, num_shards, markers
def meta_sync_status(zone):
-    for _ in range(60):
+    for _ in range(config.checkpoint_retries):
        cmd = ['metadata', 'sync', 'status'] + zone.zone_args()
        meta_sync_status_json, retcode = zone.cluster.admin(cmd, check_retcode=False, read_only=True)
        if retcode == 0:
            return parse_meta_sync_status(meta_sync_status_json)
        assert(retcode == 2) # ENOENT
-        time.sleep(5)
+        time.sleep(config.checkpoint_delay)
    assert False, 'failed to read metadata sync status for zone=%s' % zone.name
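# The loop above, and the checkpoint loops below, all follow the same bounded
# polling pattern: up to config.checkpoint_retries attempts with a
# config.checkpoint_delay second sleep between them (60 x 5s = 5 minutes by
# default). A hypothetical generic helper, shown only to illustrate the
# pattern and not used by this module:
def _example_poll(check, retries, delay):
    for _ in range(retries):
        result = check()
        if result is not None:
            return result
        time.sleep(delay)
    assert False, 'timed out after %d attempts %ds apart' % (retries, delay)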
    log.info('starting meta checkpoint for zone=%s', zone.name)
-    for _ in range(60):
+    for _ in range(config.checkpoint_retries):
        period, realm_epoch, num_shards, sync_status = meta_sync_status(zone)
        if realm_epoch < current_realm_epoch:
            log.warning('zone %s is syncing realm epoch=%d, behind current realm epoch=%d',
                log.info('finish meta checkpoint for zone=%s', zone.name)
                return
-        time.sleep(5)
+        time.sleep(config.checkpoint_delay)
    assert False, 'failed meta checkpoint for zone=%s' % zone.name
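# Illustrative sketch, not part of this change or of the module: a test
# typically performs a metadata mutation against one zone, then calls
# zonegroup_meta_checkpoint() (defined below) so every zone has observed it
# before any assertions run. 'make_metadata_change' is a hypothetical stand-in
# for whatever mutation the test performs.
def _example_wait_for_meta_sync(zonegroup, make_metadata_change):
    make_metadata_change(zonegroup.master_zone)  # hypothetical mutation, e.g. creating a user
    zonegroup_meta_checkpoint(zonegroup)         # block until all zones have caught up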
def zonegroup_meta_checkpoint(zonegroup, meta_master_zone = None, master_status = None):
    if target_zone == source_zone:
        return None
-    for _ in range(60):
+    for _ in range(config.checkpoint_retries):
        cmd = ['data', 'sync', 'status'] + target_zone.zone_args()
        cmd += ['--source-zone', source_zone.name]
        data_sync_status_json, retcode = target_zone.cluster.admin(cmd, check_retcode=False, read_only=True)
            return parse_data_sync_status(data_sync_status_json)
        assert(retcode == 2) # ENOENT
-        time.sleep(5)
+        time.sleep(config.checkpoint_delay)
    assert False, 'failed to read data sync status for target_zone=%s source_zone=%s' % \
                  (target_zone.name, source_zone.name)
    log_status = data_source_log_status(source_zone)
    log.info('starting data checkpoint for target_zone=%s source_zone=%s', target_zone.name, source_zone.name)
-    for _ in range(60):
+    for _ in range(config.checkpoint_retries):
        num_shards, sync_status = data_sync_status(target_zone, source_zone)
        log.debug('log_status=%s', log_status)
            log.info('finished data checkpoint for target_zone=%s source_zone=%s',
                     target_zone.name, source_zone.name)
            return
-        time.sleep(5)
+        time.sleep(config.checkpoint_delay)
    assert False, 'failed data checkpoint for target_zone=%s source_zone=%s' % \
                  (target_zone.name, source_zone.name)
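# Illustrative sketch, not part of this change or of the module, assuming the
# enclosing helper above is exposed under a name along the lines of
# zone_data_checkpoint(target_zone, source_zone): after writing through
# source_zone, a test can block until target_zone has consumed the data log.
def _example_wait_for_data_sync(target_zone, source_zone):
    zone_data_checkpoint(target_zone, source_zone)  # hypothetical name for the helper above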
    log_status = bucket_source_log_status(source_zone, bucket_name)
    log.info('starting bucket checkpoint for target_zone=%s source_zone=%s bucket=%s', target_zone.name, source_zone.name, bucket_name)
-    for _ in range(60):
+    for _ in range(config.checkpoint_retries):
        sync_status = bucket_sync_status(target_zone, source_zone, bucket_name)
        log.debug('log_status=%s', log_status)
            log.info('finished bucket checkpoint for target_zone=%s source_zone=%s bucket=%s', target_zone.name, source_zone.name, bucket_name)
            return
-        time.sleep(5)
+        time.sleep(config.checkpoint_delay)
    assert False, 'failed bucket checkpoint for target_zone=%s source_zone=%s bucket=%s' % \
                  (target_zone.name, source_zone.name, bucket_name)
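# Illustrative sketch, not part of this change or of the module, assuming the
# enclosing helper above is exposed under a name along the lines of
# zone_bucket_checkpoint(target_zone, source_zone, bucket_name): a typical
# test writes an object through one zone, checkpoints, then reads it back
# through the other. 'source_bucket' is a boto-style bucket object.
def _example_bucket_sync_test(target_zone, source_zone, source_bucket):
    key = source_bucket.new_key('testobj')
    key.set_contents_from_string('payload')
    # hypothetical name for the checkpoint helper above
    zone_bucket_checkpoint(target_zone, source_zone, source_bucket.name)
    # the object should now be readable via a connection to target_zone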
    zonegroup = zone.zonegroup
    zonegroup.period.update(zone, commit=True)
    zonegroup.master_zone = zone
-    # wait for reconfiguration, so that later metadata requests go to the new master
-    time.sleep(5)
+    log.info('Set master zone=%s, waiting %ds for reconfiguration..', zone.name, config.reconfigure_delay)
+    time.sleep(config.reconfigure_delay)
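# Illustrative sketch, not part of this change or of the module, assuming the
# enclosing helper above is exposed under a name along the lines of
# set_master_zone(zone): a failover test promotes another zone, lets the
# helper wait out config.reconfigure_delay, then checkpoints metadata across
# the zonegroup so later requests are served consistently.
def _example_failover(zonegroup, new_master):
    set_master_zone(new_master)           # hypothetical name for the helper above
    zonegroup_meta_checkpoint(zonegroup)  # defined earlier in this module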
def gen_bucket_name():
    global num_buckets