else:
self.secondaries.append(region_config)
-    def get(self, name):
-        return self.m[name];
+    def get(self, name=None):
+        # With no argument, return the whole region map; with a name,
+        # return that region's config. A separate zero-argument
+        # ``def get(self)`` would silently shadow this method (Python
+        # has no overloading), so both behaviors share one signature.
+        if name is None:
+            return self.m
+        return self.m[name]
+    def iteritems(self):
+        return self.m.iteritems()
regions = RegionsInfo()
+
+class RegionsConn:
+ def __init__(self):
+ self.m = bunch.Bunch()
+ self.default = None
+ self.master = None
+ self.secondaries = []
+
+ def add(self, name, conn):
+ self.m[name] = conn
+ if not self.default:
+ self.default = conn
+ if (conn.conf.is_master):
+ self.master = conn
+ else:
+ self.secondaries.append(conn)
+
+
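For orientation, a minimal sketch of how RegionsConn is meant to behave: the first connection added becomes .default, the one whose conf is flagged is_master becomes .master, and the rest collect in .secondaries. FakeConf and FakeConn below are hypothetical stand-ins for a boto connection and its target config, not part of the patch:

    class FakeConf(object):
        def __init__(self, is_master):
            self.is_master = is_master

    class FakeConn(object):
        def __init__(self, is_master):
            self.conf = FakeConf(is_master)

    rc = RegionsConn()
    rc.add('west', FakeConn(is_master=False))  # first add becomes rc.default
    rc.add('east', FakeConn(is_master=True))   # is_master routes to rc.master
    assert rc.default is rc.m['west']
    assert rc.master is rc.m['east']
    assert rc.secondaries == [rc.m['west']]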
# nosetests --processes=N with N>1 is safe
_multiprocess_can_split_ = True
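The setup() hunk below replaces the old per-section region lookup with a single "default" region registered on first use; its per-variable loop relies on ConfigParser raising NoOptionError for options a section omits, so optional settings are simply skipped. A standalone Python 2 sketch of that behavior (AKEXAMPLE is a dummy value):

    import ConfigParser
    import StringIO

    cfg = ConfigParser.RawConfigParser()
    cfg.readfp(StringIO.StringIO('[s3 main]\naccess_key = AKEXAMPLE\n'))
    cfg.get('s3 main', 'access_key')      # -> 'AKEXAMPLE'
    try:
        cfg.get('s3 main', 'secret_key')  # option not present
    except ConfigParser.NoOptionError:
        pass                              # treated as "not configured"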
if type_ != 's3':
continue
- try:
- region_name = cfg.get(section, 'region')
- region_config = regions.get(region_name)
- except ConfigParser.NoOptionError:
- region_config = TargetConfig(cfg, section)
+ if len(regions.get()) == 0:
+ regions.add("default", TargetConfig(cfg, section))
config[name] = bunch.Bunch()
for var in [
config[name][var] = cfg.get(section, var)
except ConfigParser.NoOptionError:
pass
- conn = boto.s3.connection.S3Connection(
- aws_access_key_id=cfg.get(section, 'access_key'),
- aws_secret_access_key=cfg.get(section, 'secret_key'),
- is_secure=region_config.is_secure,
- port=region_config.port,
- host=region_config.host,
- # TODO test vhost calling format
- calling_format=region_config.calling_format,
- )
- s3[name] = conn
- targets[name] = TargetConnection(region_config, conn)
+
+ targets[name] = RegionsConn()
+
+ for (k, conf) in regions.iteritems():
+ conn = boto.s3.connection.S3Connection(
+ aws_access_key_id=cfg.get(section, 'access_key'),
+ aws_secret_access_key=cfg.get(section, 'secret_key'),
+ is_secure=conf.is_secure,
+ port=conf.port,
+ host=conf.host,
+ # TODO test vhost calling format
+ calling_format=conf.calling_format,
+ )
+ targets[name].add(k, TargetConnection(conf, conn))
+ s3[name] = targets[name].default.connection
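The net effect: after setup() runs, each [s3 <name>] section exposes one boto connection per configured region, and the first region added doubles as the backwards-compatible s3[name] handle. A sketch of the resulting lookup paths (the region key 'west' is hypothetical):

    targets.main                       # RegionsConn for the [s3 main] section
    targets.main.default               # TargetConnection of the first region added
    targets.main.m['west'].connection  # boto S3Connection for a named region
    s3.main                            # same object as targets.main.default.connection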
# WARNING! we actively delete all buckets we see with the prefix
# we've chosen! Choose your prefix with care, and don't reuse
reset ACLs and such.
"""
if target is None:
- target = targets.main
+ target = targets.main.default
connection = target.connection
if name is None:
name = get_new_bucket_name()
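Call sites can now pick a region per bucket. A hedged example (the 'west' key is again hypothetical):

    bucket = get_new_bucket()                        # default target, generated name
    bucket = get_new_bucket(targets.main.default)    # main section, default region
    bucket = get_new_bucket(targets.main.m['west'])  # a specific configured region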
def test_bucket_create_delete():
name = '{prefix}foo'.format(prefix=get_prefix())
print 'Trying bucket {name!r}'.format(name=name)
- bucket = get_new_bucket(targets.main, name)
+ bucket = get_new_bucket(targets.main.default, name)
# make sure it's actually there
s3.main.get_bucket(bucket.name)
bucket.delete()
Attempt to create a bucket with a specified name, and confirm
that the request fails because of an invalid bucket name.
"""
- e = assert_raises(boto.exception.S3ResponseError, get_new_bucket, targets.main, name)
+ e = assert_raises(boto.exception.S3ResponseError, get_new_bucket, targets.main.default, name)
eq(e.status, 400)
eq(e.reason, 'Bad Request')
eq(e.error_code, 'InvalidBucketName')
def test_bucket_create_naming_bad_short_empty():
# bucket creates where name is empty look like PUTs to the parent
# resource (with slash), hence their error response is different
- e = assert_raises(boto.exception.S3ResponseError, get_new_bucket, targets.main, '')
+ e = assert_raises(boto.exception.S3ResponseError, get_new_bucket, targets.main.default, '')
eq(e.status, 405)
eq(e.reason, 'Method Not Allowed')
eq(e.error_code, 'MethodNotAllowed')
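To see why the empty name is special, compare the request lines boto ends up sending (path-style calling format assumed): a normal create is a PUT on the bucket path, while an empty name degenerates into a PUT on the service root, which is not an allowed method there:

    # PUT /foo HTTP/1.1   -> bucket create; a bad name gets 400 InvalidBucketName
    # PUT / HTTP/1.1      -> PUT on the service root, hence 405 MethodNotAllowed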
# should be very rare
if _prefix is None:
_prefix = get_prefix()
- get_new_bucket(targets.main, '{prefix}{name}'.format(
+ get_new_bucket(targets.main.default, '{prefix}{name}'.format(
prefix=_prefix,
name=name,
))
prefix = get_prefix()
assert len(prefix) < 255
num = length - len(prefix)
- get_new_bucket(targets.main, '{prefix}{name}'.format(
+ get_new_bucket(targets.main.default, '{prefix}{name}'.format(
prefix=prefix,
name=num*'a',
))
prefix = get_prefix()
length = 251
num = length - len(prefix)
- bucket = get_new_bucket(targets.main, '{prefix}{name}'.format(
+ bucket = get_new_bucket(targets.main.default, '{prefix}{name}'.format(
prefix=prefix,
name=num*'a',
))
@attr(operation='re-create')
@attr(assertion='idempotent success')
def test_bucket_create_exists():
- bucket = get_new_bucket(targets.main)
+ bucket = get_new_bucket(targets.main.default)
# REST idempotency means this should be a nop
- get_new_bucket(targets.main, bucket.name)
+ get_new_bucket(targets.main.default, bucket.name)
@attr(resource='bucket')
# Names are shared across a global namespace. As such, no two
# users can create a bucket with the same name.
bucket = get_new_bucket()
- e = assert_raises(boto.exception.S3CreateError, get_new_bucket, targets.alt, bucket.name)
+ e = assert_raises(boto.exception.S3CreateError, get_new_bucket, targets.alt.default, bucket.name)
eq(e.status, 409)
eq(e.reason, 'Conflict')
eq(e.error_code, 'BucketAlreadyExists')
@attr(operation='acl bucket-owner-read')
@attr(assertion='read back expected values')
def test_object_acl_canned_bucketownerread():
- bucket = get_new_bucket(targets.main)
+ bucket = get_new_bucket(targets.main.default)
bucket.set_acl('public-read-write')
key = s3.alt.get_bucket(bucket.name).new_key('foo')
@attr(operation='acl bucket-owner-read')
@attr(assertion='read back expected values')
def test_object_acl_canned_bucketownerfullcontrol():
- bucket = get_new_bucket(targets.main)
+ bucket = get_new_bucket(targets.main.default)
bucket.set_acl('public-read-write')
key = s3.alt.get_bucket(bucket.name).new_key('foo')
@attr('fails_on_dho')
def test_bucket_header_acl_grants():
headers = _get_acl_header()
- bucket = get_new_bucket(targets.main, get_prefix(), headers)
+ bucket = get_new_bucket(targets.main.default, get_prefix(), headers)
policy = bucket.get_acl()
check_grants(
@attr('fails_on_rgw')
def test_logging_toggle():
bucket = get_new_bucket()
- log_bucket = get_new_bucket(targets.main, bucket.name + '-log')
+ log_bucket = get_new_bucket(targets.main.default, bucket.name + '-log')
log_bucket.set_as_logging_target()
bucket.enable_logging(target_bucket=log_bucket, target_prefix=bucket.name)
bucket.disable_logging()
names = [e.name for e in list(li)]
eq(names, key_names)
- bucket2 = get_new_bucket(targets.main, bucket.name)
+ bucket2 = get_new_bucket(targets.main.default, bucket.name)
li = bucket.list()
@attr(operation='copy from an inaccessible bucket')
@attr(assertion='fails w/AttributeError')
def test_object_copy_not_owned_bucket():
- buckets = [get_new_bucket(), get_new_bucket(targets.alt)]
+ buckets = [get_new_bucket(), get_new_bucket(targets.alt.default)]
print repr(buckets[1])
key = buckets[0].new_key('foo123bar')
key.set_contents_from_string('foo')