git-server-git.apps.pok.os.sepia.ceph.com Git - s3-tests.git/commitdiff
s3tests: some more multiregion changes
author Yehuda Sadeh <yehuda@inktank.com>
Fri, 26 Jul 2013 17:46:56 +0000 (10:46 -0700)
committer Yehuda Sadeh <yehuda@inktank.com>
Fri, 26 Jul 2013 17:48:45 +0000 (10:48 -0700)
Now creating a connection per region for each user, so tests can access
the master and the secondary regions, and a default region is set.
No longer tying each user to a specific region, as that doesn't
make sense.

Signed-off-by: Yehuda Sadeh <yehuda@inktank.com>
s3tests/functional/__init__.py
s3tests/functional/test_s3.py
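
For orientation, a minimal sketch of what each configured user looks like once setup() has run with this change. The user name main comes from the suite's config; the helper function, the assertion, and the assumption of one master plus at least one secondary region are illustrative only:

    from s3tests.functional import targets, s3   # module patched in this commit

    def show_multiregion_layout():
        # Hypothetical usage sketch; assumes setup() has already run and the
        # config defines a 'main' user plus a master and at least one secondary.
        main = targets.main                   # RegionsConn for the 'main' user
        default = main.default.connection     # boto S3Connection, default region
        master = main.master.connection       # boto S3Connection, master region
        for target in main.secondaries:       # one TargetConnection per secondary region
            print target.connection.host      # Python 2 print, matching the suite
        # s3.main is now only the default region's raw boto connection
        assert s3.main is default
        return default, master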

diff --git a/s3tests/functional/__init__.py b/s3tests/functional/__init__.py
index f4644bb81f11621be06a7575f53456448252f8a1..69bc1be93e20f15f9cee05c025d617126c79eafb 100644
--- a/s3tests/functional/__init__.py
+++ b/s3tests/functional/__init__.py
@@ -144,10 +144,32 @@ class RegionsInfo:
         else:
             self.secondaries.append(region_config)
     def get(self, name):
-        return self.m[name];
+        return self.m[name]
+    def get(self):
+        return self.m
+    def iteritems(self):
+        return self.m.iteritems()
 
 regions = RegionsInfo()
 
+
+class RegionsConn:
+    def __init__(self):
+        self.m = bunch.Bunch()
+        self.default = None
+        self.master = None
+        self.secondaries = []
+
+    def add(self, name, conn):
+        self.m[name] = conn
+        if not self.default:
+            self.default = conn
+        if (conn.conf.is_master):
+            self.master = conn
+        else:
+            self.secondaries.append(conn)
+
+
 # nosetests --processes=N with N>1 is safe
 _multiprocess_can_split_ = True
 
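A side note on the RegionsInfo hunk above: Python does not overload methods by arity, so the newly added zero-argument get(self) replaces the earlier get(self, name) in the class body; only the no-argument form remains callable, which happens to be the only form the new setup() code uses. If both lookups are ever needed, distinct names avoid the clash. A minimal sketch, with get_all as a made-up name and the master/secondaries bookkeeping omitted:

    import bunch  # already imported by the module

    class RegionsInfo(object):
        def __init__(self):
            self.m = bunch.Bunch()          # name -> TargetConfig, as in the real class

        def get(self, name):
            # look up a single region's config by name
            return self.m[name]

        def get_all(self):
            # return the whole mapping (hypothetical method name)
            return self.m

        def iteritems(self):
            # Python 2 idiom, matching the rest of the suite
            return self.m.iteritems()
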
@@ -193,11 +215,8 @@ def setup():
         if type_ != 's3':
             continue
 
-        try:
-            region_name = cfg.get(section, 'region')
-            region_config = regions.get(region_name)
-        except ConfigParser.NoOptionError:
-            region_config = TargetConfig(cfg, section)
+        if len(regions.get()) == 0:
+            regions.add("default", TargetConfig(cfg, section))
 
         config[name] = bunch.Bunch()
         for var in [
@@ -209,17 +228,21 @@ def setup():
                 config[name][var] = cfg.get(section, var)
             except ConfigParser.NoOptionError:
                 pass
-        conn = boto.s3.connection.S3Connection(
-            aws_access_key_id=cfg.get(section, 'access_key'),
-            aws_secret_access_key=cfg.get(section, 'secret_key'),
-            is_secure=region_config.is_secure,
-            port=region_config.port,
-            host=region_config.host,
-            # TODO test vhost calling format
-            calling_format=region_config.calling_format,
-            )
-        s3[name] = conn
-        targets[name] = TargetConnection(region_config, conn)
+
+        targets[name] = RegionsConn()
+
+        for (k, conf) in regions.iteritems():
+            conn = boto.s3.connection.S3Connection(
+                aws_access_key_id=cfg.get(section, 'access_key'),
+                aws_secret_access_key=cfg.get(section, 'secret_key'),
+                is_secure=conf.is_secure,
+                port=conf.port,
+                host=conf.host,
+                # TODO test vhost calling format
+                calling_format=conf.calling_format,
+                )
+            targets[name].add(k, TargetConnection(conf, conn))
+        s3[name] = targets[name].default.connection
 
     # WARNING! we actively delete all buckets we see with the prefix
     # we've chosen! Choose your prefix with care, and don't reuse
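
After this hunk, s3[name] carries only the default region's raw boto connection, while the full per-region set lives in targets[name]. A small hypothetical helper for reaching a specific region's connection by name (the helper and the 'r2' region key in the usage comment are made up):

    def connection_for_region(user, region_name):
        # Hypothetical helper; assumes setup() has populated targets.
        regions_conn = targets[user]            # RegionsConn built in setup()
        target = regions_conn.m[region_name]    # TargetConnection for that region
        return target.connection                # underlying boto S3Connection

    # e.g.: connection_for_region('main', 'r2').get_all_buckets()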
@@ -262,7 +285,7 @@ def get_new_bucket(target=None, name=None, headers=None):
     reset ACLs and such.
     """
     if target is None:
-        target = targets.main
+        target = targets.main.default
     connection = target.connection
     if name is None:
         name = get_new_bucket_name()
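
Since get_new_bucket() now defaults to targets.main.default, tests that care about a particular region can still pass a target explicitly. A hypothetical example that creates a bucket through the master region's connection (the bucket name suffix is made up):

    master_bucket = get_new_bucket(target=targets.main.master,
                                   name=get_prefix() + 'on-master')

Because the old call sites only need .default appended to keep their previous behaviour, the rest of this commit is a mechanical targets.main -> targets.main.default (and targets.alt -> targets.alt.default) substitution in test_s3.py.
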
diff --git a/s3tests/functional/test_s3.py b/s3tests/functional/test_s3.py
index 641e415e5667cdf748b96ea3d55efe35df6471be..09e4cef69874eac124f11e369c3b1cf7d641f598 100644
--- a/s3tests/functional/test_s3.py
+++ b/s3tests/functional/test_s3.py
@@ -753,7 +753,7 @@ def test_object_write_to_nonexist_bucket():
 def test_bucket_create_delete():
     name = '{prefix}foo'.format(prefix=get_prefix())
     print 'Trying bucket {name!r}'.format(name=name)
-    bucket = get_new_bucket(targets.main, name)
+    bucket = get_new_bucket(targets.main.default, name)
     # make sure it's actually there
     s3.main.get_bucket(bucket.name)
     bucket.delete()
@@ -2312,7 +2312,7 @@ def check_bad_bucket_name(name):
     Attempt to create a bucket with a specified name, and confirm
     that the request fails because of an invalid bucket name.
     """
-    e = assert_raises(boto.exception.S3ResponseError, get_new_bucket, targets.main, name)
+    e = assert_raises(boto.exception.S3ResponseError, get_new_bucket, targets.main.default, name)
     eq(e.status, 400)
     eq(e.reason, 'Bad Request')
     eq(e.error_code, 'InvalidBucketName')
@@ -2338,7 +2338,7 @@ def test_bucket_create_naming_bad_starts_nonalpha():
 def test_bucket_create_naming_bad_short_empty():
     # bucket creates where name is empty look like PUTs to the parent
     # resource (with slash), hence their error response is different
-    e = assert_raises(boto.exception.S3ResponseError, get_new_bucket, targets.main, '')
+    e = assert_raises(boto.exception.S3ResponseError, get_new_bucket, targets.main.default, '')
     eq(e.status, 405)
     eq(e.reason, 'Method Not Allowed')
     eq(e.error_code, 'MethodNotAllowed')
@@ -2385,7 +2385,7 @@ def check_good_bucket_name(name, _prefix=None):
     # should be very rare
     if _prefix is None:
         _prefix = get_prefix()
-    get_new_bucket(targets.main, '{prefix}{name}'.format(
+    get_new_bucket(targets.main.default, '{prefix}{name}'.format(
             prefix=_prefix,
             name=name,
             ))
@@ -2399,7 +2399,7 @@ def _test_bucket_create_naming_good_long(length):
     prefix = get_prefix()
     assert len(prefix) < 255
     num = length - len(prefix)
-    get_new_bucket(targets.main, '{prefix}{name}'.format(
+    get_new_bucket(targets.main.default, '{prefix}{name}'.format(
             prefix=prefix,
             name=num*'a',
             ))
@@ -2474,7 +2474,7 @@ def test_bucket_list_long_name():
     prefix = get_prefix()
     length = 251
     num = length - len(prefix)
-    bucket = get_new_bucket(targets.main, '{prefix}{name}'.format(
+    bucket = get_new_bucket(targets.main.default, '{prefix}{name}'.format(
             prefix=prefix,
             name=num*'a',
             ))
@@ -2572,9 +2572,9 @@ def test_bucket_create_naming_dns_dash_dot():
 @attr(operation='re-create')
 @attr(assertion='idempotent success')
 def test_bucket_create_exists():
-    bucket = get_new_bucket(targets.main)
+    bucket = get_new_bucket(targets.main.default)
     # REST idempotency means this should be a nop
-    get_new_bucket(targets.main, bucket.name)
+    get_new_bucket(targets.main.default, bucket.name)
 
 
 @attr(resource='bucket')
@@ -2585,7 +2585,7 @@ def test_bucket_create_exists_nonowner():
     # Names are shared across a global namespace. As such, no two
     # users can create a bucket with that same name.
     bucket = get_new_bucket()
-    e = assert_raises(boto.exception.S3CreateError, get_new_bucket, targets.alt, bucket.name)
+    e = assert_raises(boto.exception.S3CreateError, get_new_bucket, targets.alt.default, bucket.name)
     eq(e.status, 409)
     eq(e.reason, 'Conflict')
     eq(e.error_code, 'BucketAlreadyExists')
@@ -2908,7 +2908,7 @@ def test_object_acl_canned_authenticatedread():
 @attr(operation='acl bucket-owner-read')
 @attr(assertion='read back expected values')
 def test_object_acl_canned_bucketownerread():
-    bucket = get_new_bucket(targets.main)
+    bucket = get_new_bucket(targets.main.default)
     bucket.set_acl('public-read-write')
 
     key = s3.alt.get_bucket(bucket.name).new_key('foo')
@@ -2952,7 +2952,7 @@ def test_object_acl_canned_bucketownerread():
 @attr(operation='acl bucket-owner-read')
 @attr(assertion='read back expected values')
 def test_object_acl_canned_bucketownerfullcontrol():
-    bucket = get_new_bucket(targets.main)
+    bucket = get_new_bucket(targets.main.default)
     bucket.set_acl('public-read-write')
 
     key = s3.alt.get_bucket(bucket.name).new_key('foo')
@@ -3461,7 +3461,7 @@ def test_object_header_acl_grants():
 @attr('fails_on_dho')
 def test_bucket_header_acl_grants():
     headers = _get_acl_header()
-    bucket = get_new_bucket(targets.main, get_prefix(), headers)
+    bucket = get_new_bucket(targets.main.default, get_prefix(), headers)
 
     policy = bucket.get_acl()
     check_grants(
@@ -3596,7 +3596,7 @@ def test_bucket_acl_revoke_all():
 @attr('fails_on_rgw')
 def test_logging_toggle():
     bucket = get_new_bucket()
-    log_bucket = get_new_bucket(targets.main, bucket.name + '-log')
+    log_bucket = get_new_bucket(targets.main.default, bucket.name + '-log')
     log_bucket.set_as_logging_target()
     bucket.enable_logging(target_bucket=log_bucket, target_prefix=bucket.name)
     bucket.disable_logging()
@@ -3908,7 +3908,7 @@ def test_bucket_recreate_not_overriding():
     names = [e.name for e in list(li)]
     eq(names, key_names)
 
-    bucket2 = get_new_bucket(targets.main, bucket.name)
+    bucket2 = get_new_bucket(targets.main.default, bucket.name)
 
     li = bucket.list()
 
@@ -4001,7 +4001,7 @@ def test_object_copy_diff_bucket():
 @attr(operation='copy from an inaccessible bucket')
 @attr(assertion='fails w/AttributeError')
 def test_object_copy_not_owned_bucket():
-    buckets = [get_new_bucket(), get_new_bucket(targets.alt)]
+    buckets = [get_new_bucket(), get_new_bucket(targets.alt.default)]
     print repr(buckets[1])
     key = buckets[0].new_key('foo123bar')
     key.set_contents_from_string('foo')