git-server-git.apps.pok.os.sepia.ceph.com Git - s3-tests.git/commitdiff
s3tests: improve cross region copy, sync meta
author Yehuda Sadeh <yehuda@inktank.com>
Sat, 27 Jul 2013 03:33:48 +0000 (20:33 -0700)
committer Yehuda Sadeh <yehuda@inktank.com>
Sat, 27 Jul 2013 03:33:48 +0000 (20:33 -0700)
Can now configure the sync agent REST address in order to force
a sync operation. Another option is to set a wait time for
metadata sync.

Signed-off-by: Yehuda Sadeh <yehuda@inktank.com>
s3tests/functional/__init__.py
s3tests/functional/test_s3.py

diff --git a/s3tests/functional/__init__.py b/s3tests/functional/__init__.py
index 69bc1be93e20f15f9cee05c025d617126c79eafb..88b17e8bdcd4711c6fc0314fc1350fabc2c1fe29 100644
--- a/s3tests/functional/__init__.py
+++ b/s3tests/functional/__init__.py
@@ -84,6 +84,9 @@ class TargetConfig:
         self.api_name = ''
         self.is_master = False
         self.is_secure = False
+        self.sync_agent_addr = None
+        self.sync_agent_port = 0
+        self.sync_meta_wait = 0
         try:
             self.api_name = cfg.get(section, 'api_name')
         except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
@@ -113,6 +116,21 @@ class TargetConfig:
         except ConfigParser.NoOptionError:
             raw_calling_format = 'ordinary'
 
+        try:
+            self.sync_agent_addr = cfg.get(section, 'sync_agent_addr')
+        except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
+            pass
+
+        try:
+            self.sync_agent_port = cfg.getint(section, 'sync_agent_port')
+        except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
+            pass
+
+        try:
+            self.sync_meta_wait = cfg.getint(section, 'sync_meta_wait')
+        except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
+            pass
+
         try:
             self.calling_format = calling_formats[raw_calling_format]
         except KeyError:
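
For reference, the three options read above live in the per-target section of
the s3tests configuration file. A minimal sketch of what such a section might
contain (the section name and values here are illustrative, not part of this
commit):

    [s3 main]
    # REST endpoint of the sync agent; leaving it unset skips the forced sync
    sync_agent_addr = 10.0.0.5
    sync_agent_port = 8080
    # seconds to sleep after triggering a metadata sync; 0 means no wait
    sync_meta_wait = 5
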
diff --git a/s3tests/functional/test_s3.py b/s3tests/functional/test_s3.py
index 98ba8b4a2bf33cb47865e76fc83bd1e674b12da7..483bb16afd0c38e20a234040847af43caa955e53 100644
--- a/s3tests/functional/test_s3.py
+++ b/s3tests/functional/test_s3.py
@@ -4,6 +4,8 @@ import boto.s3.connection
 import boto.s3.acl
 import bunch
 import datetime
+import time
+import requests  # used by region_sync_meta below (add only if not already imported)
 import email.utils
 import isodate
 import nose
@@ -4559,29 +4561,66 @@ def test_region_bucket_create_secondary_access_remove_master():
 
         conn.delete_bucket(bucket)
 
+def region_sync_meta(conf):
+    if conf.sync_agent_addr:
+        ret = requests.post('http://{addr}:{port}/metadata/partial'.format(addr=conf.sync_agent_addr, port=conf.sync_agent_port))
+        eq(ret.status_code, 200)
+    if conf.sync_meta_wait:
+        time.sleep(conf.sync_meta_wait)
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='create on one region, access in another')
+@attr(assertion='can\'t access in other region')
+@attr('multiregion')
+def test_region_bucket_create_master_access_remove_secondary():
+    assert_can_test_multiregion()
+
+    master = targets.main.master
+    master_conn = master.connection
+
+    for r in targets.main.secondaries:
+        conn = r.connection
+        bucket = get_new_bucket(master)
+
+        region_sync_meta(r.conf)
+
+        e = assert_raises(boto.exception.S3ResponseError, conn.get_bucket, bucket.name)
+        eq(e.status, 301)
+
+        e = assert_raises(boto.exception.S3ResponseError, conn.delete_bucket, bucket.name)
+        eq(e.status, 301)
+
+        master_conn.delete_bucket(bucket)
+
 @attr(resource='object')
 @attr(method='copy')
-@attr(operation='cread object in one region, read in another')
+@attr(operation='copy object between regions, verify')
 @attr(assertion='can read object')
 @attr('multiregion')
 def test_region_copy_object():
     assert_can_test_multiregion()
 
     master = targets.main.master
-
     master_conn = master.connection
 
     master_bucket = get_new_bucket(master)
-    for r in targets.main.secondaries:
-        conn = r.connection
-        bucket = get_new_bucket(r)
+    for file_size in (1024, 1024 * 1024, 10 * 1024 * 1024,
+                      100 * 1024 * 1024):
+        for r in targets.main.secondaries:
+            conn = r.connection
+            bucket = get_new_bucket(r)
 
-        content = 'testcontent'
+            content = 'testcontent'
 
-        key = bucket.new_key('testobj')
-        key.set_contents_from_string(content)
+            key = bucket.new_key('testobj')
+            fp_a = FakeWriteFile(file_size, 'A')
+            key.set_contents_from_file(fp_a)
 
-        master_bucket.copy_key('testobj-dest', bucket.name, key.name)
+            dest_key = master_bucket.copy_key('testobj-dest', bucket.name, key.name)
 
-        bucket.delete_key(key.name)
-        conn.delete_bucket(bucket)
+            # verify dest
+            _verify_atomic_key_data(dest_key, file_size, 'A')
+
+            bucket.delete_key(key.name)
+            conn.delete_bucket(bucket)
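
Note: the revised copy test relies on two helpers that already exist elsewhere
in test_s3.py, FakeWriteFile and _verify_atomic_key_data, plus eq from
nose.tools. The sketch below is a simplified approximation of their behavior
for context, not the upstream implementation:

    # Simplified approximation of helpers defined elsewhere in test_s3.py;
    # the upstream versions differ in detail (e.g. interrupt support).
    import os

    class FakeWriteFile(object):
        """File-like object that yields `size` copies of `char` when read."""
        def __init__(self, size, char='A'):
            self.size = size
            self.char = char
            self.offset = 0

        def seek(self, offset, whence=os.SEEK_SET):
            if whence == os.SEEK_SET:
                self.offset = offset
            elif whence == os.SEEK_END:
                self.offset = self.size + offset
            elif whence == os.SEEK_CUR:
                self.offset += offset

        def tell(self):
            return self.offset

        def read(self, size=-1):
            # clamp reads to the remaining virtual file size
            if size < 0 or self.offset + size > self.size:
                size = self.size - self.offset
            self.offset += size
            return self.char * size

    def _verify_atomic_key_data(key, size, char='A'):
        """Read the key back and assert it holds exactly `size` copies of `char`."""
        data = key.get_contents_as_string()
        eq(len(data), size)
        eq(data, char * size)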