git-server-git.apps.pok.os.sepia.ceph.com Git - s3-tests.git/commitdiff
s3tests: modify cross region copy, sync triggering
authorYehuda Sadeh <yehuda@inktank.com>
Sat, 27 Jul 2013 05:19:36 +0000 (22:19 -0700)
committerYehuda Sadeh <yehuda@inktank.com>
Sat, 27 Jul 2013 05:19:36 +0000 (22:19 -0700)
Signed-off-by: Yehuda Sadeh <yehuda@inktank.com>
s3tests/functional/__init__.py
s3tests/functional/test_s3.py

index 88b17e8bdcd4711c6fc0314fc1350fabc2c1fe29..a69a1757a5f09c0cb4c9db4fc4ac8ede8b1fe4ac 100644 (file)
@@ -179,6 +179,9 @@ class RegionsConn:
         self.master = None
         self.secondaries = []
 
+    def iteritems(self):
+        return self.m.iteritems()
+
     def add(self, name, conn):
         self.m[name] = conn
         if not self.default:
index 483bb16afd0c38e20a234040847af43caa955e53..ea93b54005f8a0d67499d88a00e35a17f5cd1b20 100644 (file)
@@ -4560,12 +4560,18 @@ def test_region_bucket_create_secondary_access_remove_master():
 
         conn.delete_bucket(bucket)
 
-def region_sync_meta(conf):
-    if conf.sync_agent_addr:
-        ret = requests.post('http://{addr}:{port}/metadata/partial'.format(addr = conf.sync_agent_addr, port = conf.sync_agent_port))
-        eq(ret.status_code, 200)
-    if conf.sync_meta_wait:
-        time.sleep(conf.sync_meta_wait)
+# syncs all the regions except for the one passed in
+def region_sync_meta(targets, region):
+
+    for (k, r) in targets.iteritems():
+        if r == region:
+            continue
+        conf = r.conf
+        if conf.sync_agent_addr:
+            ret = requests.post('http://{addr}:{port}/metadata/partial'.format(addr = conf.sync_agent_addr, port = conf.sync_agent_port))
+            eq(ret.status_code, 200)
+        if conf.sync_meta_wait:
+            time.sleep(conf.sync_meta_wait)
 
 @attr(resource='bucket')
 @attr(method='get')
@@ -4582,7 +4588,7 @@ def test_region_bucket_create_master_access_remove_secondary():
         conn = r.connection
         bucket = get_new_bucket(master)
 
-        region_sync_meta(r.conf)
+        region_sync_meta(targets.main, master)
 
         e = assert_raises(boto.exception.S3ResponseError, conn.get_bucket, bucket.name)
         eq(e.status, 301)
@@ -4590,7 +4596,6 @@ def test_region_bucket_create_master_access_remove_secondary():
         e = assert_raises(boto.exception.S3ResponseError, conn.delete_bucket, bucket.name)
         eq(e.status, 301)
 
-
         master_conn.delete_bucket(bucket)
 
 @attr(resource='object')
@@ -4601,26 +4606,39 @@ def test_region_bucket_create_master_access_remove_secondary():
 def test_region_copy_object():
     assert_can_test_multiregion()
 
-    master = targets.main.master
-    master_conn = master.connection
+    for (k, dest) in targets.main.iteritems():
+        dest_conn = dest.connection
+
+        dest_bucket = get_new_bucket(dest)
+        print 'created new dest bucket ', dest_bucket.name
+        region_sync_meta(targets.main, dest)
+
+        for file_size in (1024, 1024 * 1024, 10 * 1024 * 1024,
+                          100 * 1024 * 1024):
+            for (k2, r) in targets.main.iteritems():
+                if r == dest_conn:
+                    continue
+                conn = r.connection
+
+                bucket = get_new_bucket(r)
+                print 'created bucket', bucket.name
+                region_sync_meta(targets.main, r)
+
+                content = 'testcontent'
 
-    master_bucket = get_new_bucket(master)
-    for file_size in (1024, 1024 * 1024, 10 * 1024 * 1024,
-                      100 * 1024 * 1024):
-        for r in targets.main.secondaries:
-            conn = r.connection
-            bucket = get_new_bucket(r)
+                key = bucket.new_key('testobj')
+                fp_a = FakeWriteFile(file_size, 'A')
+                key.set_contents_from_file(fp_a)
 
-            content = 'testcontent'
+                dest_key = dest_bucket.copy_key('testobj-dest', bucket.name, key.name)
 
-            key = bucket.new_key('testobj')
-            fp_a = FakeWriteFile(file_size, 'A')
-            key.set_contents_from_file(fp_a)
+                # verify dest
+                _verify_atomic_key_data(dest_key, file_size, 'A')
 
-            dest_key = master_bucket.copy_key('testobj-dest', bucket.name, key.name)
+                bucket.delete_key(key.name)
+                print 'removing bucket', bucket.name
+                conn.delete_bucket(bucket)
 
-            # verify dest
-            _verify_atomic_key_data(dest_key, file_size, 'A')
+                dest_bucket.delete_key(dest_key.name)
 
-            bucket.delete_key(key.name)
-            conn.delete_bucket(bucket)
+        dest_conn.delete_bucket(dest_bucket)