Multisite tests refactored to run both boto2 and boto3 rgw_system tests
author    Shilpa Jagannath <smanjara@redhat.com>
          Thu, 5 Jul 2018 11:58:47 +0000 (17:28 +0530)
committer Shilpa Jagannath <smanjara@redhat.com>
          Thu, 5 Jul 2018 11:58:47 +0000 (17:28 +0530)
30 files changed:
qa/suites/rgw/multisite-ansible/% [new file with mode: 0644]
qa/suites/rgw/multisite-ansible/clusters/4-node.yaml [new file with mode: 0644]
qa/suites/rgw/multisite-ansible/tasks/CEPH-10118.yaml [new file with mode: 0644]
qa/suites/rgw/multisite-ansible/tasks/CEPH-10119.yaml [new file with mode: 0644]
qa/suites/rgw/multisite-ansible/tasks/CEPH-10140.yaml [new file with mode: 0644]
qa/suites/rgw/multisite-ansible/tasks/CEPH-10141.yaml [new file with mode: 0644]
qa/suites/rgw/multisite-ansible/tasks/Mbuckets.yaml [new file with mode: 0644]
qa/suites/rgw/multisite-ansible/tasks/Mbuckets_sharding.yaml [new file with mode: 0644]
qa/suites/rgw/multisite-ansible/tasks/Mbuckets_with_Nobjects_create.yaml [new file with mode: 0644]
qa/suites/rgw/multisite-ansible/tasks/Mbuckets_with_Nobjects_delete.yaml [new file with mode: 0644]
qa/suites/rgw/multisite-ansible/tasks/Mbuckets_with_Nobjects_download.yaml [new file with mode: 0644]
qa/suites/rgw/multisite-ansible/tasks/Mbuckets_with_Nobjects_sharding.yaml [new file with mode: 0644]
qa/suites/rgw/multisite-ansible/tasks/test_Mbuckets.yaml [new file with mode: 0644]
qa/suites/rgw/multisite-ansible/tasks/test_Mbuckets_with_Nobjects.yaml [new file with mode: 0644]
qa/suites/rgw/multisite-ansible/tasks/test_acls.yaml [new file with mode: 0644]
qa/suites/rgw/multisite-ansible/tasks/test_acls_all_usrs.yaml [new file with mode: 0644]
qa/suites/rgw/multisite-ansible/tasks/test_acls_copy_obj.yaml [new file with mode: 0644]
qa/suites/rgw/multisite-ansible/tasks/test_acls_reset.yaml [new file with mode: 0644]
qa/suites/rgw/multisite-ansible/tasks/test_basic_versioning.yaml [new file with mode: 0644]
qa/suites/rgw/multisite-ansible/tasks/test_bucket_with_delete.yaml [new file with mode: 0644]
qa/suites/rgw/multisite-ansible/tasks/test_delete_key_versions.yaml [new file with mode: 0644]
qa/suites/rgw/multisite-ansible/tasks/test_multipart_upload.yaml [new file with mode: 0644]
qa/suites/rgw/multisite-ansible/tasks/test_multipart_upload_cancel.yaml [new file with mode: 0644]
qa/suites/rgw/multisite-ansible/tasks/test_multipart_upload_download.yaml [new file with mode: 0644]
qa/suites/rgw/multisite-ansible/tasks/test_suspend_versioning.yaml [new file with mode: 0644]
qa/suites/rgw/multisite-ansible/tasks/test_version_with_revert.yaml [new file with mode: 0644]
qa/tasks/multisite_test.py [new file with mode: 0644]
qa/tasks/multisite_test_v1.py [new file with mode: 0644]
qa/tasks/new_rgw_multisite.py [new file with mode: 0644]
qa/tasks/verify_io.py [new file with mode: 0644]

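The heart of the refactor is the dispatch in qa/tasks/multisite_test.py (added below): v1 configs drive the boto2 test tree, where each test-name is also the script name, while v2 configs drive the boto3 tree, where several suite yamls share one parameterized script via tests_mapper_v2. A minimal sketch of that resolution, using names taken from the diff; resolve_script() is a hypothetical helper, the committed task inlines the same logic in execute_v1()/execute_v2():

    # Sketch of the test-name -> script resolution implemented by
    # multisite_test.py below (illustrative helper, not part of the commit).
    tests_mapper_v2 = {
        'Mbuckets': 'test_Mbuckets',
        'Mbuckets_sharding': 'test_Mbuckets',
        'Mbuckets_with_Nobjects_create': 'test_Mbuckets_with_Nobjects',
    }

    def resolve_script(test_name, test_dir_version):
        if test_dir_version == 'v1':
            # boto2 tree: one script per test, same basename as the yaml
            return test_name + '.py'
        # boto3 tree: several test yamls drive one parameterized script
        return tests_mapper_v2[test_name] + '.py'

    assert resolve_script('test_Mbuckets', 'v1') == 'test_Mbuckets.py'
    assert resolve_script('Mbuckets_sharding', 'v2') == 'test_Mbuckets.py'
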
diff --git a/qa/suites/rgw/multisite-ansible/% b/qa/suites/rgw/multisite-ansible/%
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/qa/suites/rgw/multisite-ansible/clusters/4-node.yaml b/qa/suites/rgw/multisite-ansible/clusters/4-node.yaml
new file mode 100644 (file)
index 0000000..75ad625
--- /dev/null
@@ -0,0 +1,23 @@
+roles:
+- - c1.mon.a
+  - c1.mgr.x
+  - c1.mon.b
+  - c1.osd.0
+  - c1.osd.1
+  - c1.osd.2
+- - c1.osd.3
+  - c1.osd.4
+  - c1.osd.5
+  - c1.rgw.0
+  - c1.client.0
+- - c2.mon.a
+  - c2.mgr.y
+  - c2.mon.b
+  - c2.osd.0
+  - c2.osd.1
+  - c2.osd.2
+- - c2.osd.3
+  - c2.osd.4
+  - c2.osd.5
+  - c2.rgw.1
+  - c2.client.1
diff --git a/qa/suites/rgw/multisite-ansible/tasks/CEPH-10118.yaml b/qa/suites/rgw/multisite-ansible/tasks/CEPH-10118.yaml
new file mode 100644 (file)
index 0000000..366c057
--- /dev/null
@@ -0,0 +1,68 @@
+tasks:
+- ssh-keys: null
+- ceph-ansible:
+    vars:
+      cluster: c1
+- ceph-ansible:
+    vars:
+      cluster: c2
+- new_rgw_multisite:
+   realm:
+     name: test-realm
+     is_default: true
+   zonegroups:
+     - name: test-zg
+       is_master: true
+       is_default: true
+       zones:
+         - name: test-zone
+           is_master: true
+           is_default: true
+           endpoints: [c1.rgw.0]
+         - name: test-zone2
+           is_default: true
+           endpoints: [c2.rgw.1]
+- multisite-test.userexec:
+      master_client: c1.rgw.0
+      test_dir_version: v2
+      master_config:
+        cluster_name: c1
+        user_count: 3
+      target_client: c2.rgw.1
+- sleep:
+   duration: 60
+- multisite-test:
+     test-name: Mbuckets
+     test_dir_version: v2
+     master_client: c1.rgw.0
+     target_client: c2.rgw.1
+     bucket_count: 5
+     cluster_name: c1
+     test_ops:
+          create_bucket: true
+          sharding:
+            enable: false
+            max_shards: 0
+- sleep:
+   duration: 60
+- multisite-test.pull-io-info:
+- ceph-ipmi.poweroff: [c1.rgw.0]
+- multisite-test:
+     test-name: Mbuckets_with_Nobjects_create
+     test_dir_version: v2
+     master_client: c1.rgw.0
+     target_client: c2.rgw.1
+     objects_count: 10
+     objects_size_range:
+      min: 5
+      max: 15
+     test_ops:
+          create_object: true
+          download_object: false
+          delete_bucket_object: false
+- sleep:
+   duration: 60
+- ceph-ipmi.poweron: [c1.rgw.0]
+- multisite-test.pull-io-info:
+- verify-io:
+      verification_script: read_io_info
diff --git a/qa/suites/rgw/multisite-ansible/tasks/CEPH-10119.yaml b/qa/suites/rgw/multisite-ansible/tasks/CEPH-10119.yaml
new file mode 100644 (file)
index 0000000..fe987df
--- /dev/null
@@ -0,0 +1,50 @@
+tasks:
+- ssh-keys: null
+- ceph-ansible:
+    vars:
+      cluster: c1
+- ceph-ansible:
+    vars:
+      cluster: c2
+- new_rgw_multisite:
+   realm:
+     name: test-realm
+     is_default: true
+   zonegroups:
+     - name: test-zg
+       is_master: true
+       is_default: true
+       zones:
+         - name: test-zone
+           is_master: true
+           is_default: true
+           endpoints: [c1.rgw.0]
+         - name: test-zone2
+           is_default: true
+           endpoints: [c2.rgw.1]
+- multisite-test.userexec:
+      master_client: c1.rgw.0
+      test_dir_version: v1
+      master_config:
+        cluster_name: c1
+        user_count: 3
+      target_client: c2.rgw.1
+- sleep:
+   duration: 60
+- ceph-ipmi.poweroff: [c2.rgw.1]
+- multisite-test:
+      master_client: c1.rgw.0
+      target_client: c1.rgw.0
+      target_config:
+        bucket_count: 2
+        objects_count: 10
+        max_file_size: 10
+        min_file_size: 5
+      test-name: test_Mbuckets_with_Nobjects
+      test_dir_version: v1
+- ceph-ipmi.poweron: [c2.rgw.1]
+- sleep:
+   duration: 60
+- multisite-test.pull-io-info:
+- verify-io:
+      verification_script: read_io_info
diff --git a/qa/suites/rgw/multisite-ansible/tasks/CEPH-10140.yaml b/qa/suites/rgw/multisite-ansible/tasks/CEPH-10140.yaml
new file mode 100644 (file)
index 0000000..5317b1e
--- /dev/null
@@ -0,0 +1,50 @@
+tasks:
+- ssh-keys: null
+- ceph-ansible:
+    vars:
+      cluster: c1
+- ceph-ansible:
+    vars:
+      cluster: c2
+- new_rgw_multisite:
+   realm:
+     name: test-realm
+     is_default: true
+   zonegroups:
+     - name: test-zg
+       is_master: true
+       is_default: true
+       zones:
+         - name: test-zone
+           is_master: true
+           is_default: true
+           endpoints: [c1.rgw.0]
+         - name: test-zone2
+           is_default: true
+           is_read_only: true
+           endpoints: [c2.rgw.1]
+- multisite-test.userexec:
+      master_client: c1.rgw.0
+      test_dir_version: v1
+      master_config:
+        cluster_name: c1
+        user_count: 3
+      target_client: c2.rgw.1
+- sleep:
+   duration: 60
+- multisite-test:
+      master_client: c1.rgw.0
+      target_client: c1.rgw.0
+      target_config:
+        bucket_count: 2
+        objects_count: 20
+        max_file_size: 10
+        min_file_size: 5
+      test-name: test_Mbuckets_with_Nobjects
+      test_dir_version: v1
+- sleep:
+   duration: 60
+- multisite-test.pull-io-info:
+- verify-io:
+      verification_script: read_io_info
+
diff --git a/qa/suites/rgw/multisite-ansible/tasks/CEPH-10141.yaml b/qa/suites/rgw/multisite-ansible/tasks/CEPH-10141.yaml
new file mode 100644 (file)
index 0000000..2d65f0e
--- /dev/null
@@ -0,0 +1,72 @@
+tasks:
+- ssh-keys: null
+- ceph-ansible:
+    vars:
+      cluster: c1
+- ceph-ansible:
+    vars:
+      cluster: c2
+- new-rgw-multisite:
+   realm:
+     name: test-realm
+     is_default: true
+   zonegroups:
+     - name: test-zg
+       is_master: true
+       is_default: true
+       zones:
+         - name: test-zone
+           is_master: true
+           is_default: true
+           endpoints: [c1.rgw.0]
+         - name: test-zone2
+           is_default: true
+           is_read_only: true
+           endpoints: [c2.rgw.1]
+- multisite-test.userexec:
+      master_client: c1.rgw.0
+      test_dir_version: v1
+      master_config:
+        cluster_name: c1
+        user_count: 3
+      target_client: c2.rgw.1
+- sleep:
+   duration: 60
+- ceph-ipmi.poweroff: [c1.rgw.0]
+- new-rgw-multisite.failover:
+- sleep:
+   duration: 30
+- multisite-test:
+      test-name: test_basic_versioning
+      test_dir_version: v1
+      master_client: c2.rgw.1
+      target_client: c2.rgw.1
+      target_config:
+          bucket_count: 1
+          objects_count: 2
+          version_count: 3
+          min_file_size: 10
+          max_file_size: 20
+- ceph-ipmi.poweron: [c1.rgw.0]
+- sleep:
+   duration: 60
+- new-rgw-multisite.failback:
+- sleep:
+   duration: 120
+- verify-io:
+      verification_script: read_io_info
+- multisite-test:
+      test-name: test_Mbuckets_with_Nobjects
+      test_dir_version: v1
+      master_client: c1.rgw.0
+      target_client: c1.rgw.0
+      target_config:
+        bucket_count: 5
+        objects_count: 5
+        min_file_size: 5
+        max_file_size: 10
+- sleep:
+   duration: 60
+- multisite-test.pull-io-info:
+- verify-io:
+      verification_script: read_io_info
diff --git a/qa/suites/rgw/multisite-ansible/tasks/Mbuckets.yaml b/qa/suites/rgw/multisite-ansible/tasks/Mbuckets.yaml
new file mode 100644 (file)
index 0000000..a9434b9
--- /dev/null
@@ -0,0 +1,50 @@
+tasks:
+- ssh-keys: null
+- ceph-ansible:
+    vars:
+      cluster: c1
+- ceph-ansible:
+    vars:
+      cluster: c2
+- new-rgw-multisite:
+   realm:
+     name: test-realm
+     is_default: true
+   zonegroups:
+     - name: test-zg
+       is_master: true
+       is_default: true
+       zones:
+         - name: test-zone
+           is_master: true
+           is_default: true
+           endpoints: [c1.rgw.0]
+         - name: test-zone2
+           is_default: true
+           endpoints: [c2.rgw.1]
+- multisite-test.userexec:
+      test_dir_version: v2
+      master_client: c1.rgw.0
+      master_config:
+        cluster_name: c1
+        user_count: 3
+      target_client: c2.rgw.1
+- sleep:
+      duration: 60
+- multisite-test:
+     test-name: Mbuckets
+     test_dir_version: v2
+     master_client: c1.rgw.0
+     target_client: c2.rgw.1
+     bucket_count: 5
+     cluster_name: c1
+     test_ops:
+          create_bucket: true
+          sharding:
+            enable: false
+            max_shards: 0
+- sleep:
+      duration: 60
+- multisite-test.pull-io-info:
+- verify-io:
+      verification_script: read_io_info
\ No newline at end of file
diff --git a/qa/suites/rgw/multisite-ansible/tasks/Mbuckets_sharding.yaml b/qa/suites/rgw/multisite-ansible/tasks/Mbuckets_sharding.yaml
new file mode 100644 (file)
index 0000000..aaf00dc
--- /dev/null
@@ -0,0 +1,50 @@
+tasks:
+- ssh-keys: null
+- ceph-ansible:
+    vars:
+      cluster: c1
+- ceph-ansible:
+    vars:
+      cluster: c2
+- new-rgw-multisite:
+   realm:
+     name: test-realm
+     is_default: true
+   zonegroups:
+     - name: test-zg
+       is_master: true
+       is_default: true
+       zones:
+         - name: test-zone
+           is_master: true
+           is_default: true
+           endpoints: [c1.rgw.0]
+         - name: test-zone2
+           is_default: true
+           endpoints: [c2.rgw.1]
+- multisite-test.userexec:
+      test_dir_version: v2
+      master_client: c1.rgw.0
+      master_config:
+        cluster_name: c1
+        user_count: 3
+      target_client: c2.rgw.1
+- sleep:
+      duration: 60
+- multisite-test:
+     test-name: Mbuckets_sharding
+     test_dir_version: v2
+     master_client: c1.rgw.0
+     target_client: c2.rgw.1
+     bucket_count: 5
+     cluster_name: c2
+     test_ops:
+          create_bucket: true
+          sharding:
+            enable: true
+            max_shards: 32
+- sleep:
+      duration: 60
+- multisite-test.pull-io-info:
+- verify-io:
+      verification_script: read_io_info
\ No newline at end of file
diff --git a/qa/suites/rgw/multisite-ansible/tasks/Mbuckets_with_Nobjects_create.yaml b/qa/suites/rgw/multisite-ansible/tasks/Mbuckets_with_Nobjects_create.yaml
new file mode 100644 (file)
index 0000000..6bbaec9
--- /dev/null
@@ -0,0 +1,66 @@
+tasks:
+- ssh-keys: null
+- ceph-ansible:
+    vars:
+      cluster: c1
+- ceph-ansible:
+    vars:
+      cluster: c2
+- new-rgw-multisite:
+   realm:
+     name: test-realm
+     is_default: true
+   zonegroups:
+     - name: test-zg
+       is_master: true
+       is_default: true
+       zones:
+         - name: test-zone
+           is_master: true
+           is_default: true
+           endpoints: [c1.rgw.0]
+         - name: test-zone2
+           is_default: true
+           endpoints: [c2.rgw.1]
+- multisite-test.userexec:
+      test_dir_version: v2
+      master_client: c1.rgw.0
+      master_config:
+        cluster_name: c1
+        user_count: 3
+      target_client: c2.rgw.1
+- sleep:
+      duration: 60
+- multisite-test:
+     test-name: Mbuckets
+     test_dir_version: v2
+     master_client: c1.rgw.0
+     target_client: c2.rgw.1
+     bucket_count: 5
+     cluster_name: c1
+     test_ops:
+          create_bucket: true
+          sharding:
+            enable: false
+            max_shards: 0
+- sleep:
+     duration: 60
+- multisite-test.pull-io-info:
+- multisite-test:
+     test-name: Mbuckets_with_Nobjects_create
+     test_dir_version: v2
+     master_client: c1.rgw.0
+     target_client: c2.rgw.1
+     objects_count: 10
+     objects_size_range:
+      min: 5
+      max: 15
+     test_ops:
+          create_object: true
+          download_object: false
+          delete_bucket_object: false
+- sleep:
+      duration: 60
+- multisite-test.pull-io-info:
+- verify-io:
+      verification_script: read_io_info
\ No newline at end of file
diff --git a/qa/suites/rgw/multisite-ansible/tasks/Mbuckets_with_Nobjects_delete.yaml b/qa/suites/rgw/multisite-ansible/tasks/Mbuckets_with_Nobjects_delete.yaml
new file mode 100644 (file)
index 0000000..eecbc65
--- /dev/null
@@ -0,0 +1,66 @@
+tasks:
+- ssh-keys: null
+- ceph-ansible:
+    vars:
+      cluster: c1
+- ceph-ansible:
+    vars:
+      cluster: c2
+- new-rgw-multisite:
+   realm:
+     name: test-realm
+     is_default: true
+   zonegroups:
+     - name: test-zg
+       is_master: true
+       is_default: true
+       zones:
+         - name: test-zone
+           is_master: true
+           is_default: true
+           endpoints: [c1.rgw.0]
+         - name: test-zone2
+           is_default: true
+           endpoints: [c2.rgw.1]
+- multisite-test.userexec:
+      test_dir_version: v2
+      master_client: c1.rgw.0
+      master_config:
+        cluster_name: c1
+        user_count: 3
+      target_client: c2.rgw.1
+- sleep:
+      duration: 60
+- multisite-test:
+     test-name: Mbuckets
+     test_dir_version: v2
+     master_client: c1.rgw.0
+     target_client: c2.rgw.1
+     bucket_count: 5
+     cluster_name: c1
+     test_ops:
+          create_bucket: true
+          sharding:
+            enable: false
+            max_shards: 0
+- sleep:
+     duration: 60
+- multisite-test.pull-io-info:
+- multisite-test:
+     test-name: Mbuckets_with_Nobjects_delete
+     test_dir_version: v2
+     master_client: c1.rgw.0
+     target_client: c2.rgw.1
+     objects_count: 5
+     objects_size_range:
+      min: 5
+      max: 15
+     test_ops:
+          create_object: true
+          download_object: false
+          delete_bucket_object: true
+- sleep:
+      duration: 60
+- multisite-test.pull-io-info:
+- verify-io:
+      verification_script: read_io_info
diff --git a/qa/suites/rgw/multisite-ansible/tasks/Mbuckets_with_Nobjects_download.yaml b/qa/suites/rgw/multisite-ansible/tasks/Mbuckets_with_Nobjects_download.yaml
new file mode 100644 (file)
index 0000000..ebe0382
--- /dev/null
@@ -0,0 +1,67 @@
+tasks:
+- ssh-keys: null
+- ceph-ansible:
+    vars:
+      cluster: c1
+- ceph-ansible:
+    vars:
+      cluster: c2
+- new-rgw-multisite:
+   realm:
+     name: test-realm
+     is_default: true
+   zonegroups:
+     - name: test-zg
+       is_master: true
+       is_default: true
+       zones:
+         - name: test-zone
+           is_master: true
+           is_default: true
+           endpoints: [c1.rgw.0]
+         - name: test-zone2
+           is_default: true
+           endpoints: [c2.rgw.1]
+- multisite-test.userexec:
+      test_dir_version: v2
+      master_client: c1.rgw.0
+      master_config:
+        cluster_name: c1
+        user_count: 3
+      target_client: c2.rgw.1
+- sleep:
+      duration: 60
+- multisite-test:
+     test-name: Mbuckets
+     test_dir_version: v2
+     master_client: c1.rgw.0
+     target_client: c2.rgw.1
+     bucket_count: 5
+     cluster_name: c1
+     test_ops:
+          create_bucket: true
+          sharding:
+            enable: false
+            max_shards: 0
+- sleep:
+      duration: 60
+- multisite-test.pull-io-info:
+- multisite-test:
+     test-name: Mbuckets_with_Nobjects_download
+     test_dir_version: v2
+     master_client: c1.rgw.0
+     target_client: c2.rgw.1
+     objects_count: 5
+     objects_size_range:
+      min: 5
+      max: 15
+     test_ops:
+          create_object: true
+          download_object: true
+          delete_bucket_object: false
+- sleep:
+      duration: 60
+- multisite-test.pull-io-info:
+- verify-io:
+      verification_script: read_io_info
+
diff --git a/qa/suites/rgw/multisite-ansible/tasks/Mbuckets_with_Nobjects_sharding.yaml b/qa/suites/rgw/multisite-ansible/tasks/Mbuckets_with_Nobjects_sharding.yaml
new file mode 100644 (file)
index 0000000..10a62a3
--- /dev/null
@@ -0,0 +1,66 @@
+tasks:
+- ssh-keys: null
+- ceph-ansible:
+    vars:
+      cluster: c1
+- ceph-ansible:
+    vars:
+      cluster: c2
+- new-rgw-multisite:
+   realm:
+     name: test-realm
+     is_default: true
+   zonegroups:
+     - name: test-zg
+       is_master: true
+       is_default: true
+       zones:
+         - name: test-zone
+           is_master: true
+           is_default: true
+           endpoints: [c1.rgw.0]
+         - name: test-zone2
+           is_default: true
+           endpoints: [c2.rgw.1]
+- multisite-test.userexec:
+      test_dir_version: v2
+      master_client: c1.rgw.0
+      master_config:
+        cluster_name: c1
+        user_count: 3
+      target_client: c2.rgw.1
+- sleep:
+      duration: 60
+- multisite-test:
+     test-name: Mbuckets_sharding
+     test_dir_version: v2
+     master_client: c1.rgw.0
+     target_client: c2.rgw.1
+     bucket_count: 5
+     cluster_name: c2
+     test_ops:
+          create_bucket: true
+          sharding:
+            enable: true
+            max_shards: 32
+- sleep:
+      duration: 60
+- multisite-test.pull-io-info:
+- multisite-test:
+     test-name: Mbuckets_with_Nobjects_sharding
+     test_dir_version: v2
+     master_client: c1.rgw.0
+     target_client: c2.rgw.1
+     objects_count: 5
+     objects_size_range:
+      min: 1000
+      max: 1500
+     test_ops:
+          create_object: true
+          download_object: false
+          delete_bucket_object: true
+- sleep:
+      duration: 60
+- multisite-test.pull-io-info:
+- verify-io:
+      verification_script: read_io_info
diff --git a/qa/suites/rgw/multisite-ansible/tasks/test_Mbuckets.yaml b/qa/suites/rgw/multisite-ansible/tasks/test_Mbuckets.yaml
new file mode 100644 (file)
index 0000000..5b9cea0
--- /dev/null
@@ -0,0 +1,44 @@
+tasks:
+- ssh-keys: null
+- ceph-ansible:
+    vars:
+      cluster: c1
+- ceph-ansible:
+    vars:
+      cluster: c2
+- new-rgw-multisite:
+   realm:
+     name: test-realm
+     is_default: true
+   zonegroups:
+     - name: test-zg
+       is_master: true
+       is_default: true
+       zones:
+         - name: test-zone
+           is_master: true
+           is_default: true
+           endpoints: [c1.rgw.0]
+         - name: test-zone2
+           is_default: true
+           endpoints: [c2.rgw.1]
+- multisite-test.userexec:
+      master_client: c1.rgw.0
+      test_dir_version: v1
+      master_config:
+        cluster_name: c1
+        user_count: 3
+      target_client: c2.rgw.1
+- sleep:
+      duration: 60
+- multisite-test:
+      test-name: test_Mbuckets
+      test_dir_version: v1
+      master_client: c1.rgw.0
+      target_client: c2.rgw.1
+      bucket_count: 5
+- sleep:
+      duration: 60
+- multisite-test.pull-io-info:
+- verify-io:
+      verification_script: read_io_info
diff --git a/qa/suites/rgw/multisite-ansible/tasks/test_Mbuckets_with_Nobjects.yaml b/qa/suites/rgw/multisite-ansible/tasks/test_Mbuckets_with_Nobjects.yaml
new file mode 100644 (file)
index 0000000..b30d3ca
--- /dev/null
@@ -0,0 +1,47 @@
+tasks:
+- ssh-keys: null
+- ceph-ansible:
+    vars:
+      cluster: c1
+- ceph-ansible:
+    vars:
+      cluster: c2
+- new-rgw-multisite:
+   realm:
+     name: test-realm
+     is_default: true
+   zonegroups:
+     - name: test-zg
+       is_master: true
+       is_default: true
+       zones:
+         - name: test-zone
+           is_master: true
+           is_default: true
+           endpoints: [c1.rgw.0]
+         - name: test-zone2
+           is_default: true
+           endpoints: [c2.rgw.1]
+- multisite-test.userexec:
+      master_client: c1.rgw.0
+      test_dir_version: v1
+      master_config:
+        cluster_name: c1
+        user_count: 3
+      target_client: c2.rgw.1
+- sleep:
+      duration: 60
+- multisite-test:
+      test-name: test_Mbuckets_with_Nobjects
+      test_dir_version: v1
+      master_client: c1.rgw.0
+      target_client: c2.rgw.1
+      bucket_count: 5
+      objects_count: 5
+      min_file_size: 5
+      max_file_size: 10
+- sleep:
+      duration: 60
+- multisite-test.pull-io-info:
+- verify-io:
+      verification_script: read_io_info
diff --git a/qa/suites/rgw/multisite-ansible/tasks/test_acls.yaml b/qa/suites/rgw/multisite-ansible/tasks/test_acls.yaml
new file mode 100644 (file)
index 0000000..b0aaa67
--- /dev/null
@@ -0,0 +1,47 @@
+tasks:
+- ssh-keys: null
+- ceph-ansible:
+    vars:
+      cluster: c1
+- ceph-ansible:
+    vars:
+      cluster: c2
+- new-rgw-multisite:
+   realm:
+     name: test-realm
+     is_default: true
+   zonegroups:
+     - name: test-zg
+       is_master: true
+       is_default: true
+       zones:
+         - name: test-zone
+           is_master: true
+           is_default: true
+           endpoints: [c1.rgw.0]
+         - name: test-zone2
+           is_default: true
+           endpoints: [c2.rgw.1]
+- multisite-test.userexec:
+      master_client: c1.rgw.0
+      test_dir_version: v1
+      master_config:
+        cluster_name: c1
+        user_count: 3
+      target_client: c2.rgw.1
+- sleep:
+      duration: 60
+- multisite-test:
+      test-name: test_acls
+      test_dir_version: v1
+      master_client: c1.rgw.0
+      target_client: c2.rgw.1
+      target_config:
+          bucket_count: 5
+          objects_count: 10
+          min_file_size: 10
+          max_file_size: 20
+- sleep:
+      duration: 60
+- multisite-test.pull-io-info:
+
diff --git a/qa/suites/rgw/multisite-ansible/tasks/test_acls_all_usrs.yaml b/qa/suites/rgw/multisite-ansible/tasks/test_acls_all_usrs.yaml
new file mode 100644 (file)
index 0000000..90223ed
--- /dev/null
@@ -0,0 +1,47 @@
+tasks:
+- ssh-keys: null
+- ceph-ansible:
+    vars:
+      cluster: c1
+- ceph-ansible:
+    vars:
+      cluster: c2
+- new-rgw-multisite:
+   realm:
+     name: test-realm
+     is_default: true
+   zonegroups:
+     - name: test-zg
+       is_master: true
+       is_default: true
+       zones:
+         - name: test-zone
+           is_master: true
+           is_default: true
+           endpoints: [c1.rgw.0]
+         - name: test-zone2
+           is_default: true
+           endpoints: [c2.rgw.1]
+- multisite-test.userexec:
+      master_client: c1.rgw.0
+      test_dir_version: v1
+      master_config:
+        cluster_name: c1
+        user_count: 3
+      target_client: c2.rgw.1
+- sleep:
+      duration: 60
+- multisite-test:
+      test-name: test_acls_all_usrs
+      test_dir_version: v1
+      master_client: c1.rgw.0
+      target_client: c2.rgw.1
+      target_config:
+          bucket_count: 5
+          objects_count: 10
+          min_file_size: 10
+          max_file_size: 20
+- sleep:
+      duration: 60
+- multisite-test.pull-io-info:
+
diff --git a/qa/suites/rgw/multisite-ansible/tasks/test_acls_copy_obj.yaml b/qa/suites/rgw/multisite-ansible/tasks/test_acls_copy_obj.yaml
new file mode 100644 (file)
index 0000000..de63571
--- /dev/null
@@ -0,0 +1,45 @@
+tasks:
+- ssh-keys: null
+- ceph-ansible:
+    vars:
+      cluster: c1
+- ceph-ansible:
+    vars:
+      cluster: c2
+- new-rgw-multisite:
+   realm:
+     name: test-realm
+     is_default: true
+   zonegroups:
+     - name: test-zg
+       is_master: true
+       is_default: true
+       zones:
+         - name: test-zone
+           is_master: true
+           is_default: true
+           endpoints: [c1.rgw.0]
+         - name: test-zone2
+           is_default: true
+           endpoints: [c2.rgw.1]
+- multisite-test.userexec:
+      master_client: c1.rgw.0
+      test_dir_version: v1
+      master_config:
+        cluster_name: c1
+        user_count: 3
+      target_client: c2.rgw.1
+- sleep:
+      duration: 60
+- multisite-test:
+      test-name: test_acls_copy_obj
+      test_dir_version: v1
+      master_client: c1.rgw.0
+      target_client: c2.rgw.1
+      target_config:
+          objects_count: 10
+          min_file_size: 10
+          max_file_size: 20
+- sleep:
+      duration: 60
+- multisite-test.pull-io-info:
diff --git a/qa/suites/rgw/multisite-ansible/tasks/test_acls_reset.yaml b/qa/suites/rgw/multisite-ansible/tasks/test_acls_reset.yaml
new file mode 100644 (file)
index 0000000..ed30757
--- /dev/null
@@ -0,0 +1,46 @@
+tasks:
+- ssh-keys: null
+- ceph-ansible:
+    vars:
+      cluster: c1
+- ceph-ansible:
+    vars:
+      cluster: c2
+- new-rgw-multisite:
+   realm:
+     name: test-realm
+     is_default: true
+   zonegroups:
+     - name: test-zg
+       is_master: true
+       is_default: true
+       zones:
+         - name: test-zone
+           is_master: true
+           is_default: true
+           endpoints: [c1.rgw.0]
+         - name: test-zone2
+           is_default: true
+           endpoints: [c2.rgw.1]
+- multisite-test.userexec:
+      master_client: c1.rgw.0
+      test_dir_version: v1
+      master_config:
+        cluster_name: c1
+        user_count: 3
+      target_client: c2.rgw.1
+- sleep:
+      duration: 60
+- multisite-test:
+      test-name: test_acls_reset
+      test_dir_version: v1
+      master_client: c1.rgw.0
+      target_client: c2.rgw.1
+      target_config:
+          bucket_count: 5
+          objects_count: 10
+          min_file_size: 10
+          max_file_size: 20
+- sleep:
+      duration: 60
+- multisite-test.pull-io-info:
diff --git a/qa/suites/rgw/multisite-ansible/tasks/test_basic_versioning.yaml b/qa/suites/rgw/multisite-ansible/tasks/test_basic_versioning.yaml
new file mode 100644 (file)
index 0000000..78d3847
--- /dev/null
@@ -0,0 +1,49 @@
+tasks:
+- ssh-keys: null
+- ceph-ansible:
+    vars:
+      cluster: c1
+- ceph-ansible:
+    vars:
+      cluster: c2
+- new-rgw-multisite:
+   realm:
+     name: test-realm
+     is_default: true
+   zonegroups:
+     - name: test-zg
+       is_master: true
+       is_default: true
+       zones:
+         - name: test-zone
+           is_master: true
+           is_default: true
+           endpoints: [c1.rgw.0]
+         - name: test-zone2
+           is_default: true
+           endpoints: [c2.rgw.1]
+- multisite-test.userexec:
+      master_client: c1.rgw.0
+      test_dir_version: v1
+      master_config:
+        cluster_name: c1
+        user_count: 3
+      target_client: c2.rgw.1
+- sleep:
+      duration: 60
+- multisite-test:
+      test-name: test_basic_versioning
+      test_dir_version: v1
+      master_client: c1.rgw.0
+      target_client: c2.rgw.1
+      target_config:
+          bucket_count: 5
+          objects_count: 10
+          version_count: 5
+          min_file_size: 10
+          max_file_size: 20
+- sleep:
+      duration: 60
+- multisite-test.pull-io-info:
+- verify-io:
+      verification_script: read_io_info
diff --git a/qa/suites/rgw/multisite-ansible/tasks/test_bucket_with_delete.yaml b/qa/suites/rgw/multisite-ansible/tasks/test_bucket_with_delete.yaml
new file mode 100644 (file)
index 0000000..009bffa
--- /dev/null
@@ -0,0 +1,48 @@
+tasks:
+- ssh-keys: null
+- ceph-ansible:
+    vars:
+      cluster: c1
+- ceph-ansible:
+    vars:
+      cluster: c2
+- new-rgw-multisite:
+   realm:
+     name: test-realm
+     is_default: true
+   zonegroups:
+     - name: test-zg
+       is_master: true
+       is_default: true
+       zones:
+         - name: test-zone
+           is_master: true
+           is_default: true
+           endpoints: [c1.rgw.0]
+         - name: test-zone2
+           is_default: true
+           endpoints: [c2.rgw.1]
+- multisite-test.userexec:
+      master_client: c1.rgw.0
+      test_dir_version: v1
+      master_config:
+        cluster_name: c1
+        user_count: 3
+      target_client: c2.rgw.1
+- sleep:
+      duration: 60
+- multisite-test:
+      test-name: test_bucket_with_delete
+      test_dir_version: v1
+      master_client: c1.rgw.0
+      target_client: c2.rgw.1
+      target_config:
+          bucket_count: 5
+          objects_count: 5
+          min_file_size: 5
+          max_file_size: 10
+- sleep:
+      duration: 60
+- multisite-test.pull-io-info:
+- verify-io:
+      verification_script: read_io_info
diff --git a/qa/suites/rgw/multisite-ansible/tasks/test_delete_key_versions.yaml b/qa/suites/rgw/multisite-ansible/tasks/test_delete_key_versions.yaml
new file mode 100644 (file)
index 0000000..917bd69
--- /dev/null
@@ -0,0 +1,49 @@
+tasks:
+- ssh-keys: null
+- ceph-ansible:
+    vars:
+      cluster: c1
+- ceph-ansible:
+    vars:
+      cluster: c2
+- new-rgw-multisite:
+   realm:
+     name: test-realm
+     is_default: true
+   zonegroups:
+     - name: test-zg
+       is_master: true
+       is_default: true
+       zones:
+         - name: test-zone
+           is_master: true
+           is_default: true
+           endpoints: [c1.rgw.0]
+         - name: test-zone2
+           is_default: true
+           endpoints: [c2.rgw.1]
+- multisite-test.userexec:
+      master_client: c1.rgw.0
+      test_dir_version: v1
+      master_config:
+        cluster_name: c1
+        user_count: 3
+      target_client: c2.rgw.1
+- sleep:
+      duration: 60
+- multisite-test:
+      test-name: test_delete_key_versions
+      test_dir_version: v1
+      master_client: c1.rgw.0
+      target_client: c2.rgw.1
+      target_config:
+          bucket_count: 5
+          objects_count: 10
+          version_count: 5
+          min_file_size: 10
+          max_file_size: 20
+- sleep:
+      duration: 60
+- multisite-test.pull-io-info:
+- verify-io:
+      verification_script: read_io_info
diff --git a/qa/suites/rgw/multisite-ansible/tasks/test_multipart_upload.yaml b/qa/suites/rgw/multisite-ansible/tasks/test_multipart_upload.yaml
new file mode 100644 (file)
index 0000000..6349dd4
--- /dev/null
@@ -0,0 +1,47 @@
+tasks:
+- ssh-keys: null
+- ceph-ansible:
+    vars:
+      cluster: c1
+- ceph-ansible:
+    vars:
+      cluster: c2
+- new-rgw-multisite:
+   realm:
+     name: test-realm
+     is_default: true
+   zonegroups:
+     - name: test-zg
+       is_master: true
+       is_default: true
+       zones:
+         - name: test-zone
+           is_master: true
+           is_default: true
+           endpoints: [c1.rgw.0]
+         - name: test-zone2
+           is_default: true
+           endpoints: [c2.rgw.1]
+- multisite-test.userexec:
+      master_client: c1.rgw.0
+      test_dir_version: v1
+      master_config:
+        cluster_name: c1
+        user_count: 3
+      target_client: c2.rgw.1
+- sleep:
+      duration: 60
+- multisite-test:
+      test-name: test_multipart_upload
+      test_dir_version: v1
+      master_client: c1.rgw.0
+      target_client: c2.rgw.1
+      target_config:
+          bucket_count: 5
+          min_file_size: 5
+          max_file_size: 10
+- sleep:
+      duration: 60
+- multisite-test.pull-io-info:
+- verify-io:
+      verification_script: read_io_info
diff --git a/qa/suites/rgw/multisite-ansible/tasks/test_multipart_upload_cancel.yaml b/qa/suites/rgw/multisite-ansible/tasks/test_multipart_upload_cancel.yaml
new file mode 100644 (file)
index 0000000..eec6c93
--- /dev/null
@@ -0,0 +1,48 @@
+tasks:
+- ssh-keys: null
+- ceph-ansible:
+    vars:
+      cluster: c1
+- ceph-ansible:
+    vars:
+      cluster: c2
+- new-rgw-multisite:
+   realm:
+     name: test-realm
+     is_default: true
+   zonegroups:
+     - name: test-zg
+       is_master: true
+       is_default: true
+       zones:
+         - name: test-zone
+           is_master: true
+           is_default: true
+           endpoints: [c1.rgw.0]
+         - name: test-zone2
+           is_default: true
+           endpoints: [c2.rgw.1]
+- multisite-test.userexec:
+      master_client: c1.rgw.0
+      test_dir_version: v1
+      master_config:
+        cluster_name: c1
+        user_count: 3
+      target_client: c2.rgw.1
+- sleep:
+      duration: 60
+- multisite-test:
+      test-name: test_multipart_upload_cancel
+      test_dir_version: v1
+      master_client: c1.rgw.0
+      target_client: c2.rgw.1
+      target_config:
+          bucket_count: 5
+          break_at_part_no: 10
+          min_file_size: 100
+          max_file_size: 200
+- sleep:
+      duration: 60
+- multisite-test.pull-io-info:
+- verify-io:
+      verification_script: read_io_info
diff --git a/qa/suites/rgw/multisite-ansible/tasks/test_multipart_upload_download.yaml b/qa/suites/rgw/multisite-ansible/tasks/test_multipart_upload_download.yaml
new file mode 100644 (file)
index 0000000..3d7e808
--- /dev/null
@@ -0,0 +1,47 @@
+tasks:
+- ssh-keys: null
+- ceph-ansible:
+    vars:
+      cluster: c1
+- ceph-ansible:
+    vars:
+      cluster: c2
+- new-rgw-multisite:
+   realm:
+     name: test-realm
+     is_default: true
+   zonegroups:
+     - name: test-zg
+       is_master: true
+       is_default: true
+       zones:
+         - name: test-zone
+           is_master: true
+           is_default: true
+           endpoints: [c1.rgw.0]
+         - name: test-zone2
+           is_default: true
+           endpoints: [c2.rgw.1]
+- multisite-test.userexec:
+      master_client: c1.rgw.0
+      test_dir_version: v1
+      master_config:
+        cluster_name: c1
+        user_count: 3
+      target_client: c2.rgw.1
+- sleep:
+      duration: 60
+- multisite-test:
+      test-name: test_multipart_upload_download
+      test_dir_version: v1
+      master_client: c1.rgw.0
+      target_client: c2.rgw.1
+      target_config:
+          bucket_count: 5
+          min_file_size: 100
+          max_file_size: 200
+- sleep:
+      duration: 60
+- multisite-test.pull-io-info:
+- verify-io:
+      verification_script: read_io_info
diff --git a/qa/suites/rgw/multisite-ansible/tasks/test_suspend_versioning.yaml b/qa/suites/rgw/multisite-ansible/tasks/test_suspend_versioning.yaml
new file mode 100644 (file)
index 0000000..661219f
--- /dev/null
@@ -0,0 +1,49 @@
+tasks:
+- ssh-keys: null
+- ceph-ansible:
+    vars:
+      cluster: c1
+- ceph-ansible:
+    vars:
+      cluster: c2
+- new_rgw_multisite:
+   realm:
+     name: test-realm
+     is_default: true
+   zonegroups:
+     - name: test-zg
+       is_master: true
+       is_default: true
+       zones:
+         - name: test-zone
+           is_master: true
+           is_default: true
+           endpoints: [c1.rgw.0]
+         - name: test-zone2
+           is_default: true
+           endpoints: [c2.rgw.1]
+- multisite-test.userexec:
+      master_client: c1.rgw.0
+      test_dir_version: v1
+      master_config:
+        cluster_name: c1
+        user_count: 3
+      target_client: c2.rgw.1
+- sleep:
+      duration: 60
+- multisite-test:
+      test-name: test_suspend_versioning
+      test_dir_version: v1
+      master_client: c1.rgw.0
+      target_client: c2.rgw.1
+      target_config:
+          bucket_count: 5
+          objects_count: 10
+          version_count: 5
+          min_file_size: 10
+          max_file_size: 20
+- sleep:
+      duration: 60
+- multisite-test.pull-io-info:
+- verify-io:
+      verification_script: read_io_info
diff --git a/qa/suites/rgw/multisite-ansible/tasks/test_version_with_revert.yaml b/qa/suites/rgw/multisite-ansible/tasks/test_version_with_revert.yaml
new file mode 100644 (file)
index 0000000..8186c44
--- /dev/null
@@ -0,0 +1,49 @@
+tasks:
+- ssh-keys: null
+- ceph-ansible:
+    vars:
+      cluster: c1
+- ceph-ansible:
+    vars:
+      cluster: c2
+- new-rgw-multisite:
+   realm:
+     name: test-realm
+     is_default: true
+   zonegroups:
+     - name: test-zg
+       is_master: true
+       is_default: true
+       zones:
+         - name: test-zone
+           is_master: true
+           is_default: true
+           endpoints: [c1.rgw.0]
+         - name: test-zone2
+           is_default: true
+           endpoints: [c2.rgw.1]
+- multisite-test.userexec:
+      master_client: c1.rgw.0
+      test_dir_version: v1
+      master_config:
+        cluster_name: c1
+        user_count: 3
+      target_client: c2.rgw.1
+- sleep:
+      duration: 60
+- multisite-test:
+      test-name: test_version_with_revert
+      test_dir_version: v1
+      master_client: c1.rgw.0
+      target_client: c2.rgw.1
+      target_config:
+          bucket_count: 5
+          objects_count: 10
+          version_count: 5
+          min_file_size: 10
+          max_file_size: 20
+- sleep:
+      duration: 60
+- multisite-test.pull-io-info:
+- verify-io:
+      verification_script: read_io_info
diff --git a/qa/tasks/multisite_test.py b/qa/tasks/multisite_test.py
new file mode 100644 (file)
index 0000000..94b0922
--- /dev/null
@@ -0,0 +1,314 @@
+import argparse
+import contextlib
+import logging
+import os
+import pwd
+import time
+import yaml
+from teuthology import misc as teuthology
+from teuthology.orchestra import run
+log = logging.getLogger(__name__)
+
+
+# Test yaml to test script mapper for boto3
+
+tests_mapper_v2 = {'Mbuckets': 'test_Mbuckets',
+                   'Mbuckets_sharding': 'test_Mbuckets',
+                   'Mbuckets_with_Nobjects_create': 'test_Mbuckets_with_Nobjects',
+                   'Mbuckets_with_Nobjects_delete': 'test_Mbuckets_with_Nobjects',
+                   'Mbuckets_with_Nobjects_download': 'test_Mbuckets_with_Nobjects',
+                   'Mbuckets_with_Nobjects_sharding': 'test_Mbuckets_with_Nobjects'
+                   }
+
+def user_creation(user_config, mclient, tclient, version):
+
+    log.info('Create user on master client')
+
+    temp_yaml_file = 'user_create_' + str(os.getpid()) + pwd.getpwuid(os.getuid()).pw_name
+
+    #        temp_yaml_file = 'user_create.yaml'
+
+    assert user_config is not None, "configuration not given"
+    assert isinstance(user_config, dict), "user_config must be a dict"
+
+    log.info('creating yaml from the config: %s' % user_config)
+    local_file = '/tmp/' + temp_yaml_file
+    with open(local_file,  'w') as outfile:
+        outfile.write(yaml.dump(user_config, default_flow_style=False))
+
+    log.info('copying yaml to the client node')
+    destination_location = \
+        ('rgw-tests/ceph-qe-scripts/rgw/%s/tests/multisite/yamls/' % version + temp_yaml_file)
+    mclient.put_file(local_file,  destination_location)
+    mclient.run(args=['ls', '-lt',
+                      'rgw-tests/ceph-qe-scripts/rgw/%s/tests/multisite/yamls/' % version])
+    mclient.run(args=['cat',
+                      'rgw-tests/ceph-qe-scripts/rgw/%s/tests/multisite/yamls/' % version + temp_yaml_file])
+
+    #        mclient.run(args=['sudo', 'rm', '-f', run.Raw('%s' % local_file)], check_status=False)
+
+    mclient.run(
+        args=[
+            run.Raw(
+                'sudo venv/bin/python2.7 rgw-tests/ceph-qe-scripts/rgw/%s/tests/multisite/%s '
+                '-c rgw-tests/ceph-qe-scripts/rgw/%s/tests/multisite/yamls/%s '
+                % (version, 'user_create.py', version, temp_yaml_file))])
+
+    log.info('copy user_details file from source client into local dir')
+
+    user_file = mclient.get_file('user_details', '/tmp')
+
+    time.sleep(10)
+
+    log.info('copy user_file to target client')
+
+    if mclient != tclient:
+        tclient.put_file(user_file, 'user_details')
+
+
+def test_data(tclient, test_name, script_name, version):
+
+    tclient.run(args=['ls', '-lt',
+                      'rgw-tests/ceph-qe-scripts/rgw/%s/tests/multisite/yamls/' % version])
+    tclient.run(args=['cat',
+                      'rgw-tests/ceph-qe-scripts/rgw/%s/tests/multisite/yamls/' % version + test_name])
+
+    tclient.run(
+        args=[
+            run.Raw(
+                'sudo venv/bin/python2.7 rgw-tests/ceph-qe-scripts/rgw/%s/tests/multisite/%s '
+                '-c rgw-tests/ceph-qe-scripts/rgw/%s/tests/multisite/yamls/%s '
+                % (version, script_name, version, test_name))])
+
+
+def copy_file_from(src_node, dest_node, file_path='/home/ubuntu/io_info.yaml'):
+
+    # copies to /tmp dir and then puts it in destination machines
+
+    log.info('copy of io_info.yaml from %s initiated' % src_node)
+
+#    io_info_file = src_node.get_file(file_path, '/tmp')
+
+    io_info_file = teuthology.get_file(
+                remote=src_node,
+                path=file_path,
+    )
+
+    time.sleep(10)
+
+    log.info('copy io_info_file to %s' % dest_node)
+
+    teuthology.sudo_write_file(
+        remote=dest_node,
+        path=file_path,
+        data=io_info_file)
+
+#    dest_node.put_file(io_info_file, file_name)
+
+    log.info('copy of io_info.yaml completed')
+
+
+@contextlib.contextmanager
+def pull_io_info(ctx, config):
+
+    # copy file from the node running tests to all other rgw nodes
+    """
+        - multisite_test.pull_io_info:
+    """
+
+    log.info('starting the task')
+
+    log.info('config %s' % config)
+
+    if config is None:
+        config = {}
+
+    mclient = ctx.multisite_test.master
+    tclient = ctx.multisite_test.target
+
+    if mclient != tclient:
+        mclient.run(args=[run.Raw('sudo mv io_info.yaml io_info_2.yaml')])
+
+    clients = ctx.cluster.only(teuthology.is_type('rgw'))
+    for remote, roles_for_host in clients.remotes.iteritems():
+        if remote != tclient:
+            copy_file_from(tclient, remote)
+
+    yield
+
+
+@contextlib.contextmanager
+def userexec(ctx, config):
+
+    # Create user and copy the user_details to target client
+
+    """
+    - multisite-test.userexec:
+        test_dir_version: v1
+        master_client: source.rgw.0
+        master_config:
+            cluster_name: source
+            user_count: 3
+        target_client: target.rgw.1
+    """
+
+    log.info('starting the task')
+
+    log.info('config %s' % config)
+
+    if config is None:
+        config = {}
+
+    assert isinstance(config, dict), \
+        "task userexec only supports a dictionary for configuration"
+
+    log.info('cloning the repo to client machines')
+
+    remotes = ctx.cluster.only(teuthology.is_type('rgw'))
+    for remote, roles_for_host in remotes.remotes.iteritems():
+
+        cleanup = lambda x: remote.run(args=[run.Raw('sudo rm -rf %s' % x)])
+
+        soot = ['venv', 'rgw-tests', '*.json', 'Download.*', 'Download', '*.mpFile', 'x*', 'key.*', 'Mp.*',
+                '*.key.*', 'user_details', 'io_info.yaml', 'io_info_2.yaml']
+
+        map(cleanup, soot)
+
+        remote.run(args=['mkdir', 'rgw-tests'])
+        remote.run(
+            args=[
+                'cd',
+                'rgw-tests',
+                run.Raw(';'),
+                'git',
+                'clone',
+                '-b',
+                'multisite-boto3',
+                'http://gitlab.cee.redhat.com/ceph/ceph-qe-scripts.git',
+                ])
+
+        remote.run(args=['virtualenv', 'venv'])
+        remote.run(
+            args=[
+                'source',
+                'venv/bin/activate',
+                run.Raw(';'),
+                run.Raw('pip install boto boto3 names PyYaml psutil ConfigParser simplejson'),
+                run.Raw(';'),
+                'deactivate'])
+
+    master_client = config['master_client']
+    (mclient,) = ctx.cluster.only(master_client).remotes.iterkeys()
+
+    target_client = config['target_client']
+    (tclient,) = ctx.cluster.only(target_client).remotes.iterkeys()
+
+    user_config = config['master_config']
+
+    # build the config payload handed to user_create.py
+
+    user_data = dict(
+        config=dict(
+            cluster_name=user_config['cluster_name'],
+            user_count=user_config['user_count'],
+        )
+    )
+
+    if config['test_dir_version'] == 'v1':
+        user_creation(user_data, mclient, tclient, version='v1')
+    elif config['test_dir_version'] == 'v2':
+        user_creation(user_data, mclient, tclient, version='v2')
+
+    yield
+
+
+def execute_v1(tclient, config):
+
+    # Tests using boto2 here
+
+    test_name = config['test-name'] + ".yaml"
+    script_name = config['test-name'] + ".py"
+
+    log.info('test name :%s' % config['test-name'])
+
+    # Execute  test
+
+    test_data(tclient, test_name, script_name, version='v1')
+
+
+def execute_v2(tclient, config):
+
+    # Tests using boto3 here
+
+    test_name = config['test-name'] + ".yaml"
+    script_name = tests_mapper_v2[config['test-name']] + ".py"
+
+    log.info('test name :%s' % config['test-name'])
+
+    # Execute  test
+
+    test_data(tclient, test_name, script_name, version='v2')
+
+
+@contextlib.contextmanager
+def task(ctx, config):
+
+    log.info('starting the task')
+
+    log.info('config %s' % config)
+
+    if config is None:
+        config = {}
+
+    assert isinstance(config, dict), \
+        "task multisite_test only supports a dictionary for configuration"
+
+    # Master node for metadata
+
+    master_client = config['master_client']
+    (mclient,) = ctx.cluster.only(master_client).remotes.iterkeys()
+
+    # Target node where the tests will be run. Can be in either the primary or the secondary zone.
+
+    target_client = config['target_client']
+    (tclient,) = ctx.cluster.only(target_client).remotes.iterkeys()
+
+    ctx.multisite_test = argparse.Namespace()
+    ctx.multisite_test.master = mclient
+    ctx.multisite_test.target = tclient
+    ctx.multisite_test.version = config['test_dir_version']
+
+    log.info('test_dir_version: %s' % config['test_dir_version'])
+
+    if config['test_dir_version'] == 'v1':
+        execute_v1(tclient, config)
+
+    if config['test_dir_version'] == 'v2':
+        execute_v2(tclient, config)
+
+    try:
+        yield
+    finally:
+
+        remotes = ctx.cluster.only(teuthology.is_type('rgw'))
+        for remote, roles_for_host in remotes.remotes.iteritems():
+
+            remote.run(
+                args=[
+                    'source',
+                    'venv/bin/activate',
+                    run.Raw(';'),
+                    run.Raw('pip uninstall boto boto3 names PyYaml -y'),
+                    run.Raw(';'),
+                    'deactivate'])
+
+            log.info('test completed')
+
+            log.info("Deleting repos")
+
+            cleanup = lambda x: remote.run(args=[run.Raw('sudo rm -rf %s' % x)])
+
+            soot = ['venv', 'rgw-tests', '*.json', 'Download.*', 'Download', '*.mpFile', 'x*', 'key.*', 'Mp.*',
+                    '*.key.*', 'user_details', 'io_info.yaml', 'io_info_2.yaml']
+
+            map(cleanup, soot)
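For reference, the user_create payload that userexec() above serializes and ships to the master client is just the suite's master_config keys wrapped under a config key. A small sketch of what lands in the temp yaml (the schema itself is owned by the external ceph-qe-scripts repo):

    import yaml

    # Reconstruction of what user_creation() above dumps to
    # /tmp/user_create_<pid><user>; values mirror a suite's master_config.
    user_data = dict(config=dict(cluster_name='c1', user_count=3))
    print(yaml.dump(user_data, default_flow_style=False))
    # config:
    #   cluster_name: c1
    #   user_count: 3
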
diff --git a/qa/tasks/multisite_test_v1.py b/qa/tasks/multisite_test_v1.py
new file mode 100644 (file)
index 0000000..ed6e364
--- /dev/null
@@ -0,0 +1,648 @@
+import contextlib
+import logging
+import os
+import pwd
+import time
+import yaml
+from teuthology import misc as teuthology
+from teuthology.orchestra import run
+log = logging.getLogger(__name__)
+
+
+def user_creation(user_config, mclient, tclient):
+
+    log.info('Create user on master client')
+
+    temp_yaml_file = 'user_create_' + str(os.getpid()) + pwd.getpwuid(os.getuid()).pw_name
+
+    #        temp_yaml_file = 'user_create.yaml'
+
+    assert user_config is not None, "configuration not given"
+    assert isinstance(user_config, dict), "user_config must be a dict"
+
+    log.info('creating yaml from the config: %s' % user_config)
+    local_file = '/tmp/' + temp_yaml_file
+    with open(local_file,  'w') as outfile:
+        outfile.write(yaml.dump(user_config, default_flow_style=False))
+
+    log.info('copying yaml to the client node')
+    destination_location = \
+        'rgw-tests/ceph-qe-scripts/rgw/v1/tests/multisite/yamls/' + temp_yaml_file
+    mclient.put_file(local_file,  destination_location)
+    mclient.run(args=['ls', '-lt',
+                      'rgw-tests/ceph-qe-scripts/rgw/v1/tests/multisite/yamls/'])
+    mclient.run(args=['cat',
+                      'rgw-tests/ceph-qe-scripts/rgw/v1/tests/multisite/yamls/' + temp_yaml_file])
+
+    #        mclient.run(args=['sudo', 'rm', '-f', run.Raw('%s' % local_file)], check_status=False)
+
+    mclient.run(
+        args=[
+            run.Raw(
+                'sudo venv/bin/python2.7 rgw-tests/ceph-qe-scripts/rgw/v1/tests/multisite/%s '
+                '-c rgw-tests/ceph-qe-scripts/rgw/v1/tests/multisite/yamls/%s ' % ('user_create.py', temp_yaml_file))])
+
+    log.info('copy user_details file from source client into local dir')
+
+    user_file = mclient.get_file('user_details', '/tmp')
+
+    time.sleep(20)
+
+    log.info('copy user_file to target client')
+
+    if mclient != tclient:
+        tclient.put_file(user_file, 'user_details')
+
+
+def test_data(script_name, data_config, tclient):
+
+    script_fname = script_name + ".py"
+
+    yaml_fname = script_name + ".yaml"
+
+    log.info('test: %s' % script_fname)
+
+    temp_yaml_file = yaml_fname + "_" + str(os.getpid()) + pwd.getpwuid(os.getuid()).pw_name
+
+    assert data_config is not None, "configuration not given"
+    assert isinstance(data_config, dict), "data_config must be a dict"
+
+    log.info('creating yaml from the config: %s' % data_config)
+    local_file = '/tmp/' + temp_yaml_file
+    with open(local_file,  'w') as outfile:
+        outfile.write(yaml.dump(data_config, default_flow_style=False))
+
+    log.info('copying yaml to the client node')
+    destination_location = \
+        'rgw-tests/ceph-qe-scripts/rgw/v1/tests/multisite/yamls/' + yaml_fname
+    tclient.put_file(local_file,  destination_location)
+    tclient.run(args=['ls', '-lt',
+                      'rgw-tests/ceph-qe-scripts/rgw/v1/tests/multisite/yamls/'])
+    tclient.run(args=['cat',
+                      'rgw-tests/ceph-qe-scripts/rgw/v1/tests/multisite/yamls/' + yaml_fname])
+
+    tclient.run(args=['sudo', 'rm', '-f', run.Raw('%s' % local_file)], check_status=False)
+
+    tclient.run(
+        args=[
+            run.Raw(
+                'sudo venv/bin/python2.7 rgw-tests/ceph-qe-scripts/rgw/v1/tests/multisite/%s '
+                '-c rgw-tests/ceph-qe-scripts/rgw/v1/tests/multisite/yamls/%s ' % (script_fname, yaml_fname))])
+
+
+def copy_file_from(src_node, dest_node, file_name='io_info.yaml'):
+
+    # copies to /tmp dir and then puts it in destination machines
+
+    log.info('copy of io_info.yaml from %s initiated' % src_node)
+
+    io_info_file = src_node.get_file(file_name, '/tmp')
+
+    dest_node.put_file(io_info_file, file_name)
+
+    log.info('copy of io_info.yaml completed')
+
+
+def test_exec(ctx, config, data, tclient, mclient):
+
+    assert data is not None, "Got no test in configuration"
+
+    log.info('test name :%s' % config['test-name'])
+
+    script_name = config['test-name']
+
+    log.info('script_name: %s' % script_name)
+
+    test_data(script_name, data, tclient=tclient)
+
+    # copy io_info.yaml from the target node to the master node.
+
+    time.sleep(60)
+    # wait for sync
+
+    # no verification is being done for acls test cases right now.
+
+    if 'acls' not in script_name:
+
+        log.info('not an acls test: %s' % script_name)
+
+    # copy file from the node running tests to all other rgw nodes
+
+    if mclient != tclient:
+        mclient.run(args=[run.Raw('sudo mv io_info.yaml io_info_2.yaml')])
+
+    clients = ctx.cluster.only(teuthology.is_type('rgw'))
+    for remote, roles_for_host in clients.remotes.iteritems():
+        if remote != tclient:
+            copy_file_from(tclient, remote)
+
+
+@contextlib.contextmanager
+def userexec(ctx, config):
+
+    # Create user and copy the user_details to target client
+
+    """
+    - multisite-test.userexec:
+        master_client: source.rgw.0
+        master_config:
+            cluster_name: source
+            user_count: 3
+        target_client: target.rgw.1
+    """
+
+    log.info('starting the task')
+
+    log.info('config %s' % config)
+
+    if config is None:
+        config = {}
+
+    assert isinstance(config, dict), \
+        "task set-repo only supports a dictionary for configuration"
+
+    log.info('cloning the repo to client machines')
+
+    remotes = ctx.cluster.only(teuthology.is_type('rgw'))
+    for remote, roles_for_host in remotes.remotes.iteritems():
+
+        cleanup = lambda x: remote.run(args=[run.Raw('sudo rm -rf %s' % x)])
+
+        soot = ['venv', 'rgw-tests', '*.json', 'Download.*', 'Download', '*.mpFile', 'x*', 'key.*', 'Mp.*',
+                '*.key.*', 'user_details', 'io_info.yaml', 'io_info_2.yaml']
+
+        map(cleanup, soot)
+
+        remote.run(args=['mkdir', 'rgw-tests'])
+        remote.run(
+            args=[
+                'cd',
+                'rgw-tests',
+                run.Raw(';'),
+                'git',
+                'clone',
+                'http://gitlab.cee.redhat.com/ceph/ceph-qe-scripts.git',
+                ])
+
+        remote.run(args=['virtualenv', 'venv'])
+        remote.run(
+            args=[
+                'source',
+                'venv/bin/activate',
+                run.Raw(';'),
+                run.Raw('pip install boto names PyYaml ConfigParser simplejson'),
+                run.Raw(';'),
+                'deactivate'])
+
+    master_client = config['master_client']
+    (mclient,) = ctx.cluster.only(master_client).remotes.iterkeys()
+
+    target_client = config['target_client']
+    (tclient,) = ctx.cluster.only(target_client).remotes.iterkeys()
+
+    user_config = config['master_config']
+
+    # build the config payload handed to user_create.py
+
+    user_data = dict(
+        config=dict(
+            cluster_name=user_config['cluster_name'],
+            user_count=user_config['user_count'],
+        )
+    )
+
+    user_creation(user_data, mclient, tclient)
+
+    yield
+
+
+@contextlib.contextmanager
+def task(ctx, config):
+    """
+
+    tasks:
+    - multisite-test-v1:
+        test-name: test_Mbuckets
+        master_client: source.client.0
+        target_client: target.client.1
+        target_config:
+            bucket_count: 5
+
+    tasks:
+    - multisite-test-v1:
+        test-name: test_Mbuckets_with_Nobjects
+        master_client: source.client.0
+        target_client: target.client.1
+        target_config:
+            user_count: 3
+            bucket_count: 5
+            objects_count: 5
+            min_file_size: 5
+            max_file_size: 10
+
+    tasks:
+    - multisite-test-v1:
+        test-name: test_bucket_with_delete
+        master_client: c1.rgw.0
+        target_client: c2.rgw.1
+        target_config:
+            bucket_count: 5
+            objects_count: 5
+            min_file_size: 5
+            max_file_size: 10
+
+    tasks:
+    - multisite-test-v1:
+        test-name: test_multipart_upload
+        master_client: c1.rgw.0
+        target_client: c2.rgw.1
+        target_config:
+            bucket_count: 5
+            min_file_size: 5
+            max_file_size: 10
+
+    tasks:
+    - multisite-test-v1:
+        test-name: test_multipart_upload_download
+        master_client: c1.rgw.0
+        target_client: c2.rgw.1
+        target_config:
+            bucket_count: 5
+            min_file_size: 100
+            max_file_size: 200
+
+    tasks:
+    - multisite-test-v1:
+        test-name: test_multipart_upload_cancel
+        master_client: c1.rgw.0
+        target_client: c2.rgw.1
+        target_config:
+            bucket_count: 5
+            break_at_part_no: 10
+            min_file_size: 100
+            max_file_size: 200
+
+    tasks:
+    - multisite-test-v1:
+        test-name: test_basic_versioning
+        master_client: c1.rgw.0
+        target_client: c2.rgw.1
+        target_config:
+            bucket_count: 5
+            objects_count: 10
+            version_count: 5
+            min_file_size: 10
+            max_file_size: 20
+
+    tasks:
+    - multisite-test-v1:
+        test-name: test_delete_key_versions
+        master_client: c1.rgw.0
+        target_client: c2.rgw.1
+        target_config:
+            bucket_count: 5
+            objects_count: 10
+            version_count: 5
+            min_file_size: 10
+            max_file_size: 20
+
+    tasks:
+    - multisite-test-v1:
+        test-name: test_suspend_versioning
+        master_client: c1.rgw.0
+        target_client: c2.rgw.1
+        target_config:
+            bucket_count: 5
+            objects_count: 10
+            version_count: 5
+            min_file_size: 10
+            max_file_size: 20
+
+    tasks:
+    - multisite-test-v1:
+        test-name: test_version_with_revert
+        master_client: c1.rgw.0
+        target_client: c2.rgw.1
+        target_config:
+            bucket_count: 5
+            objects_count: 10
+            version_count: 5
+            min_file_size: 10
+            max_file_size: 20
+
+    tasks:
+    - multisite-test-v1:
+        test-name: test_acls
+        master_client: c1.rgw.0
+        target_client: c2.rgw.1
+        target_config:
+            bucket_count: 5
+            objects_count: 10
+            min_file_size: 10
+            max_file_size: 20
+
+    tasks:
+    - multisite-test-v1:
+        test-name: test_acls_all_usrs
+        master_client: c1.rgw.0
+        target_client: c2.rgw.1
+        target_config:
+            bucket_count: 5
+            objects_count: 10
+            min_file_size: 10
+            max_file_size: 20
+
+    tasks:
+    - multisite-test-v1:
+        test-name: test_acls_copy_obj
+        master_client: c1.rgw.0
+        target_client: c2.rgw.1
+        target_config:
+            objects_count: 10
+            min_file_size: 10
+            max_file_size: 20
+
+    tasks:
+    - multisite-test-v1:
+        test-name: test_acls_reset
+        master_client: c1.rgw.0
+        target_client: c2.rgw.1
+        target_config:
+            bucket_count: 5
+            objects_count: 10
+            min_file_size: 10
+            max_file_size: 20
+
+    """
+
+    log.info('starting the task')
+
+    log.info('config %s' % config)
+
+    if config is None:
+        config = {}
+
+    assert isinstance(config, dict), \
+        "task multisite_test only supports a dictionary for configuration"
+
+    master_client = config['master_client']
+    (mclient,) = ctx.cluster.only(master_client).remotes.iterkeys()
+
+    target_client = config['target_client']
+    (tclient,) = ctx.cluster.only(target_client).remotes.iterkeys()
+
+    test_config = config['target_config']
+    data = None
+
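+    # each test consumes only the keys it needs from target_config; an
+    # unrecognised test-name leaves data as None and trips the assert below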
+    if config['test-name'] == 'test_Mbuckets':
+
+        data = dict(
+            config=dict(
+                bucket_count=test_config['bucket_count']
+            )
+        )
+
+    if config['test-name'] == 'test_Mbuckets_with_Nobjects':
+
+        data = dict(
+            config=dict(
+                bucket_count=test_config['bucket_count'],
+                objects_count=test_config['objects_count'],
+                objects_size_range=dict(
+                    min=test_config['min_file_size'],
+                    max=test_config['max_file_size']
+                )
+
+            )
+        )
+
+    if config['test-name'] == 'test_Mbuckets_with_Nobjects_shards':
+
+        # changing the value of config['test-name'] to take test_Mbuckets_with_Nobjects,
+        # since this test also takes the following configuration
+
+        config['test-name'] = 'test_Mbuckets_with_Nobjects'
+
+        data = dict(
+            config=dict(
+                shards=test_config['shards'],
+                max_objects=test_config['max_objects'],
+                bucket_count=test_config['bucket_count'],
+                objects_count=test_config['objects_count'],
+                objects_size_range=dict(
+                    min=test_config['min_file_size'],
+                    max=test_config['max_file_size']
+                )
+
+            )
+        )
+
+    if config['test-name'] == 'test_bucket_with_delete':
+
+        data = dict(
+            config=dict(
+                bucket_count=test_config['bucket_count'],
+                objects_count=test_config['objects_count'],
+                objects_size_range=dict(
+                    min=test_config['min_file_size'],
+                    max=test_config['max_file_size']
+                )
+
+            )
+        )
+
+    # multipart
+
+    if config['test-name'] == 'test_multipart_upload':
+
+        data = dict(
+            config=dict(
+                bucket_count=test_config['bucket_count'],
+                objects_size_range=dict(
+                    min=test_config['min_file_size'],
+                    max=test_config['max_file_size']
+                )
+
+            )
+        )
+
+    if config['test-name'] == 'test_multipart_upload_download':
+
+        data = dict(
+            config=dict(
+                bucket_count=test_config['bucket_count'],
+                objects_size_range=dict(
+                    min=test_config['min_file_size'],
+                    max=test_config['max_file_size']
+                )
+
+            )
+        )
+
+    if config['test-name'] == 'test_multipart_upload_cancel':
+
+        data = dict(
+            config=dict(
+                bucket_count=test_config['bucket_count'],
+                break_at_part_no=test_config['break_at_part_no'],
+                objects_size_range=dict(
+                    min=test_config['min_file_size'],
+                    max=test_config['max_file_size']
+                )
+
+            )
+        )
+
+    # Versioning
+
+    if config['test-name'] == 'test_basic_versioning':
+
+        data = dict(
+            config=dict(
+                bucket_count=test_config['bucket_count'],
+                version_count=test_config['version_count'],
+                objects_count=test_config['objects_count'],
+                objects_size_range=dict(
+                    min=test_config['min_file_size'],
+                    max=test_config['max_file_size']
+                )
+
+            )
+        )
+
+    if config['test-name'] == 'test_delete_key_versions':
+
+        data = dict(
+            config=dict(
+                bucket_count=test_config['bucket_count'],
+                version_count=test_config['version_count'],
+                objects_count=test_config['objects_count'],
+                objects_size_range=dict(
+                    min=test_config['min_file_size'],
+                    max=test_config['max_file_size']
+                )
+
+            )
+        )
+
+    if config['test-name'] == 'test_suspend_versioning':
+
+        data = dict(
+            config=dict(
+                bucket_count=test_config['bucket_count'],
+                version_count=test_config['version_count'],
+                objects_count=test_config['objects_count'],
+                objects_size_range=dict(
+                    min=test_config['min_file_size'],
+                    max=test_config['max_file_size']
+                )
+
+            )
+        )
+
+    if config['test-name'] == 'test_version_with_revert':
+
+        data = dict(
+            config=dict(
+                bucket_count=test_config['bucket_count'],
+                version_count=test_config['version_count'],
+                objects_count=test_config['objects_count'],
+                objects_size_range=dict(
+                    min=test_config['min_file_size'],
+                    max=test_config['max_file_size']
+                )
+
+            )
+        )
+
+    # ACLs
+
+    if config['test-name'] == 'test_acls':
+
+        data = dict(
+            config=dict(
+                bucket_count=test_config['bucket_count'],
+                objects_count=test_config['objects_count'],
+                objects_size_range=dict(
+                    min=test_config['min_file_size'],
+                    max=test_config['max_file_size']
+                )
+
+            )
+        )
+
+    if config['test-name'] == 'test_acls_all_usrs':
+
+        data = dict(
+            config=dict(
+                bucket_count=test_config['bucket_count'],
+                objects_count=test_config['objects_count'],
+                objects_size_range=dict(
+                    min=test_config['min_file_size'],
+                    max=test_config['max_file_size']
+                )
+
+            )
+        )
+
+    if config['test-name'] == 'test_acls_copy_obj':
+
+        data = dict(
+            config=dict(
+                objects_count=test_config['objects_count'],
+                objects_size_range=dict(
+                    min=test_config['min_file_size'],
+                    max=test_config['max_file_size']
+                )
+
+            )
+        )
+
+    if config['test-name'] == 'test_acls_reset':
+
+        data = dict(
+            config=dict(
+                objects_count=test_config['objects_count'],
+                bucket_count=test_config['bucket_count'],
+                objects_size_range=dict(
+                    min=test_config['min_file_size'],
+                    max=test_config['max_file_size']
+                )
+
+            )
+        )
+
+    assert data is not None, \
+        "unrecognised test-name: %s" % config['test-name']
+    test_exec(ctx, config, data, tclient, mclient)
+
+    try:
+        yield
+    finally:
+
+        remotes = ctx.cluster.only(teuthology.is_type('rgw'))
+        for remote, roles_for_host in remotes.remotes.iteritems():
+
+            remote.run(
+                args=[
+                    'source',
+                    'venv/bin/activate',
+                    run.Raw(';'),
+                    run.Raw('pip uninstall boto names PyYaml -y'),
+                    run.Raw(';'),
+                    'deactivate'])
+
+            log.info('test completed')
+
+            log.info("Deleting repos")
+
+            cleanup = lambda x: remote.run(args=[run.Raw('sudo rm -rf %s' % x)])
+
+            soot = ['venv', 'rgw-tests', '*.json', 'Download.*', 'Download', '*.mpFile', 'x*', 'key.*', 'Mp.*',
+                    '*.key.*', 'user_details', 'io_info.yaml', 'io_info_2.yaml']
+
+            map(cleanup, soot)
diff --git a/qa/tasks/new_rgw_multisite.py b/qa/tasks/new_rgw_multisite.py
new file mode 100644 (file)
index 0000000..7a5a1b5
--- /dev/null
@@ -0,0 +1,487 @@
+import argparse
+import contextlib
+import json
+import logging
+import random
+import string
+from cStringIO import StringIO
+from teuthology import misc as teuthology
+from teuthology import contextutil
+from requests.packages.urllib3 import PoolManager
+from requests.packages.urllib3.util import Retry
+
+log = logging.getLogger(__name__)
+
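+# system-user credentials; generated once in
+# configure_master_zonegroup_and_zones and shared by the other subtasks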
+access_key = None
+secret = None
+
+def rgwadmin(ctx, client, cmd, stdin=StringIO(), check_status=False,
+             format='json', decode=True, log_level=logging.DEBUG):
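+    """Run radosgw-admin on the given client's cluster and return
+    (exitstatus, decoded json or raw output)."""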
+    log.info('rgwadmin: {client} : {cmd}'.format(client=client,cmd=cmd))
+    cluster_name, daemon_type, client_id = teuthology.split_role(client)
+    pre = ['sudo',
+           'radosgw-admin',
+           '--log-to-stderr',
+           '--format', format,
+           '--cluster', cluster_name,
+           ]
+    pre.extend(cmd)
+    log.log(log_level, 'rgwadmin: cmd=%s' % pre)
+    (remote,) = ctx.cluster.only(client).remotes.iterkeys()
+    proc = remote.run(
+        args=pre,
+        check_status=check_status,
+        stdout=StringIO(),
+        stderr=StringIO(),
+        stdin=stdin,
+        )
+    r = proc.exitstatus
+    out = proc.stdout.getvalue()
+    if not decode:
+        return (r, out)
+    j = None
+    if not r and out != '':
+        try:
+            j = json.loads(out)
+            log.log(log_level, ' json result: %s' % j)
+        except ValueError:
+            j = out
+            log.log(log_level, ' raw result: %s' % j)
+    return (r, j)
+
+
+def extract_endpoints(ctx, role):
+
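+    """Map a teuthology role such as 'c1.rgw.0' to its (host, port) pair.
+
+    The port is fixed at 8080 below; e.g. (hostname illustrative):
+    {'c1.rgw.0': ('smithi001.front.sepia.ceph.com', 8080)}
+    """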
+    port = 8080
+    role_endpoints = {}
+    (remote,) = ctx.cluster.only(role).remotes.iterkeys()
+    role_endpoints[role] = (remote.name.split('@')[1], port)
+    log.info('Endpoints are {role_endpoints}'.format(role_endpoints=role_endpoints))
+
+    return role_endpoints
+
+
+def get_config_clients(ctx, config):
+
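+    """Parse the task's zonegroups config and return (master_zonegroup,
+    master_zone, master_client, target_zone, target_client)."""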
+    master_zonegroup = None
+    master_zone = None
+    master_client = None
+    target_zone = None
+    target_client = None
+
+    zonegroups_config = config['zonegroups']
+    for zonegroup_config in zonegroups_config:
+        if zonegroup_config.get('is_master', False):
+            master_zonegroup = zonegroup_config.get('name')
+        for zone in zonegroup_config['zones']:
+            if zone.get('is_master', False):
+                mz_config = zone
+                master_zone = mz_config.get('name')
+                master_client = mz_config.get('endpoints')[0]
+            else:
+                tz_config = zone
+                target_zone = tz_config.get('name')
+                target_client = tz_config.get('endpoints')[0]
+
+    return master_zonegroup, master_zone, master_client, target_zone, target_client
+
+
+def gen_access_key():
+    return ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(16))
+
+
+def gen_secret():
+    return ''.join(random.choice(string.ascii_uppercase + string.ascii_lowercase + string.digits) for _ in range(32))
+
+
+def wait_for_radosgw(ctx, client):
+
+    role_endpoints = extract_endpoints(ctx, client)
+    host, port = role_endpoints[client]
+    url = "http://%s:%d" % (host, port)
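+    # Retry(connect=8, backoff_factor=1) retries refused connections with
+    # exponential backoff, giving the gateway time to come up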
+    http = PoolManager(retries=Retry(connect=8, backoff_factor=1))
+    http.request('GET', url)
+
+
+@contextlib.contextmanager
+def configure_master_zonegroup_and_zones(ctx, config, master_zonegroup, master_zone, realm, master_client):
+
+    """ Create zonegroup and zone on master"""
+    global access_key, secret
+    access_key = gen_access_key()
+    secret = gen_secret()
+
+    role_endpoints = extract_endpoints(ctx, master_client)
+    host, port = role_endpoints[master_client]
+
+    endpoint = 'http://{host}:{port}'.format(host=host, port=port)
+    log.debug("endpoint: %s", endpoint)
+
+    log.info('creating master zonegroup and zone on {}'.format(master_client))
+    rgwadmin(ctx, master_client,
+             cmd=['realm', 'create', '--rgw-realm', realm, '--default'],
+             check_status=True)
+
+    rgwadmin(ctx, master_client,
+             cmd=['zonegroup', 'create', '--rgw-zonegroup', master_zonegroup, '--master', '--endpoints', endpoint,
+                  '--default'], check_status=True)
+
+    rgwadmin(ctx, master_client,
+             cmd=['zone', 'create', '--rgw-zonegroup', master_zonegroup,
+                  '--rgw-zone', master_zone, '--endpoints', endpoint, '--access-key',
+                  access_key, '--secret',
+                  secret, '--master', '--default'],
+             check_status=True)
+
+    rgwadmin(ctx, master_client,
+             cmd=['period', 'update', '--commit'],
+             check_status=True)
+
+    yield
+
+
+@contextlib.contextmanager
+def configure_user_for_client(ctx, master_client):
+
+    """ Create system user"""
+
+    user = 'sysadmin'
+
+    log.debug('Creating system user {user} on {client}'.format(
+        user=user, client=master_client))
+    rgwadmin(ctx, master_client,
+                cmd=[
+                    'user', 'create',
+                    '--uid', user,
+                    '--access-key', access_key,
+                    '--secret', secret,
+                    '--display-name', user,
+                    '--system',
+                ],
+                check_status=True,
+        )
+    yield
+
+
+@contextlib.contextmanager
+def pull_configuration(ctx, realm,  master_client, target_client):
+
+    """ Pull realm and period from master zone"""
+
+    role_endpoints = extract_endpoints(ctx, master_client)
+    host, port = role_endpoints[master_client]
+
+    endpoint = 'http://{host}:{port}'.format(host=host, port=port)
+    log.debug("endpoint: %s", endpoint)
+
+    log.info('Pulling master config information from {}'.format(master_client))
+    rgwadmin(ctx, target_client,
+             cmd=['realm', 'pull', '--url',
+                  endpoint, '--access_key',
+                  access_key, '--secret',
+                  secret],
+            check_status=True)
+
+    rgwadmin(ctx, target_client,
+             cmd=['realm', 'default', '--rgw-realm', realm])
+
+    rgwadmin(ctx, target_client,
+             cmd=['period', 'pull', '--url', endpoint, '--access_key',
+                  access_key, '--secret',
+                  secret],
+             check_status=True)
+
+    yield
+
+
+@contextlib.contextmanager
+def configure_target_zone(ctx, config, target_zone, master_zonegroup, target_client):
+
+    role_endpoints = extract_endpoints(ctx, target_client)
+    host, port = role_endpoints[target_client]
+
+    endpoint = 'http://{host}:{port}'.format(host=host, port=port)
+    log.debug("endpoint: %s", endpoint)
+
+    log.info('creating zone on {}'.format(target_client))
+
+    # find the secondary zone's config so its is_read_only flag is honoured
+    zone_config = {}
+
+    zgs = ctx.new_rgw_multisite.config['zonegroups']
+    for zg in zgs:
+        for zone in zg.get('zones'):
+            if not zone.get('is_master', False):
+                zone_config = zone
+
+    if zone_config.get('is_read_only', False):
+        rgwadmin(ctx, target_client,
+                 cmd=['zone', 'create', '--rgw-zonegroup', master_zonegroup,
+                      '--rgw-zone', target_zone, '--endpoints', endpoint, '--access-key',
+                      access_key, '--secret',
+                      secret, '--default', '--read-only'],
+                 check_status=True)
+    else:
+        rgwadmin(ctx, target_client,
+                 cmd=['zone', 'create', '--rgw-zonegroup', master_zonegroup,
+                      '--rgw-zone', target_zone, '--endpoints', endpoint, '--access-key',
+                      access_key, '--secret',
+                      secret, '--default'],
+                 check_status=True)
+
+    rgwadmin(ctx, target_client,
+             cmd=['period', 'update', '--commit',
+                  '--access_key',
+                  access_key, '--secret',
+                  secret],
+             check_status=True)
+
+    yield
+
+
+@contextlib.contextmanager
+def restart_rgw(ctx, on_client):
+
+    log.info('Restarting rgw...')
+    log.debug('client %r', on_client)
+    (remote,) = ctx.cluster.only(on_client).remotes.iterkeys()
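+    # assumes the daemon was deployed as systemd unit
+    # ceph-radosgw@rgw.<short hostname>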
+    hostname = remote.name.split('@')[1].split('.')[0]
+    rgw_cmd = [
+        'sudo', 'systemctl', 'restart', 'ceph-radosgw@rgw.{hostname}'.format(hostname=hostname)]
+
+    run_cmd = list(rgw_cmd)
+    remote.run(args=run_cmd)
+
+    wait_for_radosgw(ctx, on_client)
+
+    yield
+
+
+@contextlib.contextmanager
+def failover(ctx, config):
+    """
+    - new-rgw-multisite.failover:
+
+    """
+    # When master is down, bring up secondary as the master zone
+
+    log.info('config %s' % config)
+
+    if config is None:
+        config = {}
+
+    assert isinstance(config, dict), \
+        "task only supports a dictionary for configuration"
+
+    master_zonegroup, master_zone, master_client, target_zone, target_client = \
+        get_config_clients(ctx, ctx.new_rgw_multisite.config)
+
+    # Make secondary zone master
+    rgwadmin(ctx, target_client,
+             cmd=['zone', 'modify', '--rgw-zone', target_zone, '--master', '--default', '--access-key',
+                  access_key, '--secret',
+                  secret],
+             check_status=True)
+
+    # Do period commit
+    rgwadmin(ctx, target_client,
+             cmd=['period', 'update', '--commit',
+                  '--access_key',
+                  access_key, '--secret',
+                  secret],
+             check_status=True)
+
+    # Restart gateway; restart_rgw is a contextmanager, so enter it for the
+    # restart to actually run
+    with restart_rgw(ctx, target_client):
+        pass
+
+    yield
+
+
+@contextlib.contextmanager
+def failback(ctx, config):
+
+    """
+    - new-rgw-multisite.failback:
+    """
+    # When master node is back, failback to original master zone
+
+    log.info('config %s' % config)
+
+    if config is None:
+        config = {}
+
+    assert isinstance(config, dict), \
+        "task only supports a dictionary for configuration"
+
+    master_zonegroup, master_zone, master_client, target_zone, target_client = \
+        get_config_clients(ctx, ctx.new_rgw_multisite.config)
+
+    role_endpoints = extract_endpoints(ctx, target_client)
+    host, port = role_endpoints[target_client]
+
+    endpoint = 'http://{host}:{port}'.format(host=host, port=port)
+
+    # Period pull in former master zone from current master zone
+
+    rgwadmin(ctx, master_client,
+             cmd=['period', 'pull', '--url', endpoint, '--access_key',
+                  access_key, '--secret',
+                  secret],
+             check_status=True)
+
+    # Make the original master zone as master
+
+    rgwadmin(ctx, master_client,
+             cmd=['zone', 'modify', '--rgw-zone', master_zone, '--master', '--default', '--access-key',
+                  access_key, '--secret',
+                  secret],
+             check_status=True)
+
+    # Do period commit
+
+    rgwadmin(ctx, master_client,
+             cmd=['period', 'update', '--commit',
+                  '--access_key',
+                  access_key, '--secret',
+                  secret],
+             check_status=True)
+
+    # Restart gateway; restart_rgw is a contextmanager, so enter it for the
+    # restart to actually run
+    with restart_rgw(ctx, master_client):
+        pass
+
+    # If secondary zone was read-only before failover, explicitly set it to --read-only again.
+
+    zone_config = {}
+
+    zgs = ctx.new_rgw_multisite.config['zonegroups']
+    for zg in zgs:
+        for zone in zg.get('zones'):
+            if not zone.get('is_master', False):
+                zone_config = zone
+
+    if zone_config.get('is_read_only', False):
+        rgwadmin(ctx, target_client,
+                 cmd=['zone', 'modify', '--rgw-zone', target_zone, '--read-only', '--access-key',
+                      access_key, '--secret',
+                      secret],
+                 check_status=True)
+
+        # Do period commit
+        rgwadmin(ctx, target_client,
+                 cmd=['period', 'update', '--commit',
+                      '--access_key',
+                      access_key, '--secret',
+                      secret],
+                 check_status=True)
+
+        # Restart gateway; enter the contextmanager so the restart runs
+        with restart_rgw(ctx, target_client):
+            pass
+
+    yield
+
+
+@contextlib.contextmanager
+def task(ctx, config):
+
+    """
+    - new-rgw-multisite:
+            realm:
+              name: test-realm
+              is_default: true
+            zonegroups:
+              - name: test-zonegroup
+                is_master: true
+                is_default: true
+                zones:
+                  - name: test-zone1
+                    is_master: true
+                    is_default: true
+                    endpoints: [c1.client.0]
+                  - name: test-zone2
+                    is_default: true
+                    is_read_only: true
+                    endpoints: [c2.client.0]
+    """
+
+    log.info('config %s' % config)
+
+    if config is None:
+        config = {}
+
+    assert isinstance(config, dict), \
+        "task only supports a dictionary for configuration"
+
+    zonegroups = {}
+
+    if 'zonegroups' in config:
+        zonegroups = config['zonegroups']
+
+    realm = None
+    if 'realm' in config:
+        realm = config['realm']
+    realm_name = realm.get('name')
+
+    ctx.new_rgw_multisite = argparse.Namespace()
+    ctx.new_rgw_multisite.realm = realm
+    ctx.new_rgw_multisite.zonegroups = zonegroups
+    ctx.new_rgw_multisite.config = config
+
+    master_zonegroup, master_zone, master_client, target_zone, target_client = get_config_clients(ctx, config)
+
+    ctx.new_rgw_multisite.master_client = master_client
+    ctx.new_rgw_multisite.target_client = target_client
+
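+    # run under contextutil.nested, in order: configure the master side,
+    # create the system user, restart the master gateway, pull realm/period
+    # on the target, create the target zone, then restart its gateway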
+    subtasks = [
+        lambda: configure_master_zonegroup_and_zones(
+            ctx=ctx,
+            config=config,
+            master_zonegroup=master_zonegroup,
+            master_zone=master_zone,
+            realm=realm_name,
+            master_client=master_client
+        ),
+    ]
+
+    subtasks.extend([
+        lambda: configure_user_for_client(
+            ctx=ctx,
+            master_client=master_client
+        ),
+    ])
+
+    subtasks.extend([
+        lambda: restart_rgw(ctx=ctx, on_client=master_client),
+    ])
+
+    subtasks.extend([
+        lambda: pull_configuration(ctx=ctx,
+                                   realm=realm_name,
+                                   master_client=master_client,
+                                   target_client=target_client,
+                                   ),
+    ])
+
+    subtasks.extend([
+        lambda: configure_target_zone(ctx=ctx,
+                                      config=config,
+                                      target_zone=target_zone,
+                                      master_zonegroup=master_zonegroup,
+                                      target_client=target_client,
+                                      ),
+    ])
+
+    subtasks.extend([
+        lambda: restart_rgw(ctx=ctx,
+                            on_client=target_client),
+    ])
+
+    with contextutil.nested(*subtasks):
+        yield
+
diff --git a/qa/tasks/verify_io.py b/qa/tasks/verify_io.py
new file mode 100644 (file)
index 0000000..be91485
--- /dev/null
@@ -0,0 +1,69 @@
+import yaml
+import contextlib
+import logging
+from teuthology import misc as teuthology
+from teuthology.orchestra import run
+log = logging.getLogger(__name__)
+
+
+def test_exec(ctx, config, client):
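+    """Run the configured verification script against the io_info.yaml
+    written by the I/O tasks; the script path depends on the scripts
+    version (v1 or v2)."""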
+
+    script_name = config['verification_script'] + ".py"
+    yaml_name = 'io_info.yaml'
+
+    client.run(args=['ls', '-lt', yaml_name])
+
+    if ctx.multisite_test.version == 'v1':
+
+        client.run(
+            args=[
+                run.Raw(
+                    'sudo venv/bin/python2.7 rgw-tests/ceph-qe-scripts/rgw/v1/lib/%s '
+                    % script_name)])
+
+    elif ctx.multisite_test.version == 'v2':
+
+        client.run(
+            args=[
+                run.Raw(
+                    'sudo venv/bin/python2.7 rgw-tests/ceph-qe-scripts/rgw/v2/lib/%s '
+                    % script_name)])
+
+
+@contextlib.contextmanager
+def task(ctx, config):
+
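+    """
+    Usage sketch (the script name here is hypothetical):
+
+    - verify-io:
+        verification_script: read_io_info
+    """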
+    log.info('starting the task')
+
+    log.info('config %s' % config)
+
+    if config is None:
+        config = {}
+
+    assert isinstance(config, dict), \
+        "task verify_io only supports a dictionary for configuration"
+
+    remotes = ctx.cluster.only(teuthology.is_type('rgw'))
+    for remote, roles_for_host in remotes.remotes.iteritems():
+
+        remote.run(args=['virtualenv', 'venv'])
+        remote.run(
+            args=[
+                'source',
+                'venv/bin/activate',
+                run.Raw(';'),
+                run.Raw('pip install boto boto3 names PyYaml ConfigParser'),
+                run.Raw(';'),
+                'deactivate'])
+        test_exec(ctx, config, remote)
+
+    try:
+        yield
+    finally:
+
+        remotes = ctx.cluster.only(teuthology.is_type('rgw'))
+        for remote, roles_for_host in remotes.remotes.iteritems():
+
+            log.info('Verification completed')
+
+            remote.run(args=[run.Raw('sudo rm -rf %s' % 'io_info.yaml')])