]> git-server-git.apps.pok.os.sepia.ceph.com Git - ceph.git/commitdiff
qa: STS Testing Files (AssumeRole and GetSessionToken) 35986/head
authorKalpesh Pandya <kapandya@redhat.com>
Fri, 22 May 2020 20:58:53 +0000 (02:28 +0530)
committerroot <root@localhost.localdomain>
Tue, 15 Sep 2020 06:28:59 +0000 (11:58 +0530)
Added the required files for testing the AssumeRole and GetSessionToken APIs, and modified s3tests.py to handle the same.

(cherry picked from commit c2c90eaf524087925b8a0993098a87132d4940bd)

Signed-off-by: Kalpesh Pandya <kapandya@redhat.com>
14 files changed:
qa/suites/rgw/sts/% [new file with mode: 0644]
qa/suites/rgw/sts/centos_latest.yaml [new file with mode: 0644]
qa/suites/rgw/sts/clusters/fixed-2.yaml [new file with mode: 0644]
qa/suites/rgw/sts/frontend/civetweb.yaml [new file with mode: 0644]
qa/suites/rgw/sts/objectstore/bluestore-bitmap.yaml [new file with mode: 0644]
qa/suites/rgw/sts/objectstore/filestore-xfs.yaml [new file with mode: 0644]
qa/suites/rgw/sts/overrides.yaml [new file with mode: 0644]
qa/suites/rgw/sts/rgw_pool_type/ec-profile.yaml [new file with mode: 0644]
qa/suites/rgw/sts/rgw_pool_type/ec.yaml [new file with mode: 0644]
qa/suites/rgw/sts/rgw_pool_type/replicated.yaml [new file with mode: 0644]
qa/suites/rgw/sts/tasks/+ [new file with mode: 0644]
qa/suites/rgw/sts/tasks/0-install.yaml [new file with mode: 0644]
qa/suites/rgw/sts/tasks/ststests.yaml [new file with mode: 0644]
qa/tasks/s3tests.py

diff --git a/qa/suites/rgw/sts/% b/qa/suites/rgw/sts/%
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/qa/suites/rgw/sts/centos_latest.yaml b/qa/suites/rgw/sts/centos_latest.yaml
new file mode 100644 (file)
index 0000000..24ae800
--- /dev/null
@@ -0,0 +1,6 @@
+os_type: centos
+os_version: "8.1"
+overrides:
+  selinux:
+    whitelist:
+      - scontext=system_u:system_r:logrotate_t:s0
diff --git a/qa/suites/rgw/sts/clusters/fixed-2.yaml b/qa/suites/rgw/sts/clusters/fixed-2.yaml
new file mode 100644 (file)
index 0000000..e4448bb
--- /dev/null
@@ -0,0 +1,12 @@
+roles:
+- [mon.a, mon.c, mgr.y, osd.0, osd.1, osd.2, osd.3, client.0, node-exporter.a]
+- [mon.b, mgr.x, osd.4, osd.5, osd.6, osd.7, client.1, prometheus.a, node-exporter.b]
+openstack:
+- volumes: # attached to each instance
+    count: 4
+    size: 10 # GB
+overrides:
+  ceph:
+    conf:
+      osd:
+        osd shutdown pgref assert: true
diff --git a/qa/suites/rgw/sts/frontend/civetweb.yaml b/qa/suites/rgw/sts/frontend/civetweb.yaml
new file mode 100644 (file)
index 0000000..57e8cc4
--- /dev/null
@@ -0,0 +1,3 @@
+overrides:
+  rgw:
+    frontend: civetweb
diff --git a/qa/suites/rgw/sts/objectstore/bluestore-bitmap.yaml b/qa/suites/rgw/sts/objectstore/bluestore-bitmap.yaml
new file mode 100644 (file)
index 0000000..69c37ac
--- /dev/null
@@ -0,0 +1,42 @@
+overrides:
+  thrashosds:
+    bdev_inject_crash: 2
+    bdev_inject_crash_probability: .5
+  ceph:
+    fs: xfs
+    conf:
+      osd:
+        osd objectstore: bluestore
+        bluestore block size: 96636764160
+        debug bluestore: 20
+        debug bluefs: 20
+        debug rocksdb: 10
+        bluestore fsck on mount: true
+        bluestore allocator: bitmap
+        # lower the full ratios since we can fill up a 100gb osd so quickly
+        mon osd full ratio: .9
+        mon osd backfillfull_ratio: .85
+        mon osd nearfull ratio: .8
+        osd failsafe full ratio: .95
+# this doesn't work with failures bc the log writes are not atomic across the two backends
+#        bluestore bluefs env mirror: true
+        bdev enable discard: true
+        bdev async discard: true
+  ceph-deploy:
+    fs: xfs
+    bluestore: yes
+    conf:
+      osd:
+        osd objectstore: bluestore
+        bluestore block size: 96636764160
+        debug bluestore: 20
+        debug bluefs: 20
+        debug rocksdb: 10
+        bluestore fsck on mount: true
+        # lower the full ratios since we can fill up a 100gb osd so quickly
+        mon osd full ratio: .9
+        mon osd backfillfull_ratio: .85
+        mon osd nearfull ratio: .8
+        osd failsafe full ratio: .95
+        bdev enable discard: true
+        bdev async discard: true
diff --git a/qa/suites/rgw/sts/objectstore/filestore-xfs.yaml b/qa/suites/rgw/sts/objectstore/filestore-xfs.yaml
new file mode 100644 (file)
index 0000000..bd18aca
--- /dev/null
@@ -0,0 +1,14 @@
+overrides:
+  ceph:
+    fs: xfs
+    conf:
+      osd:
+        osd objectstore: filestore
+        osd sloppy crc: true
+  ceph-deploy:
+    fs: xfs
+    filestore: True
+    conf:
+      osd:
+        osd objectstore: filestore
+        osd sloppy crc: true
diff --git a/qa/suites/rgw/sts/overrides.yaml b/qa/suites/rgw/sts/overrides.yaml
new file mode 100644 (file)
index 0000000..1cb4890
--- /dev/null
@@ -0,0 +1,13 @@
+overrides:
+  ceph:
+    wait-for-scrub: false
+    conf:
+      client:
+        setuser: ceph
+        setgroup: ceph
+        debug rgw: 20
+        rgw crypt s3 kms backend: testing
+        rgw crypt s3 kms encryption keys: testkey-1=YmluCmJvb3N0CmJvb3N0LWJ1aWxkCmNlcGguY29uZgo= testkey-2=aWIKTWFrZWZpbGUKbWFuCm91dApzcmMKVGVzdGluZwo=
+        rgw crypt require ssl: false
+  rgw:
+    storage classes: LUKEWARM, FROZEN
diff --git a/qa/suites/rgw/sts/rgw_pool_type/ec-profile.yaml b/qa/suites/rgw/sts/rgw_pool_type/ec-profile.yaml
new file mode 100644 (file)
index 0000000..f6fbf35
--- /dev/null
@@ -0,0 +1,8 @@
+overrides:
+  rgw:
+    ec-data-pool: true
+    erasure_code_profile:
+      name: testprofile
+      k: 3
+      m: 1
+      crush-failure-domain: osd
diff --git a/qa/suites/rgw/sts/rgw_pool_type/ec.yaml b/qa/suites/rgw/sts/rgw_pool_type/ec.yaml
new file mode 100644 (file)
index 0000000..7c0c5e6
--- /dev/null
@@ -0,0 +1,3 @@
+overrides:
+  rgw:
+    ec-data-pool: true
diff --git a/qa/suites/rgw/sts/rgw_pool_type/replicated.yaml b/qa/suites/rgw/sts/rgw_pool_type/replicated.yaml
new file mode 100644 (file)
index 0000000..c91709e
--- /dev/null
@@ -0,0 +1,3 @@
+overrides:
+  rgw:
+    ec-data-pool: false
diff --git a/qa/suites/rgw/sts/tasks/+ b/qa/suites/rgw/sts/tasks/+
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/qa/suites/rgw/sts/tasks/0-install.yaml b/qa/suites/rgw/sts/tasks/0-install.yaml
new file mode 100644 (file)
index 0000000..5ebe672
--- /dev/null
@@ -0,0 +1,20 @@
+# see http://tracker.ceph.com/issues/20360 and http://tracker.ceph.com/issues/18126
+os_type: centos
+
+tasks:
+- install:
+#    flavor: notcmalloc
+- ceph:
+- openssl_keys:
+- rgw:
+    client.0:
+#      valgrind: [--tool=memcheck, --max-threads=1024] # http://tracker.ceph.com/issues/25214
+
+overrides:
+  ceph:
+    conf:
+      global:
+        osd_min_pg_log_entries: 10
+        osd_max_pg_log_entries: 10
+      client:
+        rgw lc debug interval: 10
diff --git a/qa/suites/rgw/sts/tasks/ststests.yaml b/qa/suites/rgw/sts/tasks/ststests.yaml
new file mode 100644 (file)
index 0000000..e5c5273
--- /dev/null
@@ -0,0 +1,12 @@
+tasks:
+- s3tests:
+    client.0:
+      extra_attrs: ['sts_test']
+      force-branch: master
+      rgw_server: client.0
+overrides:
+  ceph:
+    conf:
+      client:
+              rgw sts key: abcdefghijklmnop
+              rgw s3 auth use sts: true
index d0c75cfe63e394d616a655de4d0fc8d858674d66..c0d5af5f3f0e6e06c8d8c70010090a53c1865ae3 100644 (file)
@@ -97,7 +97,7 @@ def create_users(ctx, config):
     assert isinstance(config, dict)
     log.info('Creating rgw users...')
     testdir = teuthology.get_testdir(ctx)
-    users = {'s3 main': 'foo', 's3 alt': 'bar', 's3 tenant': 'testx$tenanteduser'}
+    users = {'s3 main': 'foo', 's3 alt': 'bar', 's3 tenant': 'testx$tenanteduser', 'iam': 'foobar'}
     for client in config['clients']:
         s3tests_conf = config['s3tests_conf'][client]
         s3tests_conf.setdefault('fixtures', {})
@@ -107,40 +107,83 @@ def create_users(ctx, config):
             log.debug('Creating user {user} on {host}'.format(user=s3tests_conf[section]['user_id'], host=client))
             cluster_name, daemon_type, client_id = teuthology.split_role(client)
             client_with_id = daemon_type + '.' + client_id
-            ctx.cluster.only(client).run(
-                args=[
-                    'adjust-ulimits',
-                    'ceph-coverage',
-                    '{tdir}/archive/coverage'.format(tdir=testdir),
-                    'radosgw-admin',
-                    '-n', client_with_id,
-                    'user', 'create',
-                    '--uid', s3tests_conf[section]['user_id'],
-                    '--display-name', s3tests_conf[section]['display_name'],
-                    '--access-key', s3tests_conf[section]['access_key'],
-                    '--secret', s3tests_conf[section]['secret_key'],
-                    '--email', s3tests_conf[section]['email'],
-                    '--caps', 'user-policy=*',
-                    '--cluster', cluster_name,
-                ],
-            )
-            ctx.cluster.only(client).run(
-                args=[
-                    'adjust-ulimits',
-                    'ceph-coverage',
-                    '{tdir}/archive/coverage'.format(tdir=testdir),
-                    'radosgw-admin',
-                    '-n', client_with_id,
-                    'mfa', 'create',
-                    '--uid', s3tests_conf[section]['user_id'],
-                    '--totp-serial', s3tests_conf[section]['totp_serial'],
-                    '--totp-seed', s3tests_conf[section]['totp_seed'],
-                    '--totp-seconds', s3tests_conf[section]['totp_seconds'],
-                    '--totp-window', '8',
-                    '--totp-seed-type', 'base32',
-                    '--cluster', cluster_name,
-                ],
-            )
+            if section=='iam':
+                ctx.cluster.only(client).run(
+                    args=[
+                        'adjust-ulimits',
+                        'ceph-coverage',
+                        '{tdir}/archive/coverage'.format(tdir=testdir),
+                        'radosgw-admin',
+                        '-n', client_with_id,
+                        'user', 'create',
+                        '--uid', s3tests_conf[section]['user_id'],
+                        '--display-name', s3tests_conf[section]['display_name'],
+                        '--access-key', s3tests_conf[section]['access_key'],
+                        '--secret', s3tests_conf[section]['secret_key'],
+                        '--cluster', cluster_name,
+                    ],
+                )
+                ctx.cluster.only(client).run(
+                    args=[
+                        'adjust-ulimits',
+                        'ceph-coverage',
+                        '{tdir}/archive/coverage'.format(tdir=testdir),
+                        'radosgw-admin',
+                        '-n', client_with_id,
+                        'caps', 'add',
+                        '--uid', s3tests_conf[section]['user_id'],
+                        '--caps', 'user-policy=*',
+                        '--cluster', cluster_name,
+                    ],
+                )
+                ctx.cluster.only(client).run(
+                    args=[
+                        'adjust-ulimits',
+                        'ceph-coverage',
+                        '{tdir}/archive/coverage'.format(tdir=testdir),
+                        'radosgw-admin',
+                        '-n', client_with_id,
+                        'caps', 'add',
+                        '--uid', s3tests_conf[section]['user_id'],
+                        '--caps', 'roles=*',
+                        '--cluster', cluster_name,
+                    ],
+                )
+            else: 
+                ctx.cluster.only(client).run(
+                    args=[
+                        'adjust-ulimits',
+                        'ceph-coverage',
+                        '{tdir}/archive/coverage'.format(tdir=testdir),
+                        'radosgw-admin',
+                        '-n', client_with_id,
+                        'user', 'create',
+                        '--uid', s3tests_conf[section]['user_id'],
+                        '--display-name', s3tests_conf[section]['display_name'],
+                        '--access-key', s3tests_conf[section]['access_key'],
+                        '--secret', s3tests_conf[section]['secret_key'],
+                        '--email', s3tests_conf[section]['email'],
+                        '--caps', 'user-policy=*',
+                        '--cluster', cluster_name,
+                    ],
+                )
+                ctx.cluster.only(client).run(
+                    args=[
+                        'adjust-ulimits',
+                        'ceph-coverage',
+                        '{tdir}/archive/coverage'.format(tdir=testdir),
+                        'radosgw-admin',
+                        '-n', client_with_id,
+                        'mfa', 'create',
+                        '--uid', s3tests_conf[section]['user_id'],
+                        '--totp-serial', s3tests_conf[section]['totp_serial'],
+                        '--totp-seed', s3tests_conf[section]['totp_seed'],
+                        '--totp-seconds', s3tests_conf[section]['totp_seconds'],
+                        '--totp-window', '8',
+                        '--totp-seed-type', 'base32',
+                        '--cluster', cluster_name,
+                    ],
+                )
     try:
         yield
     finally:
@@ -289,9 +332,13 @@ def run_tests(ctx, config):
         else:
             args += ['REQUESTS_CA_BUNDLE=/etc/pki/tls/certs/ca-bundle.crt']
         # civetweb > 1.8 && beast parsers are strict on rfc2616
-        attrs = ["!fails_on_rgw", "!lifecycle_expiration", "!fails_strict_rfc2616","!s3select"]
+        attrs = ["!fails_on_rgw", "!lifecycle_expiration", "!fails_strict_rfc2616","!s3select","!sts_test"]
         if client_config.get('calling-format') != 'ordinary':
             attrs += ['!fails_with_subdomain']
+       
+        if 'extra_attrs' in client_config:
+            attrs = client_config.get('extra_attrs')
+
         args += [
             '{tdir}/s3-tests/virtualenv/bin/python'.format(tdir=testdir),
             '-m', 'nose',
@@ -432,6 +479,7 @@ def task(ctx, config):
                 's3 main'  : {},
                 's3 alt'   : {},
                's3 tenant': {},
+                'iam'      : {},
                 }
             )