--- /dev/null
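+# BlueStore with the bitmap allocator; OSD thrashing injects bdev crashes to exercise crash recovery.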
+overrides:
+  thrashosds:
+    bdev_inject_crash: 2
+    bdev_inject_crash_probability: .5
+  ceph:
+    fs: xfs
+    conf:
+      osd:
+        osd objectstore: bluestore
+        bluestore block size: 96636764160
+        debug bluestore: 20
+        debug bluefs: 20
+        debug rocksdb: 10
+        bluestore fsck on mount: true
+        bluestore allocator: bitmap
+        # lower the full ratios since we can fill up a 100GB osd so quickly
+        mon osd full ratio: .9
+        mon osd backfillfull ratio: .85
+        mon osd nearfull ratio: .8
+        osd failsafe full ratio: .95
+        # this doesn't work with failures because the log writes are not atomic across the two backends
+        # bluestore bluefs env mirror: true
+        bdev enable discard: true
+        bdev async discard: true
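+  # the same OSD settings again for clusters provisioned with ceph-deploy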
+  ceph-deploy:
+    fs: xfs
+    bluestore: yes
+    conf:
+      osd:
+        osd objectstore: bluestore
+        bluestore block size: 96636764160
+        debug bluestore: 20
+        debug bluefs: 20
+        debug rocksdb: 10
+        bluestore fsck on mount: true
+        # lower the full ratios since we can fill up a 100GB osd so quickly
+        mon osd full ratio: .9
+        mon osd backfillfull ratio: .85
+        mon osd nearfull ratio: .8
+        osd failsafe full ratio: .95
+        bdev enable discard: true
+        bdev async discard: true
    assert isinstance(config, dict)
    log.info('Creating rgw users...')
    testdir = teuthology.get_testdir(ctx)
-    users = {'s3 main': 'foo', 's3 alt': 'bar', 's3 tenant': 'testx$tenanteduser'}
+    users = {'s3 main': 'foo', 's3 alt': 'bar', 's3 tenant': 'testx$tenanteduser', 'iam': 'foobar'}
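+    # the extra 'iam' user is provisioned below with user-policy and roles caps for the IAM/STS tests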
    for client in config['clients']:
        s3tests_conf = config['s3tests_conf'][client]
        s3tests_conf.setdefault('fixtures', {})
        for section, user in users.items():
            _config_user(s3tests_conf, section, '{user}.{client}'.format(user=user, client=client))
            log.debug('Creating user {user} on {host}'.format(user=s3tests_conf[section]['user_id'], host=client))
            cluster_name, daemon_type, client_id = teuthology.split_role(client)
            client_with_id = daemon_type + '.' + client_id
-            ctx.cluster.only(client).run(
-                args=[
-                    'adjust-ulimits',
-                    'ceph-coverage',
-                    '{tdir}/archive/coverage'.format(tdir=testdir),
-                    'radosgw-admin',
-                    '-n', client_with_id,
-                    'user', 'create',
-                    '--uid', s3tests_conf[section]['user_id'],
-                    '--display-name', s3tests_conf[section]['display_name'],
-                    '--access-key', s3tests_conf[section]['access_key'],
-                    '--secret', s3tests_conf[section]['secret_key'],
-                    '--email', s3tests_conf[section]['email'],
-                    '--caps', 'user-policy=*',
-                    '--cluster', cluster_name,
-                ],
-            )
-            ctx.cluster.only(client).run(
-                args=[
-                    'adjust-ulimits',
-                    'ceph-coverage',
-                    '{tdir}/archive/coverage'.format(tdir=testdir),
-                    'radosgw-admin',
-                    '-n', client_with_id,
-                    'mfa', 'create',
-                    '--uid', s3tests_conf[section]['user_id'],
-                    '--totp-serial', s3tests_conf[section]['totp_serial'],
-                    '--totp-seed', s3tests_conf[section]['totp_seed'],
-                    '--totp-seconds', s3tests_conf[section]['totp_seconds'],
-                    '--totp-window', '8',
-                    '--totp-seed-type', 'base32',
-                    '--cluster', cluster_name,
-                ],
-            )
+            if section == 'iam':
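+                # the IAM user is created without an email or MFA device; it gets extra caps instead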
+                ctx.cluster.only(client).run(
+                    args=[
+                        'adjust-ulimits',
+                        'ceph-coverage',
+                        '{tdir}/archive/coverage'.format(tdir=testdir),
+                        'radosgw-admin',
+                        '-n', client_with_id,
+                        'user', 'create',
+                        '--uid', s3tests_conf[section]['user_id'],
+                        '--display-name', s3tests_conf[section]['display_name'],
+                        '--access-key', s3tests_conf[section]['access_key'],
+                        '--secret', s3tests_conf[section]['secret_key'],
+                        '--cluster', cluster_name,
+                    ],
+                )
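+                # grant user-policy and roles caps for the IAM/STS test cases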
+                ctx.cluster.only(client).run(
+                    args=[
+                        'adjust-ulimits',
+                        'ceph-coverage',
+                        '{tdir}/archive/coverage'.format(tdir=testdir),
+                        'radosgw-admin',
+                        '-n', client_with_id,
+                        'caps', 'add',
+                        '--uid', s3tests_conf[section]['user_id'],
+                        '--caps', 'user-policy=*',
+                        '--cluster', cluster_name,
+                    ],
+                )
+                ctx.cluster.only(client).run(
+                    args=[
+                        'adjust-ulimits',
+                        'ceph-coverage',
+                        '{tdir}/archive/coverage'.format(tdir=testdir),
+                        'radosgw-admin',
+                        '-n', client_with_id,
+                        'caps', 'add',
+                        '--uid', s3tests_conf[section]['user_id'],
+                        '--caps', 'roles=*',
+                        '--cluster', cluster_name,
+                    ],
+                )
+            else:
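+                # ordinary s3 users: include an email and seed a TOTP token for the MFA tests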
+                ctx.cluster.only(client).run(
+                    args=[
+                        'adjust-ulimits',
+                        'ceph-coverage',
+                        '{tdir}/archive/coverage'.format(tdir=testdir),
+                        'radosgw-admin',
+                        '-n', client_with_id,
+                        'user', 'create',
+                        '--uid', s3tests_conf[section]['user_id'],
+                        '--display-name', s3tests_conf[section]['display_name'],
+                        '--access-key', s3tests_conf[section]['access_key'],
+                        '--secret', s3tests_conf[section]['secret_key'],
+                        '--email', s3tests_conf[section]['email'],
+                        '--caps', 'user-policy=*',
+                        '--cluster', cluster_name,
+                    ],
+                )
+                ctx.cluster.only(client).run(
+                    args=[
+                        'adjust-ulimits',
+                        'ceph-coverage',
+                        '{tdir}/archive/coverage'.format(tdir=testdir),
+                        'radosgw-admin',
+                        '-n', client_with_id,
+                        'mfa', 'create',
+                        '--uid', s3tests_conf[section]['user_id'],
+                        '--totp-serial', s3tests_conf[section]['totp_serial'],
+                        '--totp-seed', s3tests_conf[section]['totp_seed'],
+                        '--totp-seconds', s3tests_conf[section]['totp_seconds'],
+                        '--totp-window', '8',
+                        '--totp-seed-type', 'base32',
+                        '--cluster', cluster_name,
+                    ],
+                )
    try:
        yield
    finally:
        else:
            args += ['REQUESTS_CA_BUNDLE=/etc/pki/tls/certs/ca-bundle.crt']
        # civetweb > 1.8 && beast parsers are strict on rfc2616
- attrs = ["!fails_on_rgw", "!lifecycle_expiration", "!fails_strict_rfc2616","!s3select"]
+ attrs = ["!fails_on_rgw", "!lifecycle_expiration", "!fails_strict_rfc2616","!s3select","!sts_test"]
        if client_config.get('calling-format') != 'ordinary':
            attrs += ['!fails_with_subdomain']
+
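+        # an 'extra_attrs' entry in the job config replaces the default attr list entirely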
+        if 'extra_attrs' in client_config:
+            attrs = client_config.get('extra_attrs')
+
        args += [
            '{tdir}/s3-tests/virtualenv/bin/python'.format(tdir=testdir),
            '-m', 'nose',
                's3 main'  : {},
                's3 alt'   : {},
                's3 tenant': {},
+                'iam'      : {},
                }
            )