git.apps.os.sepia.ceph.com Git - s3-tests.git/commitdiff
rename s3tests_boto3/ back to s3tests/ [ceph-squid]
author    Casey Bodley <cbodley@redhat.com>
          Fri, 19 Sep 2025 14:12:33 +0000 (10:12 -0400)
committer Casey Bodley <cbodley@redhat.com>
          Wed, 15 Oct 2025 14:52:42 +0000 (10:52 -0400)
Signed-off-by: Casey Bodley <cbodley@redhat.com>
(cherry picked from commit 73c3b988a964e8897f74cba4f5c913663d294794)

29 files changed:
README.rst
s3tests/__init__.py [new file with mode: 0644]
s3tests/common.py [new file with mode: 0644]
s3tests/functional/__init__.py [new file with mode: 0644]
s3tests/functional/iam.py [new file with mode: 0644]
s3tests/functional/policy.py [new file with mode: 0644]
s3tests/functional/rgw_interactive.py [new file with mode: 0644]
s3tests/functional/test_headers.py [new file with mode: 0644]
s3tests/functional/test_iam.py [new file with mode: 0644]
s3tests/functional/test_s3.py [new file with mode: 0644]
s3tests/functional/test_s3select.py [new file with mode: 0644]
s3tests/functional/test_sns.py [new file with mode: 0644]
s3tests/functional/test_sts.py [new file with mode: 0644]
s3tests/functional/test_utils.py [new file with mode: 0644]
s3tests/functional/utils.py [new file with mode: 0644]
s3tests_boto3/__init__.py [deleted file]
s3tests_boto3/common.py [deleted file]
s3tests_boto3/functional/__init__.py [deleted file]
s3tests_boto3/functional/iam.py [deleted file]
s3tests_boto3/functional/policy.py [deleted file]
s3tests_boto3/functional/rgw_interactive.py [deleted file]
s3tests_boto3/functional/test_headers.py [deleted file]
s3tests_boto3/functional/test_iam.py [deleted file]
s3tests_boto3/functional/test_s3.py [deleted file]
s3tests_boto3/functional/test_s3select.py [deleted file]
s3tests_boto3/functional/test_sns.py [deleted file]
s3tests_boto3/functional/test_sts.py [deleted file]
s3tests_boto3/functional/test_utils.py [deleted file]
s3tests_boto3/functional/utils.py [deleted file]

diff --git a/README.rst b/README.rst
index 57e3c848b948bb7cda8070a60d68f41f04f4093e..4c2bc34fd4b1c1a529046fc1af5da9302c1181d3 100644
@@ -22,15 +22,15 @@ Once you have that file copied and edited, you can run the tests with::
 
 You can specify which directory of tests to run::
 
-       S3TEST_CONF=your.conf tox -- s3tests_boto3/functional
+       S3TEST_CONF=your.conf tox -- s3tests/functional
 
 You can specify which file of tests to run::
 
-       S3TEST_CONF=your.conf tox -- s3tests_boto3/functional/test_s3.py
+       S3TEST_CONF=your.conf tox -- s3tests/functional/test_s3.py
 
 You can specify which test to run::
 
-       S3TEST_CONF=your.conf tox -- s3tests_boto3/functional/test_s3.py::test_bucket_list_empty
+       S3TEST_CONF=your.conf tox -- s3tests/functional/test_s3.py::test_bucket_list_empty
 
 Some tests have attributes set based on their current reliability and
 things like AWS not enforcing their spec strictly. You can filter tests
@@ -44,13 +44,13 @@ located in the ``s3test_boto3`` directory.
 
 You can run only the boto3 tests with::
 
-       S3TEST_CONF=your.conf tox -- s3tests_boto3/functional
+       S3TEST_CONF=your.conf tox -- s3tests/functional
 
 ========================
  STS compatibility tests
 ========================
 
-This section contains some basic tests for the AssumeRole, GetSessionToken and AssumeRoleWithWebIdentity APIs. The test file is located under ``s3tests_boto3/functional``.
+This section contains some basic tests for the AssumeRole, GetSessionToken and AssumeRoleWithWebIdentity APIs. The test file is located under ``s3tests/functional``.
 
 To run the STS tests, the vstart cluster should be started with the following parameter (in addition to any parameters already used with it)::
 
@@ -63,11 +63,11 @@ After the cluster is up the following command should be executed::
 
 You can run only the sts tests (all three APIs) with::
 
-        S3TEST_CONF=your.conf tox -- s3tests_boto3/functional/test_sts.py
+        S3TEST_CONF=your.conf tox -- s3tests/functional/test_sts.py
 
 You can filter tests based on their attributes. There is an attribute named ``test_of_sts`` to run AssumeRole and GetSessionToken tests and ``webidentity_test`` to run the AssumeRoleWithWebIdentity tests. If you want to execute only ``test_of_sts`` tests you can apply that filter as below::
 
-        S3TEST_CONF=your.conf tox -- -m test_of_sts s3tests_boto3/functional/test_sts.py
+        S3TEST_CONF=your.conf tox -- -m test_of_sts s3tests/functional/test_sts.py
 
 For running ``webidentity_test`` you'll need to have Keycloak running.
 
@@ -89,14 +89,14 @@ Adding above capabilities to "iam" user is also taken care by vstart (If Ceph cl
 To run these tests, create a configuration file with the "iam" and "s3 alt" sections (refer to s3tests.conf.SAMPLE).
 Once you have that configuration file copied and edited, you can run all the tests with::
 
-       S3TEST_CONF=your.conf tox -- s3tests_boto3/functional/test_iam.py
+       S3TEST_CONF=your.conf tox -- s3tests/functional/test_iam.py
 
 You can also specify a specific test to run::
 
-       S3TEST_CONF=your.conf tox -- s3tests_boto3/functional/test_iam.py::test_put_user_policy
+       S3TEST_CONF=your.conf tox -- s3tests/functional/test_iam.py::test_put_user_policy
 
 Some tests have attributes set such as "fails_on_rgw".
 You can filter tests based on their attributes::
 
-       S3TEST_CONF=your.conf tox -- s3tests_boto3/functional/test_iam.py -m 'not fails_on_rgw'
+       S3TEST_CONF=your.conf tox -- s3tests/functional/test_iam.py -m 'not fails_on_rgw'
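
For orientation, here is an illustrative sketch (not part of this commit) of the ``your.conf`` file referenced above. The section and option names follow what ``setup()`` in ``s3tests/functional/__init__.py`` (added below) reads; all values are placeholders, and ``s3tests.conf.SAMPLE`` in the repository remains the authoritative template::

    [DEFAULT]
    host = localhost
    port = 8000
    is_secure = no

    [fixtures]
    bucket prefix = test-{random}-

    [s3 main]
    access_key = <main access key>
    secret_key = <main secret key>
    display_name = tester
    user_id = testid
    email = tester@example.com

    # "s3 alt", "s3 tenant", "iam", "iam root" and "iam alt root" follow the
    # same pattern with their own credentials; the STS web identity tests
    # additionally read a "webidentity" section.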
 
diff --git a/s3tests/__init__.py b/s3tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/s3tests/common.py b/s3tests/common.py
new file mode 100644
index 0000000..987ec6b
--- /dev/null
@@ -0,0 +1,301 @@
+import boto.s3.connection
+import munch
+import itertools
+import os
+import random
+import string
+import yaml
+import re
+from lxml import etree
+
+from doctest import Example
+from lxml.doctestcompare import LXMLOutputChecker
+
+s3 = munch.Munch()
+config = munch.Munch()
+prefix = ''
+
+bucket_counter = itertools.count(1)
+key_counter = itertools.count(1)
+
+def choose_bucket_prefix(template, max_len=30):
+    """
+    Choose a prefix for our test buckets, so they're easy to identify.
+
+    Use template and feed it more and more random filler, until it's
+    as long as possible but still below max_len.
+    """
+    rand = ''.join(
+        random.choice(string.ascii_lowercase + string.digits)
+        for c in range(255)
+        )
+
+    while rand:
+        s = template.format(random=rand)
+        if len(s) <= max_len:
+            return s
+        rand = rand[:-1]
+
+    raise RuntimeError(
+        'Bucket prefix template is impossible to fulfill: {template!r}'.format(
+            template=template,
+            ),
+        )
+
+def nuke_bucket(bucket):
+    try:
+        bucket.set_canned_acl('private')
+        # TODO: deleted_cnt and the while loop is a work around for rgw
+        # not sending the
+        deleted_cnt = 1
+        while deleted_cnt:
+            deleted_cnt = 0
+            for key in bucket.list():
+                print('Cleaning bucket {bucket} key {key}'.format(
+                    bucket=bucket,
+                    key=key,
+                    ))
+                key.set_canned_acl('private')
+                key.delete()
+                deleted_cnt += 1
+        bucket.delete()
+    except boto.exception.S3ResponseError as e:
+        # TODO workaround for buggy rgw that fails to send
+        # error_code, remove
+        if (e.status == 403
+            and e.error_code is None
+            and e.body == ''):
+            e.error_code = 'AccessDenied'
+        if e.error_code != 'AccessDenied':
+            print('GOT UNWANTED ERROR', e.error_code)
+            raise
+        # seems like we're not the owner of the bucket; ignore
+        pass
+
+def nuke_prefixed_buckets():
+    for name, conn in list(s3.items()):
+        print('Cleaning buckets from connection {name}'.format(name=name))
+        for bucket in conn.get_all_buckets():
+            if bucket.name.startswith(prefix):
+                print('Cleaning bucket {bucket}'.format(bucket=bucket))
+                nuke_bucket(bucket)
+
+    print('Done with cleanup of test buckets.')
+
+def read_config(fp):
+    config = munch.Munch()
+    g = yaml.safe_load_all(fp)
+    for new in g:
+        config.update(munch.munchify(new))
+    return config
+
+def connect(conf):
+    mapping = dict(
+        port='port',
+        host='host',
+        is_secure='is_secure',
+        access_key='aws_access_key_id',
+        secret_key='aws_secret_access_key',
+        )
+    kwargs = dict((mapping[k],v) for (k,v) in conf.items() if k in mapping)
+    #process calling_format argument
+    calling_formats = dict(
+        ordinary=boto.s3.connection.OrdinaryCallingFormat(),
+        subdomain=boto.s3.connection.SubdomainCallingFormat(),
+        vhost=boto.s3.connection.VHostCallingFormat(),
+        )
+    kwargs['calling_format'] = calling_formats['ordinary']
+    if 'calling_format' in conf:
+        raw_calling_format = conf['calling_format']
+        try:
+            kwargs['calling_format'] = calling_formats[raw_calling_format]
+        except KeyError:
+            raise RuntimeError(
+                'calling_format unknown: %r' % raw_calling_format
+                )
+    # TODO test vhost calling format
+    conn = boto.s3.connection.S3Connection(**kwargs)
+    return conn
+
+def setup():
+    global s3, config, prefix
+    s3.clear()
+    config.clear()
+
+    try:
+        path = os.environ['S3TEST_CONF']
+    except KeyError:
+        raise RuntimeError(
+            'To run tests, point environment '
+            + 'variable S3TEST_CONF to a config file.',
+            )
+    with open(path) as f:
+        config.update(read_config(f))
+
+    # These 3 should always be present.
+    if 's3' not in config:
+        raise RuntimeError('Your config file is missing the s3 section!')
+    if 'defaults' not in config.s3:
+        raise RuntimeError('Your config file is missing the s3.defaults section!')
+    if 'fixtures' not in config:
+        raise RuntimeError('Your config file is missing the fixtures section!')
+
+    template = config.fixtures.get('bucket prefix', 'test-{random}-')
+    prefix = choose_bucket_prefix(template=template)
+    if prefix == '':
+        raise RuntimeError("Empty Prefix! Aborting!")
+
+    defaults = config.s3.defaults
+    for section in list(config.s3.keys()):
+        if section == 'defaults':
+            continue
+
+        conf = {}
+        conf.update(defaults)
+        conf.update(config.s3[section])
+        conn = connect(conf)
+        s3[section] = conn
+
+    # WARNING! we actively delete all buckets we see with the prefix
+    # we've chosen! Choose your prefix with care, and don't reuse
+    # credentials!
+
+    # We also assume nobody else is going to use buckets with that
+    # prefix. This is racy but given enough randomness, should not
+    # really fail.
+    nuke_prefixed_buckets()
+
+def get_new_bucket(connection=None):
+    """
+    Get a bucket that exists and is empty.
+
+    Always recreates a bucket from scratch. This is useful to also
+    reset ACLs and such.
+    """
+    if connection is None:
+        connection = s3.main
+    name = '{prefix}{num}'.format(
+        prefix=prefix,
+        num=next(bucket_counter),
+        )
+    # the only way for this to fail with a pre-existing bucket is if
+    # someone raced us between setup nuke_prefixed_buckets and here;
+    # ignore that as astronomically unlikely
+    bucket = connection.create_bucket(name)
+    return bucket
+
+def teardown():
+    nuke_prefixed_buckets()
+
+def with_setup_kwargs(setup, teardown=None):
+    """Decorator to add setup and/or teardown methods to a test function::
+
+      @with_setup_args(setup, teardown)
+      def test_something():
+          " ... "
+
+    The setup function should return (kwargs) which will be passed to
+    test function, and teardown function.
+
+    Note that `with_setup_kwargs` is useful *only* for test functions, not for test
+    methods or inside of TestCase subclasses.
+    """
+    def decorate(func):
+        kwargs = {}
+
+        def test_wrapped(*args, **kwargs2):
+            k2 = kwargs.copy()
+            k2.update(kwargs2)
+            k2['testname'] = func.__name__
+            func(*args, **k2)
+
+        test_wrapped.__name__ = func.__name__
+
+        def setup_wrapped():
+            k = setup()
+            kwargs.update(k)
+            if hasattr(func, 'setup'):
+                func.setup()
+        test_wrapped.setup = setup_wrapped
+
+        if teardown:
+            def teardown_wrapped():
+                if hasattr(func, 'teardown'):
+                    func.teardown()
+                teardown(**kwargs)
+
+            test_wrapped.teardown = teardown_wrapped
+        else:
+            if hasattr(func, 'teardown'):
+                test_wrapped.teardown = func.teardown
+        return test_wrapped
+    return decorate
+
+# Demo case for the above, when you run test_gen():
+# _test_gen will run twice,
+# with the following stderr printing
+# setup_func {'b': 2}
+# testcase ('1',) {'b': 2, 'testname': '_test_gen'}
+# teardown_func {'b': 2}
+# setup_func {'b': 2}
+# testcase () {'b': 2, 'testname': '_test_gen'}
+# teardown_func {'b': 2}
+# 
+#def setup_func():
+#    kwargs = {'b': 2}
+#    print("setup_func", kwargs, file=sys.stderr)
+#    return kwargs
+#
+#def teardown_func(**kwargs):
+#    print("teardown_func", kwargs, file=sys.stderr)
+#
+#@with_setup_kwargs(setup=setup_func, teardown=teardown_func)
+#def _test_gen(*args, **kwargs):
+#    print("testcase", args, kwargs, file=sys.stderr)
+#
+#def test_gen():
+#    yield _test_gen, '1'
+#    yield _test_gen
+
+def trim_xml(xml_str):
+    p = etree.XMLParser(remove_blank_text=True)
+    elem = etree.XML(xml_str, parser=p)
+    return etree.tostring(elem)
+
+def normalize_xml(xml, pretty_print=True):
+    if xml is None:
+        return xml
+
+    root = etree.fromstring(xml.encode(encoding='ascii'))
+
+    for element in root.iter('*'):
+        if element.text is not None and not element.text.strip():
+            element.text = None
+        if element.text is not None:
+            element.text = element.text.strip().replace("\n", "").replace("\r", "")
+        if element.tail is not None and not element.tail.strip():
+            element.tail = None
+        if element.tail is not None:
+            element.tail = element.tail.strip().replace("\n", "").replace("\r", "")
+
+    # Sort the elements
+    for parent in root.xpath('//*[./*]'): # Search for parent elements
+          parent[:] = sorted(parent,key=lambda x: x.tag)
+
+    xmlstr = etree.tostring(root, encoding="utf-8", xml_declaration=True, pretty_print=pretty_print)
+    # there are two different DTD URIs
+    xmlstr = re.sub(r'xmlns="[^"]+"', 'xmlns="s3"', xmlstr)
+    xmlstr = re.sub(r'xmlns=\'[^\']+\'', 'xmlns="s3"', xmlstr)
+    for uri in ['http://doc.s3.amazonaws.com/doc/2006-03-01/', 'http://s3.amazonaws.com/doc/2006-03-01/']:
+        xmlstr = xmlstr.replace(uri, 'URI-DTD')
+    #xmlstr = re.sub(r'>\s+', '>', xmlstr, count=0, flags=re.MULTILINE)
+    return xmlstr
+
+def assert_xml_equal(got, want):
+    assert want is not None, 'Wanted XML cannot be None'
+    if got is None:
+        raise AssertionError('Got input to validate was None')
+    checker = LXMLOutputChecker()
+    if not checker.check_output(want, got, 0):
+        message = checker.output_difference(Example("", want), got, 0)
+        raise AssertionError(message)
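
An illustrative usage sketch (not part of this commit) for the helpers defined in ``s3tests/common.py`` above, assuming its imports (``boto``, ``munch``, ``yaml``, ``lxml``) are installed and the package is importable from the repository root::

    from s3tests.common import choose_bucket_prefix, assert_xml_equal

    # Fill the {random} placeholder until the result fits under max_len.
    prefix = choose_bucket_prefix('test-{random}-', max_len=30)
    assert prefix.startswith('test-') and len(prefix) <= 30

    # The LXMLOutputChecker-based comparison ignores whitespace-only differences.
    assert_xml_equal('<Owner><ID>abc</ID></Owner>',
                     '<Owner>\n  <ID>abc</ID>\n</Owner>')
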
diff --git a/s3tests/functional/__init__.py b/s3tests/functional/__init__.py
new file mode 100644
index 0000000..555cafc
--- /dev/null
@@ -0,0 +1,766 @@
+import pytest
+import boto3
+from botocore import UNSIGNED
+from botocore.client import Config
+from botocore.exceptions import ClientError
+from botocore.handlers import disable_signing
+import configparser
+import datetime
+import time
+import os
+import munch
+import random
+import string
+import itertools
+import urllib3
+import re
+
+config = munch.Munch
+
+# this will be assigned by setup()
+prefix = None
+
+def get_prefix():
+    assert prefix is not None
+    return prefix
+
+def choose_bucket_prefix(template, max_len=30):
+    """
+    Choose a prefix for our test buckets, so they're easy to identify.
+
+    Use template and feed it more and more random filler, until it's
+    as long as possible but still below max_len.
+    """
+    rand = ''.join(
+        random.choice(string.ascii_lowercase + string.digits)
+        for c in range(255)
+        )
+
+    while rand:
+        s = template.format(random=rand)
+        if len(s) <= max_len:
+            return s
+        rand = rand[:-1]
+
+    raise RuntimeError(
+        'Bucket prefix template is impossible to fulfill: {template!r}'.format(
+            template=template,
+            ),
+        )
+
+def get_buckets_list(client=None, prefix=None):
+    if client == None:
+        client = get_client()
+    if prefix == None:
+        prefix = get_prefix()
+    response = client.list_buckets()
+    bucket_dicts = response['Buckets']
+    buckets_list = []
+    for bucket in bucket_dicts:
+        if prefix in bucket['Name']:
+            buckets_list.append(bucket['Name'])
+
+    return buckets_list
+
+def get_objects_list(bucket, client=None, prefix=None):
+    if client == None:
+        client = get_client()
+
+    if prefix == None:
+        response = client.list_objects(Bucket=bucket)
+    else:
+        response = client.list_objects(Bucket=bucket, Prefix=prefix)
+    objects_list = []
+
+    if 'Contents' in response:
+        contents = response['Contents']
+        for obj in contents:
+            objects_list.append(obj['Key'])
+
+    return objects_list
+
+# generator function that returns object listings in batches, where each
+# batch is a list of dicts compatible with delete_objects()
+def list_versions(client, bucket, batch_size):
+    kwargs = {'Bucket': bucket, 'MaxKeys': batch_size}
+    truncated = True
+    while truncated:
+        listing = client.list_object_versions(**kwargs)
+
+        kwargs['KeyMarker'] = listing.get('NextKeyMarker')
+        kwargs['VersionIdMarker'] = listing.get('NextVersionIdMarker')
+        truncated = listing['IsTruncated']
+
+        objs = listing.get('Versions', []) + listing.get('DeleteMarkers', [])
+        if len(objs):
+            yield [{'Key': o['Key'], 'VersionId': o['VersionId']} for o in objs]
+
+def nuke_bucket(client, bucket):
+    batch_size = 128
+    max_retain_date = None
+
+    # list and delete objects in batches
+    for objects in list_versions(client, bucket, batch_size):
+        delete = client.delete_objects(Bucket=bucket,
+                Delete={'Objects': objects, 'Quiet': True},
+                BypassGovernanceRetention=True)
+
+        # check for object locks on 403 AccessDenied errors
+        for err in delete.get('Errors', []):
+            if err.get('Code') != 'AccessDenied':
+                continue
+            try:
+                res = client.get_object_retention(Bucket=bucket,
+                        Key=err['Key'], VersionId=err['VersionId'])
+                retain_date = res['Retention']['RetainUntilDate']
+                if not max_retain_date or max_retain_date < retain_date:
+                    max_retain_date = retain_date
+            except ClientError:
+                pass
+
+    if max_retain_date:
+        # wait out the retention period (up to 60 seconds)
+        now = datetime.datetime.now(max_retain_date.tzinfo)
+        if max_retain_date > now:
+            delta = max_retain_date - now
+            if delta.total_seconds() > 60:
+                raise RuntimeError('bucket {} still has objects \
+locked for {} more seconds, not waiting for \
+bucket cleanup'.format(bucket, delta.total_seconds()))
+            print('nuke_bucket', bucket, 'waiting', delta.total_seconds(),
+                    'seconds for object locks to expire')
+            time.sleep(delta.total_seconds())
+
+        for objects in list_versions(client, bucket, batch_size):
+            client.delete_objects(Bucket=bucket,
+                    Delete={'Objects': objects, 'Quiet': True},
+                    BypassGovernanceRetention=True)
+
+    client.delete_bucket(Bucket=bucket)
+
+def nuke_prefixed_buckets(prefix, client=None):
+    if client == None:
+        client = get_client()
+
+    buckets = get_buckets_list(client, prefix)
+
+    err = None
+    for bucket_name in buckets:
+        try:
+            nuke_bucket(client, bucket_name)
+        except Exception as e:
+            # The exception shouldn't be raised when doing cleanup. Pass and continue
+            # the bucket cleanup process. Otherwise left buckets wouldn't be cleared
+            # resulting in some kind of resource leak. err is used to hint user some
+            # exception once occurred.
+            err = e
+            pass
+    if err:
+        raise err
+
+    print('Done with cleanup of buckets in tests.')
+
+def configured_storage_classes():
+    sc = ['STANDARD']
+
+    extra_sc = re.split(r"[\b\W\b]+", config.storage_classes)
+
+    for item in extra_sc:
+        if item != 'STANDARD':
+             sc.append(item)
+
+    sc = [i for i in sc if i]
+    print("storage classes configured: " + str(sc))
+
+    return sc
+
+def setup():
+    cfg = configparser.RawConfigParser()
+    try:
+        path = os.environ['S3TEST_CONF']
+    except KeyError:
+        raise RuntimeError(
+            'To run tests, point environment '
+            + 'variable S3TEST_CONF to a config file.',
+            )
+    cfg.read(path)
+
+    if not cfg.defaults():
+        raise RuntimeError('Your config file is missing the DEFAULT section!')
+    if not cfg.has_section("s3 main"):
+        raise RuntimeError('Your config file is missing the "s3 main" section!')
+    if not cfg.has_section("s3 alt"):
+        raise RuntimeError('Your config file is missing the "s3 alt" section!')
+    if not cfg.has_section("s3 tenant"):
+        raise RuntimeError('Your config file is missing the "s3 tenant" section!')
+
+    global prefix
+
+    defaults = cfg.defaults()
+
+    # vars from the DEFAULT section
+    config.default_host = defaults.get("host")
+    config.default_port = int(defaults.get("port"))
+    config.default_is_secure = cfg.getboolean('DEFAULT', "is_secure")
+
+    proto = 'https' if config.default_is_secure else 'http'
+    config.default_endpoint = "%s://%s:%d" % (proto, config.default_host, config.default_port)
+
+    try:
+        config.default_ssl_verify = cfg.getboolean('DEFAULT', "ssl_verify")
+    except configparser.NoOptionError:
+        config.default_ssl_verify = False
+
+    # Disable InsecureRequestWarning reported by urllib3 when ssl_verify is False
+    if not config.default_ssl_verify:
+        urllib3.disable_warnings()
+
+    # vars from the main section
+    config.main_access_key = cfg.get('s3 main',"access_key")
+    config.main_secret_key = cfg.get('s3 main',"secret_key")
+    config.main_display_name = cfg.get('s3 main',"display_name")
+    config.main_user_id = cfg.get('s3 main',"user_id")
+    config.main_email = cfg.get('s3 main',"email")
+    try:
+        config.main_kms_keyid = cfg.get('s3 main',"kms_keyid")
+    except (configparser.NoSectionError, configparser.NoOptionError):
+        config.main_kms_keyid = 'testkey-1'
+
+    try:
+        config.main_kms_keyid2 = cfg.get('s3 main',"kms_keyid2")
+    except (configparser.NoSectionError, configparser.NoOptionError):
+        config.main_kms_keyid2 = 'testkey-2'
+
+    try:
+        config.main_api_name = cfg.get('s3 main',"api_name")
+    except (configparser.NoSectionError, configparser.NoOptionError):
+        config.main_api_name = ""
+        pass
+
+    try:
+        config.storage_classes = cfg.get('s3 main',"storage_classes")
+    except (configparser.NoSectionError, configparser.NoOptionError):
+        config.storage_classes = ""
+        pass
+
+    try:
+        config.lc_debug_interval = int(cfg.get('s3 main',"lc_debug_interval"))
+    except (configparser.NoSectionError, configparser.NoOptionError):
+        config.lc_debug_interval = 10
+
+    config.alt_access_key = cfg.get('s3 alt',"access_key")
+    config.alt_secret_key = cfg.get('s3 alt',"secret_key")
+    config.alt_display_name = cfg.get('s3 alt',"display_name")
+    config.alt_user_id = cfg.get('s3 alt',"user_id")
+    config.alt_email = cfg.get('s3 alt',"email")
+
+    config.tenant_access_key = cfg.get('s3 tenant',"access_key")
+    config.tenant_secret_key = cfg.get('s3 tenant',"secret_key")
+    config.tenant_display_name = cfg.get('s3 tenant',"display_name")
+    config.tenant_user_id = cfg.get('s3 tenant',"user_id")
+    config.tenant_email = cfg.get('s3 tenant',"email")
+
+    config.iam_access_key = cfg.get('iam',"access_key")
+    config.iam_secret_key = cfg.get('iam',"secret_key")
+    config.iam_display_name = cfg.get('iam',"display_name")
+    config.iam_user_id = cfg.get('iam',"user_id")
+    config.iam_email = cfg.get('iam',"email")
+
+    config.iam_root_access_key = cfg.get('iam root',"access_key")
+    config.iam_root_secret_key = cfg.get('iam root',"secret_key")
+    config.iam_root_user_id = cfg.get('iam root',"user_id")
+    config.iam_root_email = cfg.get('iam root',"email")
+
+    config.iam_alt_root_access_key = cfg.get('iam alt root',"access_key")
+    config.iam_alt_root_secret_key = cfg.get('iam alt root',"secret_key")
+    config.iam_alt_root_user_id = cfg.get('iam alt root',"user_id")
+    config.iam_alt_root_email = cfg.get('iam alt root',"email")
+
+    # vars from the fixtures section
+    template = cfg.get('fixtures', "bucket prefix", fallback='test-{random}-')
+    prefix = choose_bucket_prefix(template=template)
+    template = cfg.get('fixtures', "iam name prefix", fallback="s3-tests-")
+    config.iam_name_prefix = choose_bucket_prefix(template=template)
+    template = cfg.get('fixtures', "iam path prefix", fallback="/s3-tests/")
+    config.iam_path_prefix = choose_bucket_prefix(template=template)
+
+    alt_client = get_alt_client()
+    tenant_client = get_tenant_client()
+    nuke_prefixed_buckets(prefix=prefix)
+    nuke_prefixed_buckets(prefix=prefix, client=alt_client)
+    nuke_prefixed_buckets(prefix=prefix, client=tenant_client)
+
+    if cfg.has_section("s3 cloud"):
+        get_cloud_config(cfg)
+    else:
+        config.cloud_storage_class = None
+
+
+def teardown():
+    alt_client = get_alt_client()
+    tenant_client = get_tenant_client()
+    nuke_prefixed_buckets(prefix=prefix)
+    nuke_prefixed_buckets(prefix=prefix, client=alt_client)
+    nuke_prefixed_buckets(prefix=prefix, client=tenant_client)
+    try:
+        iam_client = get_iam_client()
+        list_roles_resp = iam_client.list_roles()
+        for role in list_roles_resp['Roles']:
+            list_policies_resp = iam_client.list_role_policies(RoleName=role['RoleName'])
+            for policy in list_policies_resp['PolicyNames']:
+                del_policy_resp = iam_client.delete_role_policy(
+                                         RoleName=role['RoleName'],
+                                         PolicyName=policy
+                                        )
+            del_role_resp = iam_client.delete_role(RoleName=role['RoleName'])
+        list_oidc_resp = iam_client.list_open_id_connect_providers()
+        for oidcprovider in list_oidc_resp['OpenIDConnectProviderList']:
+            del_oidc_resp = iam_client.delete_open_id_connect_provider(
+                        OpenIDConnectProviderArn=oidcprovider['Arn']
+                    )
+    except:
+        pass
+
+@pytest.fixture(scope="package")
+def configfile():
+    setup()
+    return config
+
+@pytest.fixture(autouse=True)
+def setup_teardown(configfile):
+    yield
+    teardown()
+
+def check_webidentity():
+    cfg = configparser.RawConfigParser()
+    try:
+        path = os.environ['S3TEST_CONF']
+    except KeyError:
+        raise RuntimeError(
+            'To run tests, point environment '
+            + 'variable S3TEST_CONF to a config file.',
+            )
+    cfg.read(path)
+    if not cfg.has_section("webidentity"):
+        raise RuntimeError('Your config file is missing the "webidentity" section!')
+
+    config.webidentity_thumbprint = cfg.get('webidentity', "thumbprint")
+    config.webidentity_aud = cfg.get('webidentity', "aud")
+    config.webidentity_token = cfg.get('webidentity', "token")
+    config.webidentity_realm = cfg.get('webidentity', "KC_REALM")
+    config.webidentity_sub = cfg.get('webidentity', "sub")
+    config.webidentity_azp = cfg.get('webidentity', "azp")
+    config.webidentity_user_token = cfg.get('webidentity', "user_token")
+
+def get_cloud_config(cfg):
+    config.cloud_host = cfg.get('s3 cloud',"host")
+    config.cloud_port = int(cfg.get('s3 cloud',"port"))
+    config.cloud_is_secure = cfg.getboolean('s3 cloud', "is_secure")
+
+    proto = 'https' if config.cloud_is_secure else 'http'
+    config.cloud_endpoint = "%s://%s:%d" % (proto, config.cloud_host, config.cloud_port)
+
+    config.cloud_access_key = cfg.get('s3 cloud',"access_key")
+    config.cloud_secret_key = cfg.get('s3 cloud',"secret_key")
+
+    try:
+        config.cloud_storage_class = cfg.get('s3 cloud', "cloud_storage_class")
+    except (configparser.NoSectionError, configparser.NoOptionError):
+        config.cloud_storage_class = None
+    
+    try:
+        config.cloud_retain_head_object = cfg.get('s3 cloud',"retain_head_object")
+    except (configparser.NoSectionError, configparser.NoOptionError):
+        config.cloud_retain_head_object = None
+
+    try:
+        config.cloud_target_path = cfg.get('s3 cloud',"target_path")
+    except (configparser.NoSectionError, configparser.NoOptionError):
+        config.cloud_target_path = None
+
+    try:
+        config.cloud_target_storage_class = cfg.get('s3 cloud',"target_storage_class")
+    except (configparser.NoSectionError, configparser.NoOptionError):
+        config.cloud_target_storage_class = 'STANDARD'
+
+    try:
+        config.cloud_regular_storage_class = cfg.get('s3 cloud', "storage_class")
+    except (configparser.NoSectionError, configparser.NoOptionError):
+        config.cloud_regular_storage_class  = None
+
+
+def get_client(client_config=None):
+    if client_config == None:
+        client_config = Config(signature_version='s3v4')
+
+    client = boto3.client(service_name='s3',
+                        aws_access_key_id=config.main_access_key,
+                        aws_secret_access_key=config.main_secret_key,
+                        endpoint_url=config.default_endpoint,
+                        use_ssl=config.default_is_secure,
+                        verify=config.default_ssl_verify,
+                        config=client_config)
+    return client
+
+def get_v2_client():
+    client = boto3.client(service_name='s3',
+                        aws_access_key_id=config.main_access_key,
+                        aws_secret_access_key=config.main_secret_key,
+                        endpoint_url=config.default_endpoint,
+                        use_ssl=config.default_is_secure,
+                        verify=config.default_ssl_verify,
+                        config=Config(signature_version='s3'))
+    return client
+
+def get_sts_client(**kwargs):
+    kwargs.setdefault('aws_access_key_id', config.alt_access_key)
+    kwargs.setdefault('aws_secret_access_key', config.alt_secret_key)
+    kwargs.setdefault('config', Config(signature_version='s3v4'))
+
+    client = boto3.client(service_name='sts',
+                          endpoint_url=config.default_endpoint,
+                          region_name='',
+                          use_ssl=config.default_is_secure,
+                          verify=config.default_ssl_verify,
+                          **kwargs)
+    return client
+
+def get_iam_client(**kwargs):
+    kwargs.setdefault('aws_access_key_id', config.iam_access_key)
+    kwargs.setdefault('aws_secret_access_key', config.iam_secret_key)
+
+    client = boto3.client(service_name='iam',
+                        endpoint_url=config.default_endpoint,
+                        region_name='',
+                        use_ssl=config.default_is_secure,
+                        verify=config.default_ssl_verify,
+                        **kwargs)
+    return client
+
+def get_iam_s3client(**kwargs):
+    kwargs.setdefault('aws_access_key_id', config.iam_access_key)
+    kwargs.setdefault('aws_secret_access_key', config.iam_secret_key)
+    kwargs.setdefault('config', Config(signature_version='s3v4'))
+
+    client = boto3.client(service_name='s3',
+                          endpoint_url=config.default_endpoint,
+                          use_ssl=config.default_is_secure,
+                          verify=config.default_ssl_verify,
+                          **kwargs)
+    return client
+
+def get_iam_root_client(**kwargs):
+    kwargs.setdefault('service_name', 'iam')
+    kwargs.setdefault('aws_access_key_id', config.iam_root_access_key)
+    kwargs.setdefault('aws_secret_access_key', config.iam_root_secret_key)
+
+    return boto3.client(endpoint_url=config.default_endpoint,
+                        region_name='',
+                        use_ssl=config.default_is_secure,
+                        verify=config.default_ssl_verify,
+                        **kwargs)
+
+def get_iam_alt_root_client(**kwargs):
+    kwargs.setdefault('service_name', 'iam')
+    kwargs.setdefault('aws_access_key_id', config.iam_alt_root_access_key)
+    kwargs.setdefault('aws_secret_access_key', config.iam_alt_root_secret_key)
+
+    return boto3.client(endpoint_url=config.default_endpoint,
+                        region_name='',
+                        use_ssl=config.default_is_secure,
+                        verify=config.default_ssl_verify,
+                        **kwargs)
+
+def get_alt_client(client_config=None):
+    if client_config == None:
+        client_config = Config(signature_version='s3v4')
+
+    client = boto3.client(service_name='s3',
+                        aws_access_key_id=config.alt_access_key,
+                        aws_secret_access_key=config.alt_secret_key,
+                        endpoint_url=config.default_endpoint,
+                        use_ssl=config.default_is_secure,
+                        verify=config.default_ssl_verify,
+                        config=client_config)
+    return client
+
+def get_cloud_client(client_config=None):
+    if client_config == None:
+        client_config = Config(signature_version='s3v4')
+
+    client = boto3.client(service_name='s3',
+                        aws_access_key_id=config.cloud_access_key,
+                        aws_secret_access_key=config.cloud_secret_key,
+                        endpoint_url=config.cloud_endpoint,
+                        use_ssl=config.cloud_is_secure,
+                        config=client_config)
+    return client
+
+def get_tenant_client(client_config=None):
+    if client_config == None:
+        client_config = Config(signature_version='s3v4')
+
+    client = boto3.client(service_name='s3',
+                        aws_access_key_id=config.tenant_access_key,
+                        aws_secret_access_key=config.tenant_secret_key,
+                        endpoint_url=config.default_endpoint,
+                        use_ssl=config.default_is_secure,
+                        verify=config.default_ssl_verify,
+                        config=client_config)
+    return client
+
+def get_tenant_iam_client():
+
+    client = boto3.client(service_name='iam',
+                          region_name='us-east-1',
+                          aws_access_key_id=config.tenant_access_key,
+                          aws_secret_access_key=config.tenant_secret_key,
+                          endpoint_url=config.default_endpoint,
+                          verify=config.default_ssl_verify,
+                          use_ssl=config.default_is_secure)
+    return client
+
+def get_alt_iam_client():
+
+    client = boto3.client(service_name='iam',
+                          region_name='',
+                          aws_access_key_id=config.alt_access_key,
+                          aws_secret_access_key=config.alt_secret_key,
+                          endpoint_url=config.default_endpoint,
+                          verify=config.default_ssl_verify,
+                          use_ssl=config.default_is_secure)
+    return client
+
+def get_unauthenticated_client():
+    client = boto3.client(service_name='s3',
+                        aws_access_key_id='',
+                        aws_secret_access_key='',
+                        endpoint_url=config.default_endpoint,
+                        use_ssl=config.default_is_secure,
+                        verify=config.default_ssl_verify,
+                        config=Config(signature_version=UNSIGNED))
+    return client
+
+def get_bad_auth_client(aws_access_key_id='badauth'):
+    client = boto3.client(service_name='s3',
+                        aws_access_key_id=aws_access_key_id,
+                        aws_secret_access_key='roflmao',
+                        endpoint_url=config.default_endpoint,
+                        use_ssl=config.default_is_secure,
+                        verify=config.default_ssl_verify,
+                        config=Config(signature_version='s3v4'))
+    return client
+
+def get_svc_client(client_config=None, svc='s3'):
+    if client_config == None:
+        client_config = Config(signature_version='s3v4')
+
+    client = boto3.client(service_name=svc,
+                        aws_access_key_id=config.main_access_key,
+                        aws_secret_access_key=config.main_secret_key,
+                        endpoint_url=config.default_endpoint,
+                        use_ssl=config.default_is_secure,
+                        verify=config.default_ssl_verify,
+                        config=client_config)
+    return client
+
+bucket_counter = itertools.count(1)
+
+def get_new_bucket_name():
+    """
+    Get a bucket name that probably does not exist.
+
+    We make every attempt to use a unique random prefix, so if a
+    bucket by this name happens to exist, it's ok if tests give
+    false negatives.
+    """
+    name = '{prefix}{num}'.format(
+        prefix=prefix,
+        num=next(bucket_counter),
+        )
+    return name
+
+def get_new_bucket_resource(name=None):
+    """
+    Get a bucket that exists and is empty.
+
+    Always recreates a bucket from scratch. This is useful to also
+    reset ACLs and such.
+    """
+    s3 = boto3.resource('s3',
+                        aws_access_key_id=config.main_access_key,
+                        aws_secret_access_key=config.main_secret_key,
+                        endpoint_url=config.default_endpoint,
+                        use_ssl=config.default_is_secure,
+                        verify=config.default_ssl_verify)
+    if name is None:
+        name = get_new_bucket_name()
+    bucket = s3.Bucket(name)
+    bucket_location = bucket.create()
+    return bucket
+
+def get_new_bucket(client=None, name=None):
+    """
+    Get a bucket that exists and is empty.
+
+    Always recreates a bucket from scratch. This is useful to also
+    reset ACLs and such.
+    """
+    if client is None:
+        client = get_client()
+    if name is None:
+        name = get_new_bucket_name()
+
+    client.create_bucket(Bucket=name)
+    return name
+
+def get_parameter_name():
+    parameter_name=""
+    rand = ''.join(
+        random.choice(string.ascii_lowercase + string.digits)
+        for c in range(255)
+        )
+    while rand:
+        parameter_name = '{random}'.format(random=rand)
+        if len(parameter_name) <= 10:
+            return parameter_name
+        rand = rand[:-1]
+    return parameter_name
+
+def get_sts_user_id():
+    return config.alt_user_id
+
+def get_config_is_secure():
+    return config.default_is_secure
+
+def get_config_host():
+    return config.default_host
+
+def get_config_port():
+    return config.default_port
+
+def get_config_endpoint():
+    return config.default_endpoint
+
+def get_config_ssl_verify():
+    return config.default_ssl_verify
+
+def get_main_aws_access_key():
+    return config.main_access_key
+
+def get_main_aws_secret_key():
+    return config.main_secret_key
+
+def get_main_display_name():
+    return config.main_display_name
+
+def get_main_user_id():
+    return config.main_user_id
+
+def get_main_email():
+    return config.main_email
+
+def get_main_api_name():
+    return config.main_api_name
+
+def get_main_kms_keyid():
+    return config.main_kms_keyid
+
+def get_secondary_kms_keyid():
+    return config.main_kms_keyid2
+
+def get_alt_aws_access_key():
+    return config.alt_access_key
+
+def get_alt_aws_secret_key():
+    return config.alt_secret_key
+
+def get_alt_display_name():
+    return config.alt_display_name
+
+def get_alt_user_id():
+    return config.alt_user_id
+
+def get_alt_email():
+    return config.alt_email
+
+def get_tenant_aws_access_key():
+    return config.tenant_access_key
+
+def get_tenant_aws_secret_key():
+    return config.tenant_secret_key
+
+def get_tenant_display_name():
+    return config.tenant_display_name
+
+def get_tenant_user_id():
+    return config.tenant_user_id
+
+def get_tenant_email():
+    return config.tenant_email
+
+def get_thumbprint():
+    return config.webidentity_thumbprint
+
+def get_aud():
+    return config.webidentity_aud
+
+def get_sub():
+    return config.webidentity_sub
+
+def get_azp():
+    return config.webidentity_azp
+
+def get_token():
+    return config.webidentity_token
+
+def get_realm_name():
+    return config.webidentity_realm
+
+def get_iam_name_prefix():
+    return config.iam_name_prefix
+
+def make_iam_name(name):
+    return config.iam_name_prefix + name
+
+def get_iam_path_prefix():
+    return config.iam_path_prefix
+
+def get_iam_access_key():
+    return config.iam_access_key
+
+def get_iam_secret_key():
+    return config.iam_secret_key
+
+def get_iam_root_user_id():
+    return config.iam_root_user_id
+
+def get_iam_root_email():
+    return config.iam_root_email
+
+def get_iam_alt_root_user_id():
+    return config.iam_alt_root_user_id
+
+def get_iam_alt_root_email():
+    return config.iam_alt_root_email
+
+def get_user_token():
+    return config.webidentity_user_token
+
+def get_cloud_storage_class():
+    return config.cloud_storage_class
+
+def get_cloud_retain_head_object():
+    return config.cloud_retain_head_object
+
+def get_cloud_regular_storage_class():
+    return config.cloud_regular_storage_class
+
+def get_cloud_target_path():
+    return config.cloud_target_path
+
+def get_cloud_target_storage_class():
+    return config.cloud_target_storage_class
+
+def get_lc_debug_interval():
+    return config.lc_debug_interval
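
To show how the fixtures and accessors above are consumed, here is a minimal, illustrative test module (not part of this commit); the import pattern mirrors the real test files added later in this diff, such as ``test_headers.py``::

    from . import (
        configfile,       # package-scoped fixture that runs setup()
        setup_teardown,   # autouse fixture that nukes prefixed buckets afterwards
        get_client,
        get_new_bucket,
    )

    def test_put_and_get_round_trip():
        bucket = get_new_bucket()
        client = get_client()
        client.put_object(Bucket=bucket, Key='foo', Body='bar')
        body = client.get_object(Bucket=bucket, Key='foo')['Body'].read()
        assert body == b'bar'
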
diff --git a/s3tests/functional/iam.py b/s3tests/functional/iam.py
new file mode 100644
index 0000000..a070e5d
--- /dev/null
@@ -0,0 +1,199 @@
+from botocore.exceptions import ClientError
+import pytest
+
+from . import (
+    configfile,
+    get_iam_root_client,
+    get_iam_root_user_id,
+    get_iam_root_email,
+    get_iam_alt_root_client,
+    get_iam_alt_root_user_id,
+    get_iam_alt_root_email,
+    get_iam_path_prefix,
+)
+
+def nuke_user_keys(client, name):
+    p = client.get_paginator('list_access_keys')
+    for response in p.paginate(UserName=name):
+        for key in response['AccessKeyMetadata']:
+            try:
+                client.delete_access_key(UserName=name, AccessKeyId=key['AccessKeyId'])
+            except:
+                pass
+
+def nuke_user_policies(client, name):
+    p = client.get_paginator('list_user_policies')
+    for response in p.paginate(UserName=name):
+        for policy in response['PolicyNames']:
+            try:
+                client.delete_user_policy(UserName=name, PolicyName=policy)
+            except:
+                pass
+
+def nuke_attached_user_policies(client, name):
+    p = client.get_paginator('list_attached_user_policies')
+    for response in p.paginate(UserName=name):
+        for policy in response['AttachedPolicies']:
+            try:
+                client.detach_user_policy(UserName=name, PolicyArn=policy['PolicyArn'])
+            except:
+                pass
+
+def nuke_user(client, name):
+    # delete access keys, user policies, etc
+    try:
+        nuke_user_keys(client, name)
+    except:
+        pass
+    try:
+        nuke_user_policies(client, name)
+    except:
+        pass
+    try:
+        nuke_attached_user_policies(client, name)
+    except:
+        pass
+    client.delete_user(UserName=name)
+
+def nuke_users(client, **kwargs):
+    p = client.get_paginator('list_users')
+    for response in p.paginate(**kwargs):
+        for user in response['Users']:
+            try:
+                nuke_user(client, user['UserName'])
+            except:
+                pass
+
+def nuke_group_policies(client, name):
+    p = client.get_paginator('list_group_policies')
+    for response in p.paginate(GroupName=name):
+        for policy in response['PolicyNames']:
+            try:
+                client.delete_group_policy(GroupName=name, PolicyName=policy)
+            except:
+                pass
+
+def nuke_attached_group_policies(client, name):
+    p = client.get_paginator('list_attached_group_policies')
+    for response in p.paginate(GroupName=name):
+        for policy in response['AttachedPolicies']:
+            try:
+                client.detach_group_policy(GroupName=name, PolicyArn=policy['PolicyArn'])
+            except:
+                pass
+
+def nuke_group_users(client, name):
+    p = client.get_paginator('get_group')
+    for response in p.paginate(GroupName=name):
+        for user in response['Users']:
+            try:
+                client.remove_user_from_group(GroupName=name, UserName=user['UserName'])
+            except:
+                pass
+
+def nuke_group(client, name):
+    # delete group policies and remove all users
+    try:
+        nuke_group_policies(client, name)
+    except:
+        pass
+    try:
+        nuke_attached_group_policies(client, name)
+    except:
+        pass
+    try:
+        nuke_group_users(client, name)
+    except:
+        pass
+    client.delete_group(GroupName=name)
+
+def nuke_groups(client, **kwargs):
+    p = client.get_paginator('list_groups')
+    for response in p.paginate(**kwargs):
+        for user in response['Groups']:
+            try:
+                nuke_group(client, user['GroupName'])
+            except:
+                pass
+
+def nuke_role_policies(client, name):
+    p = client.get_paginator('list_role_policies')
+    for response in p.paginate(RoleName=name):
+        for policy in response['PolicyNames']:
+            try:
+                client.delete_role_policy(RoleName=name, PolicyName=policy)
+            except:
+                pass
+
+def nuke_attached_role_policies(client, name):
+    p = client.get_paginator('list_attached_role_policies')
+    for response in p.paginate(RoleName=name):
+        for policy in response['AttachedPolicies']:
+            try:
+                client.detach_role_policy(RoleName=name, PolicyArn=policy['PolicyArn'])
+            except:
+                pass
+
+def nuke_role(client, name):
+    # delete role policies, etc
+    try:
+        nuke_role_policies(client, name)
+    except:
+        pass
+    try:
+        nuke_attached_role_policies(client, name)
+    except:
+        pass
+    client.delete_role(RoleName=name)
+
+def nuke_roles(client, **kwargs):
+    p = client.get_paginator('list_roles')
+    for response in p.paginate(**kwargs):
+        for role in response['Roles']:
+            try:
+                nuke_role(client, role['RoleName'])
+            except:
+                pass
+
+def nuke_oidc_providers(client, prefix):
+    result = client.list_open_id_connect_providers()
+    for provider in result['OpenIDConnectProviderList']:
+        arn = provider['Arn']
+        if f':oidc-provider{prefix}' in arn:
+            try:
+                client.delete_open_id_connect_provider(OpenIDConnectProviderArn=arn)
+            except:
+                pass
+
+
+# fixture for iam account root user
+@pytest.fixture
+def iam_root(configfile):
+    client = get_iam_root_client()
+    try:
+        arn = client.get_user()['User']['Arn']
+        if not arn.endswith(':root'):
+            pytest.skip('[iam root] user does not have :root arn')
+    except ClientError as e:
+        pytest.skip('[iam root] user does not belong to an account')
+
+    yield client
+    nuke_users(client, PathPrefix=get_iam_path_prefix())
+    nuke_groups(client, PathPrefix=get_iam_path_prefix())
+    nuke_roles(client, PathPrefix=get_iam_path_prefix())
+    nuke_oidc_providers(client, get_iam_path_prefix())
+
+# fixture for iam alt account root user
+@pytest.fixture
+def iam_alt_root(configfile):
+    client = get_iam_alt_root_client()
+    try:
+        arn = client.get_user()['User']['Arn']
+        if not arn.endswith(':root'):
+            pytest.skip('[iam alt root] user does not have :root arn')
+    except ClientError as e:
+        pytest.skip('[iam alt root] user does not belong to an account')
+
+    yield client
+    nuke_users(client, PathPrefix=get_iam_path_prefix())
+    nuke_roles(client, PathPrefix=get_iam_path_prefix())
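
As an illustration (not part of this commit) of how the ``iam_root`` fixture above is meant to be used, a test might look like the following; ``make_iam_name`` and ``get_iam_path_prefix`` come from the package ``__init__.py`` shown earlier::

    from . import configfile, get_iam_path_prefix, make_iam_name
    from .iam import iam_root

    def test_account_root_creates_user(iam_root):
        name = make_iam_name('example-user')
        iam_root.create_user(UserName=name, Path=get_iam_path_prefix())
        users = iam_root.list_users(PathPrefix=get_iam_path_prefix())['Users']
        assert name in [u['UserName'] for u in users]
        # everything under the path prefix is removed by the fixture's teardown
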
diff --git a/s3tests/functional/policy.py b/s3tests/functional/policy.py
new file mode 100644
index 0000000..123496a
--- /dev/null
@@ -0,0 +1,46 @@
+import json
+
+class Statement(object):
+    def __init__(self, action, resource, principal = {"AWS" : "*"}, effect= "Allow", condition = None):
+        self.principal = principal
+        self.action = action
+        self.resource = resource
+        self.condition = condition
+        self.effect = effect
+
+    def to_dict(self):
+        d = { "Action" : self.action,
+              "Principal" : self.principal,
+              "Effect" : self.effect,
+              "Resource" : self.resource
+        }
+
+        if self.condition is not None:
+            d["Condition"] = self.condition
+
+        return d
+
+class Policy(object):
+    def __init__(self):
+        self.statements = []
+
+    def add_statement(self, s):
+        self.statements.append(s)
+        return self
+
+    def to_json(self):
+        policy_dict = {
+            "Version" : "2012-10-17",
+            "Statement":
+            [s.to_dict() for s in self.statements]
+        }
+
+        return json.dumps(policy_dict)
+
+def make_json_policy(action, resource, principal={"AWS": "*"}, effect="Allow", conditions=None):
+    """
+    Helper function to make single statement policies
+    """
+    s = Statement(action, resource, principal, effect=effect, condition=conditions)
+    p = Policy()
+    return p.add_statement(s).to_json()
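
A brief, hypothetical usage sketch for the policy helpers above: inside a functional test, build a single-statement bucket policy and attach it with a boto3 client (the action shown is only an example)::

    from . import configfile, setup_teardown, get_client, get_new_bucket
    from .policy import make_json_policy

    def test_anonymous_read_policy_document():
        bucket = get_new_bucket()
        policy_doc = make_json_policy('s3:GetObject',
                                      'arn:aws:s3:::{}/*'.format(bucket))
        get_client().put_bucket_policy(Bucket=bucket, Policy=policy_doc)
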
diff --git a/s3tests/functional/rgw_interactive.py b/s3tests/functional/rgw_interactive.py
new file mode 100644
index 0000000..873a145
--- /dev/null
@@ -0,0 +1,92 @@
+#!/usr/bin/python
+import boto3
+import os
+import random
+import string
+import itertools
+
+host = "localhost"
+port = 8000
+
+## AWS access key
+access_key = "0555b35654ad1656d804"
+
+## AWS secret key
+secret_key = "h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q=="
+
+prefix = "YOURNAMEHERE-1234-"
+
+endpoint_url = "http://%s:%d" % (host, port)
+
+client = boto3.client(service_name='s3',
+                    aws_access_key_id=access_key,
+                    aws_secret_access_key=secret_key,
+                    endpoint_url=endpoint_url,
+                    use_ssl=False,
+                    verify=False)
+
+s3 = boto3.resource('s3', 
+                    use_ssl=False,
+                    verify=False,
+                    endpoint_url=endpoint_url, 
+                    aws_access_key_id=access_key,
+                    aws_secret_access_key=secret_key)
+
+def choose_bucket_prefix(template, max_len=30):
+    """
+    Choose a prefix for our test buckets, so they're easy to identify.
+
+    Use template and feed it more and more random filler, until it's
+    as long as possible but still below max_len.
+    """
+    rand = ''.join(
+        random.choice(string.ascii_lowercase + string.digits)
+        for c in range(255)
+        )
+
+    while rand:
+        s = template.format(random=rand)
+        if len(s) <= max_len:
+            return s
+        rand = rand[:-1]
+
+    raise RuntimeError(
+        'Bucket prefix template is impossible to fulfill: {template!r}'.format(
+            template=template,
+            ),
+        )
+
+bucket_counter = itertools.count(1)
+
+def get_new_bucket_name():
+    """
+    Get a bucket name that probably does not exist.
+
+    We make every attempt to use a unique random prefix, so if a
+    bucket by this name happens to exist, it's ok if tests give
+    false negatives.
+    """
+    name = '{prefix}{num}'.format(
+        prefix=prefix,
+        num=next(bucket_counter),
+        )
+    return name
+
+def get_new_bucket(session=boto3, name=None, headers=None):
+    """
+    Get a bucket that exists and is empty.
+
+    Always recreates a bucket from scratch. This is useful to also
+    reset ACLs and such.
+    """
+    s3 = session.resource('s3', 
+                        use_ssl=False,
+                        verify=False,
+                        endpoint_url=endpoint_url, 
+                        aws_access_key_id=access_key,
+                        aws_secret_access_key=secret_key)
+    if name is None:
+        name = get_new_bucket_name()
+    bucket = s3.Bucket(name)
+    bucket_location = bucket.create()
+    return bucket
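
Since ``rgw_interactive.py`` hard-codes a local vstart endpoint and credentials, one plausible way to use it (a sketch, not a documented workflow) is from an interactive interpreter::

    $ python -i s3tests/functional/rgw_interactive.py
    >>> bucket = get_new_bucket()
    >>> response = client.put_object(Bucket=bucket.name, Key='hello', Body='world')
    >>> [o['Key'] for o in client.list_objects(Bucket=bucket.name)['Contents']]
    ['hello']
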
diff --git a/s3tests/functional/test_headers.py b/s3tests/functional/test_headers.py
new file mode 100644
index 0000000..66cabe5
--- /dev/null
@@ -0,0 +1,572 @@
+import boto3
+import pytest
+from botocore.exceptions import ClientError
+from email.utils import formatdate
+
+from .utils import assert_raises
+from .utils import _get_status_and_error_code
+from .utils import _get_status
+
+from . import (
+    configfile,
+    setup_teardown,
+    get_client,
+    get_v2_client,
+    get_new_bucket,
+    get_new_bucket_name,
+    )
+
+def _add_header_create_object(headers, client=None):
+    """ Create a new bucket, add an object w/header customizations
+    """
+    bucket_name = get_new_bucket()
+    if client == None:
+        client = get_client()
+    key_name = 'foo'
+
+    # pass in custom headers before PutObject call
+    add_headers = (lambda **kwargs: kwargs['params']['headers'].update(headers))
+    client.meta.events.register('before-call.s3.PutObject', add_headers)
+    client.put_object(Bucket=bucket_name, Key=key_name)
+
+    return bucket_name, key_name
+
+
+def _add_header_create_bad_object(headers, client=None):
+    """ Create a new bucket, add an object with a header. This should cause a failure 
+    """
+    bucket_name = get_new_bucket()
+    if client == None:
+        client = get_client()
+    key_name = 'foo'
+
+    # pass in custom headers before PutObject call
+    add_headers = (lambda **kwargs: kwargs['params']['headers'].update(headers))
+    client.meta.events.register('before-call.s3.PutObject', add_headers)
+    e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key=key_name, Body='bar')
+
+    return e
+
+
+def _remove_header_create_object(remove, client=None):
+    """ Create a new bucket, add an object without a header
+    """
+    bucket_name = get_new_bucket()
+    if client == None:
+        client = get_client()
+    key_name = 'foo'
+
+    # remove custom headers before PutObject call
+    def remove_header(**kwargs):
+        if (remove in kwargs['params']['headers']):
+            del kwargs['params']['headers'][remove]
+
+    client.meta.events.register('before-call.s3.PutObject', remove_header)
+    client.put_object(Bucket=bucket_name, Key=key_name)
+
+    return bucket_name, key_name
+
+def _remove_header_create_bad_object(remove, client=None):
+    """ Create a new bucket, add an object without a header. This should cause a failure
+    """
+    bucket_name = get_new_bucket()
+    if client == None:
+        client = get_client()
+    key_name = 'foo'
+
+    # remove custom headers before PutObject call
+    def remove_header(**kwargs):
+        if (remove in kwargs['params']['headers']):
+            del kwargs['params']['headers'][remove]
+
+    client.meta.events.register('before-call.s3.PutObject', remove_header)
+    e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key=key_name, Body='bar')
+
+    return e
+
+
+def _add_header_create_bucket(headers, client=None):
+    """ Create a new bucket, w/header customizations
+    """
+    bucket_name = get_new_bucket_name()
+    if client is None:
+        client = get_client()
+
+    # pass in custom headers before CreateBucket call
+    add_headers = (lambda **kwargs: kwargs['params']['headers'].update(headers))
+    client.meta.events.register('before-call.s3.CreateBucket', add_headers)
+    client.create_bucket(Bucket=bucket_name)
+
+    return bucket_name
+
+
+def _add_header_create_bad_bucket(headers=None, client=None):
+    """ Create a new bucket, w/header customizations that should cause a failure 
+    """
+    bucket_name = get_new_bucket_name()
+    if client is None:
+        client = get_client()
+
+    # pass in custom headers before CreateBucket call
+    add_headers = (lambda **kwargs: kwargs['params']['headers'].update(headers))
+    client.meta.events.register('before-call.s3.CreateBucket', add_headers)
+    e = assert_raises(ClientError, client.create_bucket, Bucket=bucket_name)
+
+    return e
+
+
+def _remove_header_create_bucket(remove, client=None):
+    """ Create a new bucket, without a header
+    """
+    bucket_name = get_new_bucket_name()
+    if client is None:
+        client = get_client()
+
+    # remove custom headers before CreateBucket call
+    def remove_header(**kwargs):
+        if (remove in kwargs['params']['headers']):
+            del kwargs['params']['headers'][remove]
+
+    client.meta.events.register('before-call.s3.CreateBucket', remove_header)
+    client.create_bucket(Bucket=bucket_name)
+
+    return bucket_name
+
+def _remove_header_create_bad_bucket(remove, client=None):
+    """ Create a new bucket, without a header. This should cause a failure
+    """
+    bucket_name = get_new_bucket_name()
+    if client is None:
+        client = get_client()
+
+    # remove custom headers before CreateBucket call
+    def remove_header(**kwargs):
+        if (remove in kwargs['params']['headers']):
+            del kwargs['params']['headers'][remove]
+
+    client.meta.events.register('before-call.s3.CreateBucket', remove_header)
+    e = assert_raises(ClientError, client.create_bucket, Bucket=bucket_name)
+
+    return e
+
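+# The helpers above all rely on botocore's event system: a function registered
+# for 'before-call.s3.<OperationName>' runs just before the request is signed
+# and sent, and may mutate kwargs['params']['headers'] to inject or strip raw
+# HTTP headers that the boto3 API would not normally expose. A standalone
+# sketch of the same pattern (illustrative only, header name is arbitrary):
+#
+#     client = get_client()
+#     def force_header(**kwargs):
+#         kwargs['params']['headers']['x-amz-meta-example'] = 'value'
+#     client.meta.events.register('before-call.s3.PutObject', force_header)
+#     client.put_object(Bucket=bucket_name, Key='key', Body='data')
+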
+#
+# common tests
+#
+
+@pytest.mark.auth_common
+def test_object_create_bad_md5_invalid_short():
+    e = _add_header_create_bad_object({'Content-MD5':'YWJyYWNhZGFicmE='})
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 400
+    assert error_code == 'InvalidDigest'
+
+@pytest.mark.auth_common
+def test_object_create_bad_md5_bad():
+    e = _add_header_create_bad_object({'Content-MD5':'rL0Y20xC+Fzt72VPzMSk2A=='})
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 400
+    assert error_code == 'BadDigest'
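+
+# the two MD5 cases above differ deliberately: 'YWJyYWNhZGFicmE=' is valid
+# base64 but decodes to 11 bytes rather than a 16-byte MD5 digest, hence
+# InvalidDigest; 'rL0Y20xC+Fzt72VPzMSk2A==' is a well-formed 16-byte digest
+# that simply does not match the request body, hence BadDigest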
+
+@pytest.mark.auth_common
+def test_object_create_bad_md5_empty():
+    e = _add_header_create_bad_object({'Content-MD5':''})
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 400
+    assert error_code == 'InvalidDigest'
+
+@pytest.mark.auth_common
+def test_object_create_bad_md5_none():
+    bucket_name, key_name = _remove_header_create_object('Content-MD5')
+    client = get_client()
+    client.put_object(Bucket=bucket_name, Key=key_name, Body='bar')
+
+@pytest.mark.auth_common
+def test_object_create_bad_expect_mismatch():
+    bucket_name, key_name = _add_header_create_object({'Expect': 200})
+    client = get_client()
+    client.put_object(Bucket=bucket_name, Key=key_name, Body='bar')
+
+@pytest.mark.auth_common
+def test_object_create_bad_expect_empty():
+    bucket_name, key_name = _add_header_create_object({'Expect': ''})
+    client = get_client()
+    client.put_object(Bucket=bucket_name, Key=key_name, Body='bar')
+
+@pytest.mark.auth_common
+def test_object_create_bad_expect_none():
+    bucket_name, key_name = _remove_header_create_object('Expect')
+    client = get_client()
+    client.put_object(Bucket=bucket_name, Key=key_name, Body='bar')
+
+@pytest.mark.auth_common
+# TODO: remove 'fails_on_rgw' once we have learned how to remove the content-length header
+@pytest.mark.fails_on_rgw
+def test_object_create_bad_contentlength_empty():
+    e = _add_header_create_bad_object({'Content-Length':''})
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 400
+
+@pytest.mark.auth_common
+@pytest.mark.fails_on_mod_proxy_fcgi
+def test_object_create_bad_contentlength_negative():
+    client = get_client()
+    bucket_name = get_new_bucket()
+    key_name = 'foo'
+    e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key=key_name, ContentLength=-1)
+    status = _get_status(e.response)
+    assert status == 400
+
+@pytest.mark.auth_common
+# TODO: remove 'fails_on_rgw' once we have learned how to remove the content-length header
+@pytest.mark.fails_on_rgw
+def test_object_create_bad_contentlength_none():
+    remove = 'Content-Length'
+    e = _remove_header_create_bad_object(remove)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 411
+    assert error_code == 'MissingContentLength'
+
+@pytest.mark.auth_common
+def test_object_create_bad_contenttype_invalid():
+    bucket_name, key_name = _add_header_create_object({'Content-Type': 'text/plain'})
+    client = get_client()
+    client.put_object(Bucket=bucket_name, Key=key_name, Body='bar')
+
+@pytest.mark.auth_common
+def test_object_create_bad_contenttype_empty():
+    client = get_client()
+    key_name = 'foo'
+    bucket_name = get_new_bucket()
+    client.put_object(Bucket=bucket_name, Key=key_name, Body='bar', ContentType='')
+
+@pytest.mark.auth_common
+def test_object_create_bad_contenttype_none():
+    bucket_name = get_new_bucket()
+    key_name = 'foo'
+    client = get_client()
+    # if ContentType isn't specified in put_object, no Content-Type header goes into the request
+    client.put_object(Bucket=bucket_name, Key=key_name, Body='bar')
+
+
+@pytest.mark.auth_common
+# TODO: remove 'fails_on_rgw' once we have learned how to remove the authorization header
+@pytest.mark.fails_on_rgw
+def test_object_create_bad_authorization_empty():
+    e = _add_header_create_bad_object({'Authorization': ''})
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 403
+
+@pytest.mark.auth_common
+# TODO: remove 'fails_on_rgw' once we have learned how to pass both the 'Date' and 'X-Amz-Date' headers during signing, and not only 'X-Amz-Date' as before
+@pytest.mark.fails_on_rgw
+def test_object_create_date_and_amz_date():
+    date = formatdate(usegmt=True)
+    bucket_name, key_name = _add_header_create_object({'Date': date, 'X-Amz-Date': date})
+    client = get_client()
+    client.put_object(Bucket=bucket_name, Key=key_name, Body='bar')
+
+@pytest.mark.auth_common
+# TODO: remove 'fails_on_rgw' once we have learned how to pass both the 'Date' and 'X-Amz-Date' headers during signing, and not only 'X-Amz-Date' as before
+@pytest.mark.fails_on_rgw
+def test_object_create_amz_date_and_no_date():
+    date = formatdate(usegmt=True)
+    bucket_name, key_name = _add_header_create_object({'Date': '', 'X-Amz-Date': date})
+    client = get_client()
+    client.put_object(Bucket=bucket_name, Key=key_name, Body='bar')
+
+# the teardown is really messed up here. check it out
+@pytest.mark.auth_common
+# TODO: remove 'fails_on_rgw' once we have learned how to remove the authorization header
+@pytest.mark.fails_on_rgw
+def test_object_create_bad_authorization_none():
+    e = _remove_header_create_bad_object('Authorization')
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 403
+
+@pytest.mark.auth_common
+# TODO: remove 'fails_on_rgw' once we have learned how to remove the content-length header
+@pytest.mark.fails_on_rgw
+def test_bucket_create_contentlength_none():
+    remove = 'Content-Length'
+    _remove_header_create_bucket(remove)
+
+@pytest.mark.auth_common
+# TODO: remove 'fails_on_rgw' once we have learned how to remove the content-length header
+@pytest.mark.fails_on_rgw
+def test_object_acl_create_contentlength_none():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
+
+    remove = 'Content-Length'
+    def remove_header(**kwargs):
+        if (remove in kwargs['params']['headers']):
+            del kwargs['params']['headers'][remove]
+
+    client.meta.events.register('before-call.s3.PutObjectAcl', remove_header)
+    client.put_object_acl(Bucket=bucket_name, Key='foo', ACL='public-read')
+
+@pytest.mark.auth_common
+def test_bucket_put_bad_canned_acl():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    headers = {'x-amz-acl': 'public-ready'}
+    add_headers = (lambda **kwargs: kwargs['params']['headers'].update(headers))
+    client.meta.events.register('before-call.s3.PutBucketAcl', add_headers)
+
+    e = assert_raises(ClientError, client.put_bucket_acl, Bucket=bucket_name, ACL='public-read')
+    status = _get_status(e.response)
+    assert status == 400
+
+@pytest.mark.auth_common
+def test_bucket_create_bad_expect_mismatch():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+
+    headers = {'Expect': 200}
+    add_headers = (lambda **kwargs: kwargs['params']['headers'].update(headers))
+    client.meta.events.register('before-call.s3.CreateBucket', add_headers)
+    client.create_bucket(Bucket=bucket_name)
+
+@pytest.mark.auth_common
+def test_bucket_create_bad_expect_empty():
+    headers = {'Expect': ''}
+    _add_header_create_bucket(headers)
+
+@pytest.mark.auth_common
+# TODO: The request isn't even making it to the RGW past the frontend
+# This test had 'fails_on_rgw' before the move to boto3
+@pytest.mark.fails_on_rgw
+def test_bucket_create_bad_contentlength_empty():
+    headers = {'Content-Length': ''}
+    e = _add_header_create_bad_bucket(headers)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 400
+
+@pytest.mark.auth_common
+@pytest.mark.fails_on_mod_proxy_fcgi
+def test_bucket_create_bad_contentlength_negative():
+    headers = {'Content-Length': '-1'}
+    e = _add_header_create_bad_bucket(headers)
+    status = _get_status(e.response)
+    assert status == 400
+
+@pytest.mark.auth_common
+# TODO: remove 'fails_on_rgw' once we have learned how to remove the content-length header
+@pytest.mark.fails_on_rgw
+def test_bucket_create_bad_contentlength_none():
+    remove = 'Content-Length'
+    _remove_header_create_bucket(remove)
+
+@pytest.mark.auth_common
+# TODO: remove 'fails_on_rgw' once we have learned how to manipulate the authorization header
+@pytest.mark.fails_on_rgw
+def test_bucket_create_bad_authorization_empty():
+    headers = {'Authorization': ''}
+    e = _add_header_create_bad_bucket(headers)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 403
+    assert error_code == 'AccessDenied'
+
+@pytest.mark.auth_common
+# TODO: remove 'fails_on_rgw' once we have learned how to manipulate the authorization header
+@pytest.mark.fails_on_rgw
+def test_bucket_create_bad_authorization_none():
+    e = _remove_header_create_bad_bucket('Authorization')
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 403
+    assert error_code == 'AccessDenied'
+
+@pytest.mark.auth_aws2
+def test_object_create_bad_md5_invalid_garbage_aws2():
+    v2_client = get_v2_client()
+    headers = {'Content-MD5': 'AWS HAHAHA'}
+    e = _add_header_create_bad_object(headers, v2_client)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 400
+    assert error_code == 'InvalidDigest'
+
+@pytest.mark.auth_aws2
+# TODO: remove 'fails_on_rgw' once we have learned how to manipulate the Content-Length header
+@pytest.mark.fails_on_rgw
+def test_object_create_bad_contentlength_mismatch_below_aws2():
+    v2_client = get_v2_client()
+    content = 'bar'
+    length = len(content) - 1
+    headers = {'Content-Length': str(length)}
+    e = _add_header_create_bad_object(headers, v2_client)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 400
+    assert error_code == 'BadDigest'
+
+@pytest.mark.auth_aws2
+# TODO: remove 'fails_on_rgw' once we have learned how to manipulate the authorization header
+@pytest.mark.fails_on_rgw
+def test_object_create_bad_authorization_incorrect_aws2():
+    v2_client = get_v2_client()
+    headers = {'Authorization': 'AWS AKIAIGR7ZNNBHC5BKSUB:FWeDfwojDSdS2Ztmpfeubhd9isU='}
+    e = _add_header_create_bad_object(headers, v2_client)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 403
+    assert error_code == 'InvalidDigest'
+
+@pytest.mark.auth_aws2
+# TODO: remove 'fails_on_rgw' once we have learned how to manipulate the authorization header
+@pytest.mark.fails_on_rgw
+def test_object_create_bad_authorization_invalid_aws2():
+    v2_client = get_v2_client()
+    headers = {'Authorization': 'AWS HAHAHA'}
+    e = _add_header_create_bad_object(headers, v2_client)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 400
+    assert error_code == 'InvalidArgument'
+
+@pytest.mark.auth_aws2
+def test_object_create_bad_ua_empty_aws2():
+    v2_client = get_v2_client()
+    headers = {'User-Agent': ''}
+    bucket_name, key_name = _add_header_create_object(headers, v2_client)
+    v2_client.put_object(Bucket=bucket_name, Key=key_name, Body='bar')
+
+@pytest.mark.auth_aws2
+def test_object_create_bad_ua_none_aws2():
+    v2_client = get_v2_client()
+    remove = 'User-Agent'
+    bucket_name, key_name = _remove_header_create_object(remove, v2_client)
+    v2_client.put_object(Bucket=bucket_name, Key=key_name, Body='bar')
+
+@pytest.mark.auth_aws2
+def test_object_create_bad_date_invalid_aws2():
+    v2_client = get_v2_client()
+    headers = {'x-amz-date': 'Bad Date'}
+    e = _add_header_create_bad_object(headers, v2_client)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 403
+    assert error_code == 'AccessDenied'
+
+@pytest.mark.auth_aws2
+def test_object_create_bad_date_empty_aws2():
+    v2_client = get_v2_client()
+    headers = {'x-amz-date': ''}
+    e = _add_header_create_bad_object(headers, v2_client)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 403
+    assert error_code == 'AccessDenied'
+
+@pytest.mark.auth_aws2
+# TODO: remove 'fails_on_rgw' once we have learned how to remove the date header
+@pytest.mark.fails_on_rgw
+def test_object_create_bad_date_none_aws2():
+    v2_client = get_v2_client()
+    remove = 'x-amz-date'
+    e = _remove_header_create_bad_object(remove, v2_client)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 403
+    assert error_code == 'AccessDenied'
+
+@pytest.mark.auth_aws2
+def test_object_create_bad_date_before_today_aws2():
+    v2_client = get_v2_client()
+    headers = {'x-amz-date': 'Tue, 07 Jul 2010 21:53:04 GMT'}
+    e = _add_header_create_bad_object(headers, v2_client)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 403
+    assert error_code == 'RequestTimeTooSkewed'
+
+@pytest.mark.auth_aws2
+def test_object_create_bad_date_before_epoch_aws2():
+    v2_client = get_v2_client()
+    headers = {'x-amz-date': 'Tue, 07 Jul 1950 21:53:04 GMT'}
+    e = _add_header_create_bad_object(headers, v2_client)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 403
+    assert error_code == 'AccessDenied'
+
+@pytest.mark.auth_aws2
+def test_object_create_bad_date_after_end_aws2():
+    v2_client = get_v2_client()
+    headers = {'x-amz-date': 'Tue, 07 Jul 9999 21:53:04 GMT'}
+    e = _add_header_create_bad_object(headers, v2_client)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 403
+    assert error_code == 'RequestTimeTooSkewed'
+
+@pytest.mark.auth_aws2
+# TODO: remove 'fails_on_rgw' once we have learned how to remove the date header
+@pytest.mark.fails_on_rgw
+def test_bucket_create_bad_authorization_invalid_aws2():
+    v2_client = get_v2_client()
+    headers = {'Authorization': 'AWS HAHAHA'}
+    e = _add_header_create_bad_bucket(headers, v2_client)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 400
+    assert error_code == 'InvalidArgument'
+
+@pytest.mark.auth_aws2
+def test_bucket_create_bad_ua_empty_aws2():
+    v2_client = get_v2_client()
+    headers = {'User-Agent': ''}
+    _add_header_create_bucket(headers, v2_client)
+
+@pytest.mark.auth_aws2
+def test_bucket_create_bad_ua_none_aws2():
+    v2_client = get_v2_client()
+    remove = 'User-Agent'
+    _remove_header_create_bucket(remove, v2_client)
+
+@pytest.mark.auth_aws2
+def test_bucket_create_bad_date_invalid_aws2():
+    v2_client = get_v2_client()
+    headers = {'x-amz-date': 'Bad Date'}
+    e = _add_header_create_bad_bucket(headers, v2_client)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 403
+    assert error_code == 'AccessDenied'
+
+@pytest.mark.auth_aws2
+def test_bucket_create_bad_date_empty_aws2():
+    v2_client = get_v2_client()
+    headers = {'x-amz-date': ''}
+    e = _add_header_create_bad_bucket(headers, v2_client)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 403
+    assert error_code == 'AccessDenied'
+
+@pytest.mark.auth_aws2
+# TODO: remove 'fails_on_rgw' once we have learned how to remove the date header
+@pytest.mark.fails_on_rgw
+def test_bucket_create_bad_date_none_aws2():
+    v2_client = get_v2_client()
+    remove = 'x-amz-date'
+    e = _remove_header_create_bad_bucket(remove, v2_client)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 403
+    assert error_code == 'AccessDenied'
+
+@pytest.mark.auth_aws2
+def test_bucket_create_bad_date_before_today_aws2():
+    v2_client = get_v2_client()
+    headers = {'x-amz-date': 'Tue, 07 Jul 2010 21:53:04 GMT'}
+    e = _add_header_create_bad_bucket(headers, v2_client)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 403
+    assert error_code == 'RequestTimeTooSkewed'
+
+@pytest.mark.auth_aws2
+def test_bucket_create_bad_date_after_today_aws2():
+    v2_client = get_v2_client()
+    headers = {'x-amz-date': 'Tue, 07 Jul 2030 21:53:04 GMT'}
+    e = _add_header_create_bad_bucket(headers, v2_client)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 403
+    assert error_code == 'RequestTimeTooSkewed'
+
+@pytest.mark.auth_aws2
+def test_bucket_create_bad_date_before_epoch_aws2():
+    v2_client = get_v2_client()
+    headers = {'x-amz-date': 'Tue, 07 Jul 1950 21:53:04 GMT'}
+    e = _add_header_create_bad_bucket(headers, v2_client)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 403
+    assert error_code == 'AccessDenied'
diff --git a/s3tests/functional/test_iam.py b/s3tests/functional/test_iam.py
new file mode 100644 (file)
index 0000000..4e4054b
--- /dev/null
@@ -0,0 +1,2803 @@
+import json
+import datetime
+import time
+
+from botocore.exceptions import ClientError
+import pytest
+
+from .utils import assert_raises
+from .test_s3 import _multipart_upload
+from . import (
+    configfile,
+    setup_teardown,
+    get_alt_client,
+    get_iam_client,
+    get_iam_root_client,
+    get_iam_alt_root_client,
+    get_iam_alt_root_user_id,
+    get_iam_alt_root_email,
+    make_iam_name,
+    get_iam_path_prefix,
+    get_new_bucket,
+    get_new_bucket_name,
+    get_iam_s3client,
+    get_alt_iam_client,
+    get_alt_user_id,
+    get_sts_client,
+)
+from .utils import _get_status, _get_status_and_error_code
+from .iam import iam_root, iam_alt_root
+
+
+@pytest.mark.user_policy
+@pytest.mark.iam_tenant
+def test_put_user_policy():
+    client = get_iam_client()
+
+    policy_document = json.dumps(
+        {"Version": "2012-10-17",
+         "Statement": {
+             "Effect": "Allow",
+             "Action": "*",
+             "Resource": "*"}}
+    )
+    response = client.put_user_policy(PolicyDocument=policy_document, PolicyName='AllAccessPolicy',
+                                      UserName=get_alt_user_id())
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+    response = client.delete_user_policy(PolicyName='AllAccessPolicy',
+                                         UserName=get_alt_user_id())
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+
+@pytest.mark.user_policy
+@pytest.mark.iam_tenant
+def test_put_user_policy_invalid_user():
+    client = get_iam_client()
+
+    policy_document = json.dumps(
+        {"Version": "2012-10-17",
+         "Statement": {
+             "Effect": "Allow",
+             "Action": "*",
+             "Resource": "*"}}
+    )
+    e = assert_raises(ClientError, client.put_user_policy, PolicyDocument=policy_document,
+                      PolicyName='AllAccessPolicy', UserName="some-non-existing-user-id")
+    status = _get_status(e.response)
+    assert status == 404
+
+
+@pytest.mark.user_policy
+@pytest.mark.iam_tenant
+def test_put_user_policy_parameter_limit():
+    client = get_iam_client()
+
+    policy_document = json.dumps(
+        {"Version": "2012-10-17",
+         "Statement": [{
+             "Effect": "Allow",
+             "Action": "*",
+             "Resource": "*"}] * 1000
+         }
+    )
+    e = assert_raises(ClientError, client.put_user_policy, PolicyDocument=policy_document,
+                      PolicyName='AllAccessPolicy' * 10, UserName=get_alt_user_id())
+    status = _get_status(e.response)
+    assert status == 400
+
+
+@pytest.mark.user_policy
+@pytest.mark.iam_tenant
+@pytest.mark.fails_on_rgw
+def test_put_user_policy_invalid_element():
+    client = get_iam_client()
+
+    # With Version other than 2012-10-17
+    policy_document = json.dumps(
+        {"Version": "2010-10-17",
+         "Statement": [{
+             "Effect": "Allow",
+             "Action": "*",
+             "Resource": "*"}]
+         }
+    )
+    e = assert_raises(ClientError, client.put_user_policy, PolicyDocument=policy_document,
+                      PolicyName='AllAccessPolicy', UserName=get_alt_user_id())
+    status = _get_status(e.response)
+    assert status == 400
+
+    # With no Statement
+    policy_document = json.dumps(
+        {
+            "Version": "2012-10-17",
+        }
+    )
+    e = assert_raises(ClientError, client.put_user_policy, PolicyDocument=policy_document,
+                      PolicyName='AllAccessPolicy', UserName=get_alt_user_id())
+    status = _get_status(e.response)
+    assert status == 400
+
+    # with same Sid for 2 statements
+    policy_document = json.dumps(
+        {"Version": "2012-10-17",
+         "Statement": [
+             {"Sid": "98AB54CF",
+              "Effect": "Allow",
+              "Action": "*",
+              "Resource": "*"},
+             {"Sid": "98AB54CF",
+              "Effect": "Allow",
+              "Action": "*",
+              "Resource": "*"}]
+         }
+    )
+    e = assert_raises(ClientError, client.put_user_policy, PolicyDocument=policy_document,
+                      PolicyName='AllAccessPolicy', UserName=get_alt_user_id())
+    status = _get_status(e.response)
+    assert status == 400
+
+    # with Principal
+    policy_document = json.dumps(
+        {"Version": "2012-10-17",
+         "Statement": [{
+             "Effect": "Allow",
+             "Action": "*",
+             "Resource": "*",
+             "Principal": "arn:aws:iam:::username"}]
+         }
+    )
+    e = assert_raises(ClientError, client.put_user_policy, PolicyDocument=policy_document,
+                      PolicyName='AllAccessPolicy', UserName=get_alt_user_id())
+    status = _get_status(e.response)
+    assert status == 400
+
+
+@pytest.mark.user_policy
+@pytest.mark.iam_tenant
+def test_put_existing_user_policy():
+    client = get_iam_client()
+
+    policy_document = json.dumps(
+        {"Version": "2012-10-17",
+         "Statement": {
+             "Effect": "Allow",
+             "Action": "*",
+             "Resource": "*"}
+         }
+    )
+    response = client.put_user_policy(PolicyDocument=policy_document, PolicyName='AllAccessPolicy',
+                                      UserName=get_alt_user_id())
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+    client.put_user_policy(PolicyDocument=policy_document, PolicyName='AllAccessPolicy',
+                           UserName=get_alt_user_id())
+    client.delete_user_policy(PolicyName='AllAccessPolicy', UserName=get_alt_user_id())
+
+
+@pytest.mark.user_policy
+@pytest.mark.iam_tenant
+def test_list_user_policy():
+    client = get_iam_client()
+
+    policy_document = json.dumps(
+        {"Version": "2012-10-17",
+         "Statement": {
+             "Effect": "Allow",
+             "Action": "*",
+             "Resource": "*"}
+         }
+    )
+    response = client.put_user_policy(PolicyDocument=policy_document, PolicyName='AllAccessPolicy',
+                                      UserName=get_alt_user_id())
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+    response = client.list_user_policies(UserName=get_alt_user_id())
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+    client.delete_user_policy(PolicyName='AllAccessPolicy', UserName=get_alt_user_id())
+
+
+@pytest.mark.user_policy
+@pytest.mark.iam_tenant
+def test_list_user_policy_invalid_user():
+    client = get_iam_client()
+    e = assert_raises(ClientError, client.list_user_policies, UserName="some-non-existing-user-id")
+    status = _get_status(e.response)
+    assert status == 404
+
+
+@pytest.mark.user_policy
+@pytest.mark.iam_tenant
+def test_get_user_policy():
+    client = get_iam_client()
+
+    policy_document = json.dumps(
+        {"Version": "2012-10-17",
+         "Statement": {
+             "Effect": "Allow",
+             "Action": "*",
+             "Resource": "*"}}
+    )
+    response = client.put_user_policy(PolicyDocument=policy_document, PolicyName='AllAccessPolicy',
+                                      UserName=get_alt_user_id())
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+    response = client.get_user_policy(PolicyName='AllAccessPolicy', UserName=get_alt_user_id())
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    response = client.delete_user_policy(PolicyName='AllAccessPolicy',
+                                         UserName=get_alt_user_id())
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+
+@pytest.mark.user_policy
+@pytest.mark.iam_tenant
+def test_get_user_policy_invalid_user():
+    client = get_iam_client()
+
+    policy_document = json.dumps(
+        {"Version": "2012-10-17",
+         "Statement": {
+             "Effect": "Allow",
+             "Action": "*",
+             "Resource": "*"}}
+    )
+    response = client.put_user_policy(PolicyDocument=policy_document, PolicyName='AllAccessPolicy',
+                                      UserName=get_alt_user_id())
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+    e = assert_raises(ClientError, client.get_user_policy, PolicyName='AllAccessPolicy',
+                      UserName="some-non-existing-user-id")
+    status = _get_status(e.response)
+    assert status == 404
+    client.delete_user_policy(PolicyName='AllAccessPolicy', UserName=get_alt_user_id())
+
+
+@pytest.mark.user_policy
+@pytest.mark.iam_tenant
+@pytest.mark.fails_on_rgw
+def test_get_user_policy_invalid_policy_name():
+    client = get_iam_client()
+
+    policy_document = json.dumps(
+        {"Version": "2012-10-17",
+         "Statement": {
+             "Effect": "Allow",
+             "Action": "*",
+             "Resource": "*"}}
+    )
+    client.put_user_policy(PolicyDocument=policy_document, PolicyName='AllAccessPolicy',
+                           UserName=get_alt_user_id())
+    e = assert_raises(ClientError, client.get_user_policy, PolicyName='non-existing-policy-name',
+                      UserName=get_alt_user_id())
+    status = _get_status(e.response)
+    assert status == 404
+    client.delete_user_policy(PolicyName='AllAccessPolicy', UserName=get_alt_user_id())
+
+
+@pytest.mark.user_policy
+@pytest.mark.iam_tenant
+@pytest.mark.fails_on_rgw
+def test_get_deleted_user_policy():
+    client = get_iam_client()
+
+    policy_document = json.dumps(
+        {"Version": "2012-10-17",
+         "Statement": {
+             "Effect": "Allow",
+             "Action": "*",
+             "Resource": "*"}}
+    )
+    client.put_user_policy(PolicyDocument=policy_document, PolicyName='AllAccessPolicy',
+                           UserName=get_alt_user_id())
+    client.delete_user_policy(PolicyName='AllAccessPolicy', UserName=get_alt_user_id())
+    e = assert_raises(ClientError, client.get_user_policy, PolicyName='AllAccessPolicy',
+                      UserName=get_alt_user_id())
+    status = _get_status(e.response)
+    assert status == 404
+
+
+@pytest.mark.user_policy
+@pytest.mark.iam_tenant
+def test_get_user_policy_from_multiple_policies():
+    client = get_iam_client()
+
+    policy_document_allow = json.dumps(
+        {"Version": "2012-10-17",
+         "Statement": {
+             "Effect": "Allow",
+             "Action": "*",
+             "Resource": "*"}}
+    )
+
+    response = client.put_user_policy(PolicyDocument=policy_document_allow,
+                                      PolicyName='AllowAccessPolicy1',
+                                      UserName=get_alt_user_id())
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+    response = client.put_user_policy(PolicyDocument=policy_document_allow,
+                                      PolicyName='AllowAccessPolicy2',
+                                      UserName=get_alt_user_id())
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+    response = client.get_user_policy(PolicyName='AllowAccessPolicy2',
+                                      UserName=get_alt_user_id())
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    response = client.delete_user_policy(PolicyName='AllowAccessPolicy1',
+                                         UserName=get_alt_user_id())
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+    response = client.delete_user_policy(PolicyName='AllowAccessPolicy2',
+                                         UserName=get_alt_user_id())
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+
+@pytest.mark.user_policy
+@pytest.mark.iam_tenant
+def test_delete_user_policy():
+    client = get_iam_client()
+
+    policy_document_allow = json.dumps(
+        {"Version": "2012-10-17",
+         "Statement": {
+             "Effect": "Allow",
+             "Action": "*",
+             "Resource": "*"}}
+    )
+
+    response = client.put_user_policy(PolicyDocument=policy_document_allow,
+                                      PolicyName='AllowAccessPolicy',
+                                      UserName=get_alt_user_id())
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+    response = client.delete_user_policy(PolicyName='AllowAccessPolicy',
+                                         UserName=get_alt_user_id())
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+
+@pytest.mark.user_policy
+@pytest.mark.iam_tenant
+def test_delete_user_policy_invalid_user():
+    client = get_iam_client()
+
+    policy_document_allow = json.dumps(
+        {"Version": "2012-10-17",
+         "Statement": {
+             "Effect": "Allow",
+             "Action": "*",
+             "Resource": "*"}}
+    )
+
+    response = client.put_user_policy(PolicyDocument=policy_document_allow,
+                                      PolicyName='AllowAccessPolicy',
+                                      UserName=get_alt_user_id())
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+    e = assert_raises(ClientError, client.delete_user_policy, PolicyName='AllAccessPolicy',
+                      UserName="some-non-existing-user-id")
+    status = _get_status(e.response)
+    assert status == 404
+    response = client.delete_user_policy(PolicyName='AllowAccessPolicy',
+                                         UserName=get_alt_user_id())
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+
+@pytest.mark.user_policy
+@pytest.mark.iam_tenant
+def test_delete_user_policy_invalid_policy_name():
+    client = get_iam_client()
+
+    policy_document_allow = json.dumps(
+        {"Version": "2012-10-17",
+         "Statement": {
+             "Effect": "Allow",
+             "Action": "*",
+             "Resource": "*"}}
+    )
+
+    response = client.put_user_policy(PolicyDocument=policy_document_allow,
+                                      PolicyName='AllowAccessPolicy',
+                                      UserName=get_alt_user_id())
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+    e = assert_raises(ClientError, client.delete_user_policy, PolicyName='non-existing-policy-name',
+                      UserName=get_alt_user_id())
+    status = _get_status(e.response)
+    assert status == 404
+    response = client.delete_user_policy(PolicyName='AllowAccessPolicy',
+                                         UserName=get_alt_user_id())
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+
+@pytest.mark.user_policy
+@pytest.mark.iam_tenant
+def test_delete_user_policy_from_multiple_policies():
+    client = get_iam_client()
+
+    policy_document_allow = json.dumps(
+        {"Version": "2012-10-17",
+         "Statement": {
+             "Effect": "Allow",
+             "Action": "*",
+             "Resource": "*"}}
+    )
+
+    response = client.put_user_policy(PolicyDocument=policy_document_allow,
+                                      PolicyName='AllowAccessPolicy1',
+                                      UserName=get_alt_user_id())
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+    response = client.put_user_policy(PolicyDocument=policy_document_allow,
+                                      PolicyName='AllowAccessPolicy2',
+                                      UserName=get_alt_user_id())
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+    response = client.put_user_policy(PolicyDocument=policy_document_allow,
+                                      PolicyName='AllowAccessPolicy3',
+                                      UserName=get_alt_user_id())
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+    response = client.delete_user_policy(PolicyName='AllowAccessPolicy1',
+                                         UserName=get_alt_user_id())
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+    response = client.delete_user_policy(PolicyName='AllowAccessPolicy2',
+                                         UserName=get_alt_user_id())
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+    response = client.get_user_policy(PolicyName='AllowAccessPolicy3',
+                                      UserName=get_alt_user_id())
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    response = client.delete_user_policy(PolicyName='AllowAccessPolicy3',
+                                         UserName=get_alt_user_id())
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+
+@pytest.mark.user_policy
+@pytest.mark.iam_tenant
+def test_allow_bucket_actions_in_user_policy():
+    client = get_iam_client()
+    s3_client_alt = get_alt_client()
+
+    s3_client_iam = get_iam_s3client()
+    bucket = get_new_bucket(client=s3_client_iam)
+    s3_client_iam.put_object(Bucket=bucket, Key='foo', Body='bar')
+
+    policy_document_allow = json.dumps(
+        {"Version": "2012-10-17",
+         "Statement": {
+             "Effect": "Allow",
+             "Action": ["s3:ListBucket", "s3:DeleteBucket"],
+             "Resource": f"arn:aws:s3:::{bucket}"}}
+    )
+
+    response = client.put_user_policy(PolicyDocument=policy_document_allow,
+                                      PolicyName='AllowAccessPolicy', UserName=get_alt_user_id())
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    response = s3_client_alt.list_objects(Bucket=bucket)
+    object_found = False
+    for object_received in response['Contents']:
+        if "foo" == object_received['Key']:
+            object_found = True
+            break
+    if not object_found:
+        raise AssertionError("Object is not listed")
+
+    response = s3_client_iam.delete_object(Bucket=bucket, Key='foo')
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 204
+
+    response = s3_client_alt.delete_bucket(Bucket=bucket)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 204
+
+    response = s3_client_iam.list_buckets()
+    for listed in response['Buckets']:
+        if bucket == listed['Name']:
+            raise AssertionError("deleted bucket is getting listed")
+
+    response = client.delete_user_policy(PolicyName='AllowAccessPolicy',
+                                         UserName=get_alt_user_id())
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+
+@pytest.mark.user_policy
+@pytest.mark.iam_tenant
+@pytest.mark.fails_on_dbstore
+def test_deny_bucket_actions_in_user_policy():
+    client = get_iam_client()
+    s3_client = get_alt_client()
+    bucket = get_new_bucket(client=s3_client)
+
+    policy_document_deny = json.dumps(
+        {"Version": "2012-10-17",
+         "Statement": {
+             "Effect": "Deny",
+             "Action": ["s3:ListAllMyBuckets", "s3:DeleteBucket"],
+             "Resource": "arn:aws:s3:::*"}}
+    )
+
+    response = client.put_user_policy(PolicyDocument=policy_document_deny,
+                                      PolicyName='DenyAccessPolicy',
+                                      UserName=get_alt_user_id())
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    e = assert_raises(ClientError, s3_client.list_buckets)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 403
+    assert error_code == 'AccessDenied'
+    e = assert_raises(ClientError, s3_client.delete_bucket, Bucket=bucket)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 403
+    assert error_code == 'AccessDenied'
+    response = client.delete_user_policy(PolicyName='DenyAccessPolicy',
+                                         UserName=get_alt_user_id())
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+    response = s3_client.delete_bucket(Bucket=bucket)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 204
+
+
+@pytest.mark.user_policy
+@pytest.mark.iam_tenant
+def test_allow_object_actions_in_user_policy():
+    client = get_iam_client()
+    s3_client_alt = get_alt_client()
+    s3_client_iam = get_iam_s3client()
+    bucket = get_new_bucket(client=s3_client_iam)
+
+    policy_document_allow = json.dumps(
+        {"Version": "2012-10-17",
+         "Statement": {
+             "Effect": "Allow",
+             "Action": ["s3:PutObject", "s3:GetObject", "s3:DeleteObject"],
+             "Resource": f"arn:aws:s3:::{bucket}/*"}}
+    )
+    response = client.put_user_policy(PolicyDocument=policy_document_allow,
+                                      PolicyName='AllowAccessPolicy', UserName=get_alt_user_id())
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    s3_client_alt.put_object(Bucket=bucket, Key='foo', Body='bar')
+    response = s3_client_alt.get_object(Bucket=bucket, Key='foo')
+    body = response['Body'].read()
+    if type(body) is bytes:
+        body = body.decode()
+    assert body == "bar"
+    response = s3_client_alt.delete_object(Bucket=bucket, Key='foo')
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 204
+
+    e = assert_raises(ClientError, s3_client_iam.get_object, Bucket=bucket, Key='foo')
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 404
+    assert error_code == 'NoSuchKey'
+    response = s3_client_iam.delete_bucket(Bucket=bucket)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 204
+    response = client.delete_user_policy(PolicyName='AllowAccessPolicy',
+                                         UserName=get_alt_user_id())
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+
+@pytest.mark.user_policy
+@pytest.mark.iam_tenant
+@pytest.mark.fails_on_dbstore
+def test_deny_object_actions_in_user_policy():
+    client = get_iam_client()
+    s3_client_alt = get_alt_client()
+    bucket = get_new_bucket(client=s3_client_alt)
+    s3_client_alt.put_object(Bucket=bucket, Key='foo', Body='bar')
+
+    policy_document_deny = json.dumps(
+        {"Version": "2012-10-17",
+         "Statement": [{
+             "Effect": "Deny",
+             "Action": ["s3:PutObject", "s3:GetObject", "s3:DeleteObject"],
+             "Resource": f"arn:aws:s3:::{bucket}/*"}, {
+             "Effect": "Allow",
+             "Action": ["s3:DeleteBucket"],
+             "Resource": f"arn:aws:s3:::{bucket}"}]}
+    )
+    client.put_user_policy(PolicyDocument=policy_document_deny, PolicyName='DenyAccessPolicy',
+                           UserName=get_alt_user_id())
+
+    e = assert_raises(ClientError, s3_client_alt.put_object, Bucket=bucket, Key='foo')
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 403
+    assert error_code == 'AccessDenied'
+    e = assert_raises(ClientError, s3_client_alt.get_object, Bucket=bucket, Key='foo')
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 403
+    assert error_code == 'AccessDenied'
+    e = assert_raises(ClientError, s3_client_alt.delete_object, Bucket=bucket, Key='foo')
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 403
+    assert error_code == 'AccessDenied'
+
+    response = client.delete_user_policy(PolicyName='DenyAccessPolicy',
+                                         UserName=get_alt_user_id())
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+
+@pytest.mark.user_policy
+@pytest.mark.iam_tenant
+def test_allow_multipart_actions_in_user_policy():
+    client = get_iam_client()
+    s3_client_alt = get_alt_client()
+    s3_client_iam = get_iam_s3client()
+    bucket = get_new_bucket(client=s3_client_iam)
+
+    policy_document_allow = json.dumps(
+        {"Version": "2012-10-17",
+         "Statement": {
+             "Effect": "Allow",
+             "Action": ["s3:ListBucketMultipartUploads", "s3:AbortMultipartUpload"],
+             "Resource": "arn:aws:s3:::*"}}
+    )
+    response = client.put_user_policy(PolicyDocument=policy_document_allow,
+                                      PolicyName='AllowAccessPolicy', UserName=get_alt_user_id())
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+    key = "mymultipart"
+    mb = 1024 * 1024
+
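+    # _multipart_upload (imported from .test_s3) is assumed to return an
+    # (upload_id, data, parts) tuple; only the upload id is needed here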
+    (upload_id, _, _) = _multipart_upload(client=s3_client_iam, bucket_name=bucket, key=key,
+                                          size=5 * mb)
+    response = s3_client_alt.list_multipart_uploads(Bucket=bucket)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+    response = s3_client_alt.abort_multipart_upload(Bucket=bucket, Key=key, UploadId=upload_id)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 204
+
+    response = s3_client_iam.delete_bucket(Bucket=bucket)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 204
+    response = client.delete_user_policy(PolicyName='AllowAccessPolicy',
+                                         UserName=get_alt_user_id())
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+
+@pytest.mark.user_policy
+@pytest.mark.iam_tenant
+@pytest.mark.fails_on_dbstore
+def test_deny_multipart_actions_in_user_policy():
+    client = get_iam_client()
+    s3_client = get_alt_client()
+    bucket = get_new_bucket(client=s3_client)
+
+    policy_document_deny = json.dumps(
+        {"Version": "2012-10-17",
+         "Statement": {
+             "Effect": "Deny",
+             "Action": ["s3:ListBucketMultipartUploads", "s3:AbortMultipartUpload"],
+             "Resource": "arn:aws:s3:::*"}}
+    )
+    response = client.put_user_policy(PolicyDocument=policy_document_deny,
+                                      PolicyName='DenyAccessPolicy',
+                                      UserName=get_alt_user_id())
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+    key = "mymultipart"
+    mb = 1024 * 1024
+
+    (upload_id, _, _) = _multipart_upload(client=s3_client, bucket_name=bucket, key=key,
+                                          size=5 * mb)
+
+    e = assert_raises(ClientError, s3_client.list_multipart_uploads, Bucket=bucket)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 403
+    assert error_code == 'AccessDenied'
+
+    e = assert_raises(ClientError, s3_client.abort_multipart_upload, Bucket=bucket,
+                      Key=key, UploadId=upload_id)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 403
+    assert error_code == 'AccessDenied'
+
+    response = s3_client.delete_bucket(Bucket=bucket)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 204
+    response = client.delete_user_policy(PolicyName='DenyAccessPolicy',
+                                         UserName=get_alt_user_id())
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+
+@pytest.mark.user_policy
+@pytest.mark.iam_tenant
+@pytest.mark.fails_on_dbstore
+def test_allow_tagging_actions_in_user_policy():
+    client = get_iam_client()
+    s3_client_alt = get_alt_client()
+    s3_client_iam = get_iam_s3client()
+    bucket = get_new_bucket(client=s3_client_iam)
+
+    policy_document_allow = json.dumps(
+        {"Version": "2012-10-17",
+         "Statement": {
+             "Effect": "Allow",
+             "Action": ["s3:PutBucketTagging", "s3:GetBucketTagging",
+                        "s3:PutObjectTagging", "s3:GetObjectTagging"],
+             "Resource": f"arn:aws:s3:::*"}}
+    )
+    client.put_user_policy(PolicyDocument=policy_document_allow, PolicyName='AllowAccessPolicy',
+                           UserName=get_alt_user_id())
+    tags = {'TagSet': [{'Key': 'Hello', 'Value': 'World'}, ]}
+
+    response = s3_client_alt.put_bucket_tagging(Bucket=bucket, Tagging=tags)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+    response = s3_client_alt.get_bucket_tagging(Bucket=bucket)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+    assert response['TagSet'][0]['Key'] == 'Hello'
+    assert response['TagSet'][0]['Value'] == 'World'
+
+    obj_key = 'obj'
+    response = s3_client_iam.put_object(Bucket=bucket, Key=obj_key, Body='obj_body')
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+    response = s3_client_alt.put_object_tagging(Bucket=bucket, Key=obj_key, Tagging=tags)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+    response = s3_client_alt.get_object_tagging(Bucket=bucket, Key=obj_key)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+    assert response['TagSet'] == tags['TagSet']
+
+    response = s3_client_iam.delete_object(Bucket=bucket, Key=obj_key)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 204
+    response = s3_client_iam.delete_bucket(Bucket=bucket)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 204
+    response = client.delete_user_policy(PolicyName='AllowAccessPolicy',
+                                         UserName=get_alt_user_id())
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+
+@pytest.mark.user_policy
+@pytest.mark.iam_tenant
+@pytest.mark.fails_on_dbstore
+def test_deny_tagging_actions_in_user_policy():
+    client = get_iam_client()
+    s3_client = get_alt_client()
+    bucket = get_new_bucket(client=s3_client)
+
+    policy_document_deny = json.dumps(
+        {"Version": "2012-10-17",
+         "Statement": {
+             "Effect": "Deny",
+             "Action": ["s3:PutBucketTagging", "s3:GetBucketTagging",
+                        "s3:PutObjectTagging", "s3:DeleteObjectTagging"],
+             "Resource": "arn:aws:s3:::*"}}
+    )
+    client.put_user_policy(PolicyDocument=policy_document_deny, PolicyName='DenyAccessPolicy',
+                           UserName=get_alt_user_id())
+    tags = {'TagSet': [{'Key': 'Hello', 'Value': 'World'}, ]}
+
+    e = assert_raises(ClientError, s3_client.put_bucket_tagging, Bucket=bucket, Tagging=tags)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 403
+    assert error_code == 'AccessDenied'
+    e = assert_raises(ClientError, s3_client.get_bucket_tagging, Bucket=bucket)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 403
+    assert error_code == 'AccessDenied'
+
+    obj_key = 'obj'
+    response = s3_client.put_object(Bucket=bucket, Key=obj_key, Body='obj_body')
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    e = assert_raises(ClientError, s3_client.put_object_tagging, Bucket=bucket, Key=obj_key,
+                      Tagging=tags)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 403
+    assert error_code == 'AccessDenied'
+    e = assert_raises(ClientError, s3_client.delete_object_tagging, Bucket=bucket, Key=obj_key)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 403
+    assert error_code == 'AccessDenied'
+
+    response = s3_client.delete_object(Bucket=bucket, Key=obj_key)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 204
+    response = s3_client.delete_bucket(Bucket=bucket)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 204
+    response = client.delete_user_policy(PolicyName='DenyAccessPolicy',
+                                         UserName=get_alt_user_id())
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+
+@pytest.mark.user_policy
+@pytest.mark.iam_tenant
+@pytest.mark.fails_on_dbstore
+def test_verify_conflicting_user_policy_statements():
+    s3client = get_alt_client()
+    bucket = get_new_bucket(client=s3client)
+    policy_document = json.dumps(
+        {"Version": "2012-10-17",
+         "Statement": [
+             {"Sid": "98AB54CG",
+              "Effect": "Allow",
+              "Action": "s3:ListBucket",
+              "Resource": f"arn:aws:s3:::{bucket}"},
+             {"Sid": "98AB54CA",
+              "Effect": "Deny",
+              "Action": "s3:ListBucket",
+              "Resource": f"arn:aws:s3:::{bucket}"}
+         ]}
+    )
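+    # an explicit Deny overrides the Allow for the same action and resource,
+    # so the list_objects call below is expected to fail with AccessDenied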
+    client = get_iam_client()
+    response = client.put_user_policy(PolicyDocument=policy_document, PolicyName='DenyAccessPolicy',
+                                      UserName=get_alt_user_id())
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+    e = assert_raises(ClientError, s3client.list_objects, Bucket=bucket)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 403
+    assert error_code == 'AccessDenied'
+    response = client.delete_user_policy(PolicyName='DenyAccessPolicy',
+                                         UserName=get_alt_user_id())
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+
+@pytest.mark.user_policy
+@pytest.mark.iam_tenant
+@pytest.mark.fails_on_dbstore
+def test_verify_conflicting_user_policies():
+    s3client = get_alt_client()
+    bucket = get_new_bucket(client=s3client)
+    policy_allow = json.dumps(
+        {"Version": "2012-10-17",
+         "Statement": {"Sid": "98AB54CG",
+                       "Effect": "Allow",
+                       "Action": "s3:ListBucket",
+                       "Resource": f"arn:aws:s3:::{bucket}"}}
+    )
+    policy_deny = json.dumps(
+        {"Version": "2012-10-17",
+         "Statement": {"Sid": "98AB54CGZ",
+                       "Effect": "Deny",
+                       "Action": "s3:ListBucket",
+                       "Resource": f"arn:aws:s3:::{bucket}"}}
+    )
+    client = get_iam_client()
+    response = client.put_user_policy(PolicyDocument=policy_allow, PolicyName='AllowAccessPolicy',
+                                      UserName=get_alt_user_id())
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+    response = client.put_user_policy(PolicyDocument=policy_deny, PolicyName='DenyAccessPolicy',
+                                      UserName=get_alt_user_id())
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+    e = assert_raises(ClientError, s3client.list_objects, Bucket=bucket)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 403
+    assert error_code == 'AccessDenied'
+    response = client.delete_user_policy(PolicyName='AllowAccessPolicy',
+                                         UserName=get_alt_user_id())
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+    response = client.delete_user_policy(PolicyName='DenyAccessPolicy',
+                                         UserName=get_alt_user_id())
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+
+@pytest.mark.user_policy
+@pytest.mark.iam_tenant
+def test_verify_allow_iam_actions():
+    policy1 = json.dumps(
+        {"Version": "2012-10-17",
+         "Statement": {"Sid": "98AB54CGA",
+                       "Effect": "Allow",
+                       "Action": ["iam:PutUserPolicy", "iam:GetUserPolicy",
+                                  "iam:ListUserPolicies", "iam:DeleteUserPolicy"],
+                       "Resource": f"arn:aws:iam:::user/{get_alt_user_id()}"}}
+    )
+    client1 = get_iam_client()
+    iam_client_alt = get_alt_iam_client()
+
+    response = client1.put_user_policy(PolicyDocument=policy1, PolicyName='AllowAccessPolicy',
+                                       UserName=get_alt_user_id())
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+    response = iam_client_alt.get_user_policy(PolicyName='AllowAccessPolicy',
+                                       UserName=get_alt_user_id())
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+    response = iam_client_alt.list_user_policies(UserName=get_alt_user_id())
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+    response = iam_client_alt.delete_user_policy(PolicyName='AllowAccessPolicy',
+                                          UserName=get_alt_user_id())
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+
+# IAM User apis
+@pytest.mark.iam_account
+@pytest.mark.iam_user
+def test_account_user_create(iam_root):
+    path = get_iam_path_prefix()
+    name1 = make_iam_name('U1')
+    response = iam_root.create_user(UserName=name1, Path=path)
+    user = response['User']
+    assert user['Path'] == path
+    assert user['UserName'] == name1
+    assert len(user['UserId'])
+    assert user['Arn'].startswith('arn:aws:iam:')
+    assert user['Arn'].endswith(f':user{path}{name1}')
+    assert user['CreateDate'] > datetime.datetime(1970, 1, 1, tzinfo=datetime.timezone.utc)
+
+    path2 = get_iam_path_prefix() + 'foo/'
+    with pytest.raises(iam_root.exceptions.EntityAlreadyExistsException):
+        iam_root.create_user(UserName=name1, Path=path2)
+
+    name2 = make_iam_name('U2')
+    response = iam_root.create_user(UserName=name2, Path=path2)
+    user = response['User']
+    assert user['Path'] == path2
+    assert user['UserName'] == name2
+
+@pytest.mark.iam_account
+@pytest.mark.iam_user
+def test_account_user_case_insensitive_name(iam_root):
+    path = get_iam_path_prefix()
+    name_upper = make_iam_name('U1')
+    name_lower = make_iam_name('u1')
+    response = iam_root.create_user(UserName=name_upper, Path=path)
+    user = response['User']
+
+    # name is case-insensitive, so 'u1' should also conflict
+    with pytest.raises(iam_root.exceptions.EntityAlreadyExistsException):
+        iam_root.create_user(UserName=name_lower)
+
+    # search for 'u1' should return the same 'U1' user
+    response = iam_root.get_user(UserName=name_lower)
+    assert user == response['User']
+
+    # deleting 'u1' should delete the same 'U1' user
+    iam_root.delete_user(UserName=name_lower)
+
+    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
+        iam_root.get_user(UserName=name_lower)
+
+@pytest.mark.iam_account
+@pytest.mark.iam_user
+def test_account_user_delete(iam_root):
+    path = get_iam_path_prefix()
+    name = make_iam_name('U1')
+    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
+        iam_root.delete_user(UserName=name)
+
+    response = iam_root.create_user(UserName=name, Path=path)
+    uid = response['User']['UserId']
+    create_date = response['User']['CreateDate']
+
+    iam_root.delete_user(UserName=name)
+
+    response = iam_root.create_user(UserName=name, Path=path)
+    assert uid != response['User']['UserId']
+    assert create_date <= response['User']['CreateDate']
+
+def user_list_names(client, **kwargs):
+    p = client.get_paginator('list_users')
+    usernames = []
+    for response in p.paginate(**kwargs):
+        usernames += [u['UserName'] for u in response['Users']]
+    return usernames
+
+@pytest.mark.iam_account
+@pytest.mark.iam_user
+def test_account_user_list(iam_root):
+    path = get_iam_path_prefix()
+    response = iam_root.list_users(PathPrefix=path)
+    assert len(response['Users']) == 0
+    assert response['IsTruncated'] == False
+
+    name1 = make_iam_name('aa')
+    name2 = make_iam_name('Ab')
+    name3 = make_iam_name('ac')
+    name4 = make_iam_name('Ad')
+
+    # sort order is independent of CreateDate, Path, and UserName capitalization
+    iam_root.create_user(UserName=name4, Path=path+'w/')
+    iam_root.create_user(UserName=name3, Path=path+'x/')
+    iam_root.create_user(UserName=name2, Path=path+'y/')
+    iam_root.create_user(UserName=name1, Path=path+'z/')
+
+    assert [name1, name2, name3, name4] == \
+            user_list_names(iam_root, PathPrefix=path)
+    assert [name1, name2, name3, name4] == \
+            user_list_names(iam_root, PathPrefix=path, PaginationConfig={'PageSize': 1})
+
+@pytest.mark.iam_account
+@pytest.mark.iam_user
+def test_account_user_list_path_prefix(iam_root):
+    path = get_iam_path_prefix()
+    response = iam_root.list_users(PathPrefix=path)
+    assert len(response['Users']) == 0
+    assert response['IsTruncated'] == False
+
+    name1 = make_iam_name('a')
+    name2 = make_iam_name('b')
+    name3 = make_iam_name('c')
+    name4 = make_iam_name('d')
+
+    iam_root.create_user(UserName=name1, Path=path)
+    iam_root.create_user(UserName=name2, Path=path)
+    iam_root.create_user(UserName=name3, Path=path+'a/')
+    iam_root.create_user(UserName=name4, Path=path+'a/x/')
+
+    assert [name1, name2, name3, name4] == \
+            user_list_names(iam_root, PathPrefix=path)
+    assert [name1, name2, name3, name4] == \
+            user_list_names(iam_root, PathPrefix=path,
+                            PaginationConfig={'PageSize': 1})
+    assert [name3, name4] == \
+            user_list_names(iam_root, PathPrefix=path+'a')
+    assert [name3, name4] == \
+            user_list_names(iam_root, PathPrefix=path+'a',
+                            PaginationConfig={'PageSize': 1})
+    assert [name4] == \
+            user_list_names(iam_root, PathPrefix=path+'a/x')
+    assert [name4] == \
+            user_list_names(iam_root, PathPrefix=path+'a/x',
+                            PaginationConfig={'PageSize': 1})
+    assert [] == user_list_names(iam_root, PathPrefix=path+'a/x/d')
+
+@pytest.mark.iam_account
+@pytest.mark.iam_user
+def test_account_user_update_name(iam_root):
+    path = get_iam_path_prefix()
+    name1 = make_iam_name('a')
+    new_name1 = make_iam_name('z')
+    name2 = make_iam_name('b')
+    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
+        iam_root.update_user(UserName=name1, NewUserName=new_name1)
+
+    iam_root.create_user(UserName=name1, Path=path)
+    iam_root.create_user(UserName=name2, Path=path+'m/')
+    assert [name1, name2] == user_list_names(iam_root, PathPrefix=path)
+
+    response = iam_root.get_user(UserName=name1)
+    assert name1 == response['User']['UserName']
+    uid = response['User']['UserId']
+
+    iam_root.update_user(UserName=name1, NewUserName=new_name1)
+
+    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
+        iam_root.get_user(UserName=name1)
+
+    response = iam_root.get_user(UserName=new_name1)
+    assert new_name1 == response['User']['UserName']
+    assert uid == response['User']['UserId']
+    assert response['User']['Arn'].endswith(f':user{path}{new_name1}')
+
+    assert [name2, new_name1] == user_list_names(iam_root, PathPrefix=path)
+
+@pytest.mark.iam_account
+@pytest.mark.iam_user
+def test_account_user_update_path(iam_root):
+    path = get_iam_path_prefix()
+    name1 = make_iam_name('a')
+    name2 = make_iam_name('b')
+    iam_root.create_user(UserName=name1, Path=path)
+    iam_root.create_user(UserName=name2, Path=path+'m/')
+    assert [name1, name2] == user_list_names(iam_root, PathPrefix=path)
+
+    response = iam_root.get_user(UserName=name1)
+    assert name1 == response['User']['UserName']
+    assert path == response['User']['Path']
+    uid = response['User']['UserId']
+
+    iam_root.update_user(UserName=name1, NewPath=path+'z/')
+
+    response = iam_root.get_user(UserName=name1)
+    assert name1 == response['User']['UserName']
+    assert f'{path}z/' == response['User']['Path']
+    assert uid == response['User']['UserId']
+    assert response['User']['Arn'].endswith(f':user{path}z/{name1}')
+
+    assert [name1, name2] == user_list_names(iam_root, PathPrefix=path)
+
+
+# IAM AccessKey APIs
+@pytest.mark.iam_account
+@pytest.mark.iam_user
+def test_account_user_access_key_create(iam_root):
+    path = get_iam_path_prefix()
+    name = make_iam_name('a')
+    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
+        iam_root.create_access_key(UserName=name)
+
+    iam_root.create_user(UserName=name, Path=path)
+
+    response = iam_root.create_access_key(UserName=name)
+    key = response['AccessKey']
+    assert name == key['UserName']
+    assert len(key['AccessKeyId'])
+    assert len(key['SecretAccessKey'])
+    assert 'Active' == key['Status']
+    assert key['CreateDate'] > datetime.datetime(1970, 1, 1, tzinfo=datetime.timezone.utc)
+
+@pytest.mark.iam_account
+@pytest.mark.iam_user
+def test_account_current_user_access_key_create(iam_root):
+    # omit the UserName argument to operate on the current authenticated
+    # user (assumed to be an account root user)
+
+    response = iam_root.create_access_key()
+    key = response['AccessKey']
+    keyid = key['AccessKeyId']
+    assert len(keyid)
+    try:
+        assert len(key['SecretAccessKey'])
+        assert 'Active' == key['Status']
+        assert key['CreateDate'] > datetime.datetime(1970, 1, 1, tzinfo=datetime.timezone.utc)
+    finally:
+        # iam_root doesn't see the account root user, so clean up
+        # this key manually
+        iam_root.delete_access_key(AccessKeyId=keyid)
+
+@pytest.mark.iam_account
+@pytest.mark.iam_user
+def test_account_user_access_key_update(iam_root):
+    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
+        iam_root.update_access_key(UserName='nosuchuser', AccessKeyId='abcdefghijklmnopqrstu', Status='Active')
+
+    path = get_iam_path_prefix()
+    name = make_iam_name('a')
+    iam_root.create_user(UserName=name, Path=path)
+
+    response = iam_root.create_access_key(UserName=name)
+    key = response['AccessKey']
+    keyid = key['AccessKeyId']
+    create_date = key['CreateDate']
+    assert create_date > datetime.datetime(1970, 1, 1, tzinfo=datetime.timezone.utc)
+
+    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
+        iam_root.update_access_key(UserName=name, AccessKeyId='abcdefghijklmnopqrstu', Status='Active')
+
+    iam_root.update_access_key(UserName=name, AccessKeyId=keyid, Status='Active')
+    iam_root.update_access_key(UserName=name, AccessKeyId=keyid, Status='Inactive')
+
+    response = iam_root.list_access_keys(UserName=name)
+    keys = response['AccessKeyMetadata']
+    assert 1 == len(keys)
+    key = keys[0]
+    assert name == key['UserName']
+    assert keyid == key['AccessKeyId']
+    assert 'Inactive' == key['Status']
+    assert create_date == key['CreateDate'] # CreateDate unchanged by update_access_key()
+
+@pytest.mark.iam_account
+@pytest.mark.iam_user
+def test_account_current_user_access_key_update(iam_root):
+    # omit the UserName argument to operate on the current authenticated
+    # user (assumed to be an account root user)
+
+    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
+        iam_root.update_access_key(AccessKeyId='abcdefghijklmnopqrstu', Status='Active')
+
+    response = iam_root.create_access_key()
+    key = response['AccessKey']
+    keyid = key['AccessKeyId']
+    assert len(keyid)
+    try:
+        iam_root.update_access_key(AccessKeyId=keyid, Status='Active')
+        iam_root.update_access_key(AccessKeyId=keyid, Status='Inactive')
+
+        # find the access key id we created
+        p = iam_root.get_paginator('list_access_keys')
+        for response in p.paginate():
+            for key in response['AccessKeyMetadata']:
+                if keyid == key['AccessKeyId']:
+                    assert 'Inactive' == key['Status']
+                    return
+        assert False, f'AccessKeyId={keyid} not found in list_access_keys()'
+
+    finally:
+        # iam_root doesn't see the account root user, so clean up
+        # this key manually
+        iam_root.delete_access_key(AccessKeyId=keyid)
+
+@pytest.mark.iam_account
+@pytest.mark.iam_user
+def test_account_user_access_key_delete(iam_root):
+    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
+        iam_root.delete_access_key(UserName='nosuchuser', AccessKeyId='abcdefghijklmnopqrstu')
+
+    path = get_iam_path_prefix()
+    name = make_iam_name('a')
+    iam_root.create_user(UserName=name, Path=path)
+
+    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
+        iam_root.delete_access_key(UserName=name, AccessKeyId='abcdefghijklmnopqrstu')
+
+    response = iam_root.create_access_key(UserName=name)
+    keyid = response['AccessKey']['AccessKeyId']
+
+    iam_root.delete_access_key(UserName=name, AccessKeyId=keyid)
+
+    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
+        iam_root.delete_access_key(UserName=name, AccessKeyId=keyid)
+
+    response = iam_root.list_access_keys(UserName=name)
+    keys = response['AccessKeyMetadata']
+    assert 0 == len(keys)
+
+@pytest.mark.iam_account
+@pytest.mark.iam_user
+def test_account_current_user_access_key_delete(iam_root):
+    # omit the UserName argument to operate on the current authenticated
+    # user (assumed to be an account root user)
+
+    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
+        iam_root.delete_access_key(AccessKeyId='abcdefghijklmnopqrstu')
+
+    response = iam_root.create_access_key()
+    keyid = response['AccessKey']['AccessKeyId']
+
+    iam_root.delete_access_key(AccessKeyId=keyid)
+
+    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
+        iam_root.delete_access_key(AccessKeyId=keyid)
+
+    # make sure list_access_keys() doesn't return the access key id we deleted
+    p = iam_root.get_paginator('list_access_keys')
+    for response in p.paginate():
+        for key in response['AccessKeyMetadata']:
+            assert keyid != key['AccessKeyId']
+
+def user_list_key_ids(client, **kwargs):
+    p = client.get_paginator('list_access_keys')
+    ids = []
+    for response in p.paginate(**kwargs):
+        ids += [k['AccessKeyId'] for k in response['AccessKeyMetadata']]
+    return ids
+
+@pytest.mark.iam_account
+@pytest.mark.iam_user
+def test_account_user_access_key_list(iam_root):
+    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
+        iam_root.list_access_keys(UserName='nosuchuser')
+
+    path = get_iam_path_prefix()
+    name = make_iam_name('a')
+    iam_root.create_user(UserName=name, Path=path)
+
+    assert [] == user_list_key_ids(iam_root, UserName=name)
+    assert [] == user_list_key_ids(iam_root, UserName=name, PaginationConfig={'PageSize': 1})
+
+    id1 = iam_root.create_access_key(UserName=name)['AccessKey']['AccessKeyId']
+
+    assert [id1] == user_list_key_ids(iam_root, UserName=name)
+    assert [id1] == user_list_key_ids(iam_root, UserName=name, PaginationConfig={'PageSize': 1})
+
+    id2 = iam_root.create_access_key(UserName=name)['AccessKey']['AccessKeyId']
+    # AccessKeysPerUser=2 is the default quota in AWS, so stop at two keys per user
+
+    keys = sorted([id1, id2])
+    assert keys == sorted(user_list_key_ids(iam_root, UserName=name))
+    assert keys == sorted(user_list_key_ids(iam_root, UserName=name, PaginationConfig={'PageSize': 1}))
+
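+# retry helper for eventual consistency of newly created credentials/policies:
+# call func(*args, **kwargs) up to 'tries' times, retrying (with a linearly
+# growing sleep) whenever the ClientError code matches 'code' (a single error
+# code or a tuple of codes); any other error, or the final failure, is re-raised.
+# usage sketch (arguments here are illustrative only):
+#   retry_on('InvalidAccessKeyId', 10, client.list_buckets)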
+def retry_on(code, tries, func, *args, **kwargs):
+    for i in range(tries):
+        try:
+            return func(*args, **kwargs)
+        except ClientError as e:
+            err = e.response['Error']['Code']
+            if i + 1 < tries and err in code:
+                print(f'Got {err}, retrying in {i}s..')
+                time.sleep(i)
+                continue
+            raise
+
+
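+# iam user in the account is granted access to a root-owned bucket via a bucket
+# policy naming the user's arn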
+@pytest.mark.iam_account
+@pytest.mark.iam_user
+def test_account_user_bucket_policy_allow(iam_root):
+    path = get_iam_path_prefix()
+    name = make_iam_name('name')
+    response = iam_root.create_user(UserName=name, Path=path)
+    user_arn = response['User']['Arn']
+    assert user_arn.startswith('arn:aws:iam:')
+    assert user_arn.endswith(f':user{path}{name}')
+
+    key = iam_root.create_access_key(UserName=name)['AccessKey']
+    client = get_iam_s3client(aws_access_key_id=key['AccessKeyId'],
+                              aws_secret_access_key=key['SecretAccessKey'])
+
+    # create a bucket with the root user
+    roots3 = get_iam_root_client(service_name='s3')
+    bucket = get_new_bucket(roots3)
+    try:
+        # the access key may take a bit to start working. retry until it returns
+        # something other than InvalidAccessKeyId
+        e = assert_raises(ClientError, retry_on, 'InvalidAccessKeyId', 10, client.list_objects, Bucket=bucket)
+        # expect AccessDenied because no identity policy allows s3 actions
+        status, error_code = _get_status_and_error_code(e.response)
+        assert status == 403
+        assert error_code == 'AccessDenied'
+
+        # add a bucket policy that allows s3:ListBucket for the iam user's arn
+        policy = json.dumps({
+            'Version': '2012-10-17',
+            'Statement': [{
+                'Effect': 'Allow',
+                'Principal': {'AWS': user_arn},
+                'Action': 's3:ListBucket',
+                'Resource': f'arn:aws:s3:::{bucket}'
+                }]
+            })
+        roots3.put_bucket_policy(Bucket=bucket, Policy=policy)
+
+        # verify that the iam user can eventually access it
+        retry_on('AccessDenied', 10, client.list_objects, Bucket=bucket)
+    finally:
+        roots3.delete_bucket(Bucket=bucket)
+
+
+# IAM UserPolicy APIs
+@pytest.mark.user_policy
+@pytest.mark.iam_account
+def test_account_user_policy(iam_root):
+    path = get_iam_path_prefix()
+    name = make_iam_name('name')
+    policy_name = 'List'
+    bucket_name = get_new_bucket_name()
+    policy1 = json.dumps({'Version': '2012-10-17', 'Statement': [
+        {'Effect': 'Deny',
+         'Action': 's3:ListBucket',
+         'Resource': f'arn:aws:s3:::{bucket_name}'}]})
+    policy2 = json.dumps({'Version': '2012-10-17', 'Statement': [
+        {'Effect': 'Allow',
+         'Action': 's3:ListBucket',
+         'Resource': f'arn:aws:s3:::{bucket_name}'}]})
+
+    # Get/Put/Delete fail on nonexistent UserName
+    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
+        iam_root.get_user_policy(UserName=name, PolicyName=policy_name)
+    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
+        iam_root.delete_user_policy(UserName=name, PolicyName=policy_name)
+    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
+        iam_root.put_user_policy(UserName=name, PolicyName=policy_name, PolicyDocument=policy1)
+
+    iam_root.create_user(UserName=name, Path=path)
+
+    # Get/Delete fail on nonexistent PolicyName
+    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
+        iam_root.get_user_policy(UserName=name, PolicyName=policy_name)
+    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
+        iam_root.delete_user_policy(UserName=name, PolicyName=policy_name)
+
+    iam_root.put_user_policy(UserName=name, PolicyName=policy_name, PolicyDocument=policy1)
+
+    response = iam_root.get_user_policy(UserName=name, PolicyName=policy_name)
+    assert policy1 == json.dumps(response['PolicyDocument'])
+    response = iam_root.list_user_policies(UserName=name)
+    assert [policy_name] == response['PolicyNames']
+
+    iam_root.put_user_policy(UserName=name, PolicyName=policy_name, PolicyDocument=policy2)
+
+    response = iam_root.get_user_policy(UserName=name, PolicyName=policy_name)
+    assert policy2 == json.dumps(response['PolicyDocument'])
+    response = iam_root.list_user_policies(UserName=name)
+    assert [policy_name] == response['PolicyNames']
+
+    iam_root.delete_user_policy(UserName=name, PolicyName=policy_name)
+
+    # Get/Delete fail after Delete
+    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
+        iam_root.get_user_policy(UserName=name, PolicyName=policy_name)
+    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
+        iam_root.delete_user_policy(UserName=name, PolicyName=policy_name)
+
+    response = iam_root.list_user_policies(UserName=name)
+    assert [] == response['PolicyNames']
+
+@pytest.mark.user_policy
+@pytest.mark.iam_account
+def test_account_user_policy_managed(iam_root):
+    path = get_iam_path_prefix()
+    name = make_iam_name('name')
+    policy1 = 'arn:aws:iam::aws:policy/AmazonS3FullAccess'
+    policy2 = 'arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess'
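+    # AWS managed policies are identified by the reserved 'aws' account id in the arn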
+
+    # Attach/Detach/List fail on nonexistent UserName
+    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
+        iam_root.attach_user_policy(UserName=name, PolicyArn=policy1)
+    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
+        iam_root.detach_user_policy(UserName=name, PolicyArn=policy1)
+    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
+        iam_root.list_attached_user_policies(UserName=name)
+
+    iam_root.create_user(UserName=name, Path=path)
+
+    # Detach fails on unattached PolicyArn
+    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
+        iam_root.detach_user_policy(UserName=name, PolicyArn=policy1)
+
+    iam_root.attach_user_policy(UserName=name, PolicyArn=policy1)
+    iam_root.attach_user_policy(UserName=name, PolicyArn=policy1)
+
+    response = iam_root.list_attached_user_policies(UserName=name)
+    assert len(response['AttachedPolicies']) == 1
+    assert 'AmazonS3FullAccess' == response['AttachedPolicies'][0]['PolicyName']
+    assert policy1 == response['AttachedPolicies'][0]['PolicyArn']
+
+    iam_root.attach_user_policy(UserName=name, PolicyArn=policy2)
+
+    response = iam_root.list_attached_user_policies(UserName=name)
+    policies = response['AttachedPolicies']
+    assert len(policies) == 2
+    names = [p['PolicyName'] for p in policies]
+    arns = [p['PolicyArn'] for p in policies]
+    assert 'AmazonS3FullAccess' in names
+    assert policy1 in arns
+    assert 'AmazonS3ReadOnlyAccess' in names
+    assert policy2 in arns
+
+    iam_root.detach_user_policy(UserName=name, PolicyArn=policy2)
+
+    # Detach fails after Detach
+    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
+        iam_root.detach_user_policy(UserName=name, PolicyArn=policy2)
+
+    response = iam_root.list_attached_user_policies(UserName=name)
+    assert len(response['AttachedPolicies']) == 1
+    assert 'AmazonS3FullAccess' == response['AttachedPolicies'][0]['PolicyName']
+    assert policy1 == response['AttachedPolicies'][0]['PolicyArn']
+
+    # DeleteUser fails while policies are still attached
+    with pytest.raises(iam_root.exceptions.DeleteConflictException):
+        iam_root.delete_user(UserName=name)
+
+@pytest.mark.user_policy
+@pytest.mark.iam_account
+def test_account_user_policy_allow(iam_root):
+    path = get_iam_path_prefix()
+    name = make_iam_name('name')
+    bucket_name = get_new_bucket_name()
+    iam_root.create_user(UserName=name, Path=path)
+
+    key = iam_root.create_access_key(UserName=name)['AccessKey']
+    client = get_iam_s3client(aws_access_key_id=key['AccessKeyId'],
+                              aws_secret_access_key=key['SecretAccessKey'])
+
+    # the access key may take a bit to start working. retry until it returns
+    # something other than InvalidAccessKeyId
+    e = assert_raises(ClientError, retry_on, 'InvalidAccessKeyId', 10, client.list_buckets)
+    # expect AccessDenied because no identity policy allows s3 actions
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 403
+    assert error_code == 'AccessDenied'
+
+    # add a user policy that allows s3 actions
+    policy = json.dumps({
+        'Version': '2012-10-17',
+        'Statement': [{
+            'Effect': 'Allow',
+            'Action': 's3:*',
+            'Resource': '*'
+            }]
+        })
+    policy_name = 'AllowStar'
+    iam_root.put_user_policy(UserName=name, PolicyName=policy_name, PolicyDocument=policy)
+
+    # the policy may take a bit to start working. retry until it returns
+    # something other than AccessDenied
+    retry_on('AccessDenied', 10, client.list_buckets)
+
+
+def group_list_names(client, **kwargs):
+    p = client.get_paginator('list_groups')
+    names = []
+    for response in p.paginate(**kwargs):
+        names += [u['GroupName'] for u in response['Groups']]
+    return names
+
+# IAM Group APIs
+@pytest.mark.group
+@pytest.mark.iam_account
+def test_account_group_create(iam_root):
+    path = get_iam_path_prefix()
+    name = make_iam_name('G1')
+
+    assert [] == group_list_names(iam_root, PathPrefix=path)
+
+    response = iam_root.create_group(GroupName=name, Path=path)
+    group = response['Group']
+    assert path == group['Path']
+    assert name == group['GroupName']
+    assert len(group['GroupId'])
+    arn = group['Arn']
+    assert arn.startswith('arn:aws:iam:')
+    assert arn.endswith(f':group{path}{name}')
+
+    with pytest.raises(iam_root.exceptions.EntityAlreadyExistsException):
+        iam_root.create_group(GroupName=name)
+
+    response = iam_root.get_group(GroupName=name)
+    assert group == response['Group']
+
+    assert [name] == group_list_names(iam_root, PathPrefix=path)
+
+    iam_root.delete_group(GroupName=name)
+
+    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
+        iam_root.get_group(GroupName=name)
+
+    assert [] == group_list_names(iam_root, PathPrefix=path)
+
+@pytest.mark.iam_account
+@pytest.mark.group
+def test_account_group_case_insensitive_name(iam_root):
+    path = get_iam_path_prefix()
+    name_upper = make_iam_name('G1')
+    name_lower = make_iam_name('g1')
+    response = iam_root.create_group(GroupName=name_upper, Path=path)
+    group = response['Group']
+
+    with pytest.raises(iam_root.exceptions.EntityAlreadyExistsException):
+        iam_root.create_group(GroupName=name_lower)
+
+    response = iam_root.get_group(GroupName=name_lower)
+    assert group == response['Group']
+
+    iam_root.delete_group(GroupName=name_lower)
+
+    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
+        iam_root.delete_group(GroupName=name_upper)
+
+@pytest.mark.iam_account
+@pytest.mark.group
+def test_account_group_list(iam_root):
+    path = get_iam_path_prefix()
+    response = iam_root.list_groups(PathPrefix=path)
+    assert len(response['Groups']) == 0
+    assert response['IsTruncated'] == False
+
+    name1 = make_iam_name('aa')
+    name2 = make_iam_name('Ab')
+    name3 = make_iam_name('ac')
+    name4 = make_iam_name('Ad')
+
+    # sort order is independent of Path and GroupName capitalization
+    iam_root.create_group(GroupName=name4, Path=path+'w/')
+    iam_root.create_group(GroupName=name3, Path=path+'x/')
+    iam_root.create_group(GroupName=name2, Path=path+'y/')
+    iam_root.create_group(GroupName=name1, Path=path+'z/')
+
+    assert [name1, name2, name3, name4] == \
+            group_list_names(iam_root, PathPrefix=path)
+    assert [name1, name2, name3, name4] == \
+            group_list_names(iam_root, PathPrefix=path, PaginationConfig={'PageSize': 1})
+
+@pytest.mark.group
+@pytest.mark.iam_account
+def test_account_group_update(iam_root):
+    path = get_iam_path_prefix()
+    name = make_iam_name('G1')
+    response = iam_root.create_group(GroupName=name, Path=path)
+    group_id = response['Group']['GroupId']
+
+    username = make_iam_name('U1')
+    iam_root.create_user(UserName=username, Path=path)
+
+    iam_root.add_user_to_group(GroupName=name, UserName=username)
+
+    response = iam_root.list_groups_for_user(UserName=username)
+    groups = response['Groups']
+    assert len(groups) == 1
+    assert path == groups[0]['Path']
+    assert name == groups[0]['GroupName']
+    assert group_id == groups[0]['GroupId']
+
+    new_path = path + 'new/'
+    new_name = make_iam_name('NG1')
+    iam_root.update_group(GroupName=name, NewPath=new_path, NewGroupName=new_name)
+
+    response = iam_root.get_group(GroupName=new_name)
+    group = response['Group']
+    assert new_path == group['Path']
+    assert new_name == group['GroupName']
+    assert group_id == group['GroupId']
+    arn = group['Arn']
+    assert arn.startswith('arn:aws:iam:')
+    assert arn.endswith(f':group{new_path}{new_name}')
+    users = response['Users']
+    assert len(users) == 1
+    assert username == users[0]['UserName']
+
+    response = iam_root.list_groups_for_user(UserName=username)
+    groups = response['Groups']
+    assert len(groups) == 1
+    assert new_path == groups[0]['Path']
+    assert new_name == groups[0]['GroupName']
+    assert group_id == groups[0]['GroupId']
+
+
+# IAM GroupPolicy APIs
+@pytest.mark.group_policy
+@pytest.mark.iam_account
+def test_account_inline_group_policy(iam_root):
+    path = get_iam_path_prefix()
+    name = make_iam_name('name')
+    policy_name = 'List'
+    bucket_name = get_new_bucket_name()
+    policy1 = json.dumps({'Version': '2012-10-17', 'Statement': [
+        {'Effect': 'Deny',
+         'Action': 's3:ListBucket',
+         'Resource': f'arn:aws:s3:::{bucket_name}'}]})
+    policy2 = json.dumps({'Version': '2012-10-17', 'Statement': [
+        {'Effect': 'Allow',
+         'Action': 's3:ListBucket',
+         'Resource': f'arn:aws:s3:::{bucket_name}'}]})
+
+    # Get/Put/Delete fail on nonexistent GroupName
+    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
+        iam_root.get_group_policy(GroupName=name, PolicyName=policy_name)
+    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
+        iam_root.delete_group_policy(GroupName=name, PolicyName=policy_name)
+    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
+        iam_root.put_group_policy(GroupName=name, PolicyName=policy_name, PolicyDocument=policy1)
+
+    iam_root.create_group(GroupName=name, Path=path)
+
+    # Get/Delete fail on nonexistent PolicyName
+    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
+        iam_root.get_group_policy(GroupName=name, PolicyName=policy_name)
+    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
+        iam_root.delete_group_policy(GroupName=name, PolicyName=policy_name)
+
+    iam_root.put_group_policy(GroupName=name, PolicyName=policy_name, PolicyDocument=policy1)
+
+    response = iam_root.get_group_policy(GroupName=name, PolicyName=policy_name)
+    assert policy1 == json.dumps(response['PolicyDocument'])
+    response = iam_root.list_group_policies(GroupName=name)
+    assert [policy_name] == response['PolicyNames']
+
+    iam_root.put_group_policy(GroupName=name, PolicyName=policy_name, PolicyDocument=policy2)
+
+    response = iam_root.get_group_policy(GroupName=name, PolicyName=policy_name)
+    assert policy2 == json.dumps(response['PolicyDocument'])
+    response = iam_root.list_group_policies(GroupName=name)
+    assert [policy_name] == response['PolicyNames']
+
+    # DeleteGroup fails while policies are still attached
+    with pytest.raises(iam_root.exceptions.DeleteConflictException):
+        iam_root.delete_group(GroupName=name)
+
+    iam_root.delete_group_policy(GroupName=name, PolicyName=policy_name)
+
+    # Get/Delete fail after Delete
+    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
+        iam_root.get_group_policy(GroupName=name, PolicyName=policy_name)
+    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
+        iam_root.delete_group_policy(GroupName=name, PolicyName=policy_name)
+
+    response = iam_root.list_group_policies(GroupName=name)
+    assert [] == response['PolicyNames']
+
+@pytest.mark.group_policy
+@pytest.mark.iam_account
+def test_account_managed_group_policy(iam_root):
+    path = get_iam_path_prefix()
+    name = make_iam_name('name')
+    policy1 = 'arn:aws:iam::aws:policy/AmazonS3FullAccess'
+    policy2 = 'arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess'
+
+    # Attach/Detach/List fail on nonexistent GroupName
+    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
+        iam_root.attach_group_policy(GroupName=name, PolicyArn=policy1)
+    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
+        iam_root.detach_group_policy(GroupName=name, PolicyArn=policy1)
+    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
+        iam_root.list_attached_group_policies(GroupName=name)
+
+    iam_root.create_group(GroupName=name, Path=path)
+
+    # Detach fails on unattached PolicyArn
+    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
+        iam_root.detach_group_policy(GroupName=name, PolicyArn=policy1)
+
+    iam_root.attach_group_policy(GroupName=name, PolicyArn=policy1)
+    iam_root.attach_group_policy(GroupName=name, PolicyArn=policy1)
+
+    response = iam_root.list_attached_group_policies(GroupName=name)
+    assert len(response['AttachedPolicies']) == 1
+    assert 'AmazonS3FullAccess' == response['AttachedPolicies'][0]['PolicyName']
+    assert policy1 == response['AttachedPolicies'][0]['PolicyArn']
+
+    iam_root.attach_group_policy(GroupName=name, PolicyArn=policy2)
+
+    response = iam_root.list_attached_group_policies(GroupName=name)
+    policies = response['AttachedPolicies']
+    assert len(policies) == 2
+    names = [p['PolicyName'] for p in policies]
+    arns = [p['PolicyArn'] for p in policies]
+    assert 'AmazonS3FullAccess' in names
+    assert policy1 in arns
+    assert 'AmazonS3ReadOnlyAccess' in names
+    assert policy2 in arns
+
+    iam_root.detach_group_policy(GroupName=name, PolicyArn=policy2)
+
+    # Detach fails after Detach
+    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
+        iam_root.detach_group_policy(GroupName=name, PolicyArn=policy2)
+
+    response = iam_root.list_attached_group_policies(GroupName=name)
+    assert len(response['AttachedPolicies']) == 1
+    assert 'AmazonS3FullAccess' == response['AttachedPolicies'][0]['PolicyName']
+    assert policy1 == response['AttachedPolicies'][0]['PolicyArn']
+
+    # DeleteGroup fails while policies are still attached
+    with pytest.raises(iam_root.exceptions.DeleteConflictException):
+        iam_root.delete_group(GroupName=name)
+
+@pytest.mark.group_policy
+@pytest.mark.iam_account
+def test_account_inline_group_policy_allow(iam_root):
+    path = get_iam_path_prefix()
+    username = make_iam_name('User')
+    groupname = make_iam_name('Group')
+    bucket_name = get_new_bucket_name()
+
+    iam_root.create_user(UserName=username, Path=path)
+
+    key = iam_root.create_access_key(UserName=username)['AccessKey']
+    client = get_iam_s3client(aws_access_key_id=key['AccessKeyId'],
+                              aws_secret_access_key=key['SecretAccessKey'])
+
+    iam_root.create_group(GroupName=groupname, Path=path)
+    iam_root.add_user_to_group(GroupName=groupname, UserName=username)
+
+    # the access key may take a bit to start working. retry until it returns
+    # something other than InvalidAccessKeyId
+    e = assert_raises(ClientError, retry_on, 'InvalidAccessKeyId', 10, client.list_buckets)
+    # expect AccessDenied because no identity policy allows s3 actions
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 403
+    assert error_code == 'AccessDenied'
+
+    # add a group policy that allows s3 actions
+    policy = json.dumps({
+        'Version': '2012-10-17',
+        'Statement': [{
+            'Effect': 'Allow',
+            'Action': 's3:*',
+            'Resource': '*'
+            }]
+        })
+    policy_name = 'AllowStar'
+    iam_root.put_group_policy(GroupName=groupname, PolicyName=policy_name, PolicyDocument=policy)
+
+    # the policy may take a bit to start working. retry until it returns
+    # something other than AccessDenied
+    retry_on('AccessDenied', 10, client.list_buckets)
+
+@pytest.mark.group_policy
+@pytest.mark.iam_account
+def test_account_managed_group_policy_allow(iam_root):
+    path = get_iam_path_prefix()
+    username = make_iam_name('User')
+    groupname = make_iam_name('Group')
+    bucket_name = get_new_bucket_name()
+
+    iam_root.create_user(UserName=username, Path=path)
+
+    key = iam_root.create_access_key(UserName=username)['AccessKey']
+    client = get_iam_s3client(aws_access_key_id=key['AccessKeyId'],
+                              aws_secret_access_key=key['SecretAccessKey'])
+
+    iam_root.create_group(GroupName=groupname, Path=path)
+    iam_root.add_user_to_group(GroupName=groupname, UserName=username)
+
+    # the access key may take a bit to start working. retry until it returns
+    # something other than InvalidAccessKeyId
+    e = assert_raises(ClientError, retry_on, 'InvalidAccessKeyId', 10, client.list_buckets)
+    # expect AccessDenied because no identity policy allows s3 actions
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 403
+    assert error_code == 'AccessDenied'
+
+    # add a group policy that allows s3 read actions
+    policy_arn = 'arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess'
+    iam_root.attach_group_policy(GroupName=groupname, PolicyArn=policy_arn)
+
+    # the policy may take a bit to start working. retry until it returns
+    # something other than AccessDenied
+    retry_on('AccessDenied', 10, client.list_buckets)
+
+
+assume_role_policy = json.dumps({
+    'Version': '2012-10-17',
+    'Statement': [{
+        'Effect': 'Allow',
+        'Action': 'sts:AssumeRole',
+        'Principal': {'AWS': '*'}
+        }]
+    })
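+# note: this trust policy uses a wildcard principal so that any caller could, in
+# principle, assume these roles; the role tests below only need a syntactically
+# valid AssumeRolePolicyDocument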
+
+# IAM Role APIs
+@pytest.mark.iam_account
+@pytest.mark.iam_role
+def test_account_role_create(iam_root):
+    path = get_iam_path_prefix()
+    name1 = make_iam_name('R1')
+    desc = 'my role description'
+    max_duration = 43200
+    response = iam_root.create_role(RoleName=name1, Path=path, AssumeRolePolicyDocument=assume_role_policy, Description=desc, MaxSessionDuration=max_duration)
+    role = response['Role']
+    assert role['Path'] == path
+    assert role['RoleName'] == name1
+    assert assume_role_policy == json.dumps(role['AssumeRolePolicyDocument'])
+    assert len(role['RoleId'])
+    arn = role['Arn']
+    assert arn.startswith('arn:aws:iam:')
+    assert arn.endswith(f':role{path}{name1}')
+    assert role['CreateDate'] > datetime.datetime(1970, 1, 1, tzinfo=datetime.timezone.utc)
+    # AWS doesn't include these for CreateRole, only GetRole
+    #assert desc == role['Description']
+    #assert max_duration == role['MaxSessionDuration']
+
+    response = iam_root.get_role(RoleName=name1)
+    role = response['Role']
+    assert arn == role['Arn']
+    assert desc == role['Description']
+    assert max_duration == role['MaxSessionDuration']
+
+    path2 = get_iam_path_prefix() + 'foo/'
+    with pytest.raises(iam_root.exceptions.EntityAlreadyExistsException):
+        iam_root.create_role(RoleName=name1, Path=path2, AssumeRolePolicyDocument=assume_role_policy)
+
+    name2 = make_iam_name('R2')
+    response = iam_root.create_role(RoleName=name2, Path=path2, AssumeRolePolicyDocument=assume_role_policy)
+    role = response['Role']
+    assert role['Path'] == path2
+    assert role['RoleName'] == name2
+
+@pytest.mark.iam_account
+@pytest.mark.iam_role
+def test_account_role_case_insensitive_name(iam_root):
+    path = get_iam_path_prefix()
+    name_upper = make_iam_name('R1')
+    name_lower = make_iam_name('r1')
+    response = iam_root.create_role(RoleName=name_upper, Path=path, AssumeRolePolicyDocument=assume_role_policy)
+    rid = response['Role']['RoleId']
+
+    # name is case-insensitive, so 'r1' should also conflict
+    with pytest.raises(iam_root.exceptions.EntityAlreadyExistsException):
+        iam_root.create_role(RoleName=name_lower, AssumeRolePolicyDocument=assume_role_policy)
+
+    # search for 'r1' should return the same 'R1' role
+    response = iam_root.get_role(RoleName=name_lower)
+    assert rid == response['Role']['RoleId']
+
+    # deleting 'r1' should delete the same 'R1' role
+    iam_root.delete_role(RoleName=name_lower)
+
+    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
+        iam_root.get_role(RoleName=name_lower)
+
+@pytest.mark.iam_account
+@pytest.mark.iam_role
+def test_account_role_delete(iam_root):
+    path = get_iam_path_prefix()
+    name = make_iam_name('U1')
+    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
+        iam_root.delete_role(RoleName=name)
+
+    response = iam_root.create_role(RoleName=name, Path=path, AssumeRolePolicyDocument=assume_role_policy)
+    uid = response['Role']['RoleId']
+    create_date = response['Role']['CreateDate']
+
+    iam_root.delete_role(RoleName=name)
+
+    response = iam_root.create_role(RoleName=name, Path=path, AssumeRolePolicyDocument=assume_role_policy)
+    assert uid != response['Role']['RoleId']
+    assert create_date <= response['Role']['CreateDate']
+
+def role_list_names(client, **kwargs):
+    p = client.get_paginator('list_roles')
+    rolenames = []
+    for response in p.paginate(**kwargs):
+        rolenames += [u['RoleName'] for u in response['Roles']]
+    return rolenames
+
+@pytest.mark.iam_account
+@pytest.mark.iam_role
+def test_account_role_list(iam_root):
+    path = get_iam_path_prefix()
+    response = iam_root.list_roles(PathPrefix=path)
+    assert len(response['Roles']) == 0
+    assert response['IsTruncated'] == False
+
+    name1 = make_iam_name('aa')
+    name2 = make_iam_name('Ab')
+    name3 = make_iam_name('ac')
+    name4 = make_iam_name('Ad')
+
+    # sort order is independent of CreateDate, Path, and RoleName capitalization
+    iam_root.create_role(RoleName=name4, Path=path+'w/', AssumeRolePolicyDocument=assume_role_policy)
+    iam_root.create_role(RoleName=name3, Path=path+'x/', AssumeRolePolicyDocument=assume_role_policy)
+    iam_root.create_role(RoleName=name2, Path=path+'y/', AssumeRolePolicyDocument=assume_role_policy)
+    iam_root.create_role(RoleName=name1, Path=path+'z/', AssumeRolePolicyDocument=assume_role_policy)
+
+    assert [name1, name2, name3, name4] == \
+            role_list_names(iam_root, PathPrefix=path)
+    assert [name1, name2, name3, name4] == \
+            role_list_names(iam_root, PathPrefix=path, PaginationConfig={'PageSize': 1})
+
+@pytest.mark.iam_account
+@pytest.mark.iam_role
+def test_account_role_list_path_prefix(iam_root):
+    path = get_iam_path_prefix()
+    response = iam_root.list_roles(PathPrefix=path)
+    assert len(response['Roles']) == 0
+    assert response['IsTruncated'] == False
+
+    name1 = make_iam_name('a')
+    name2 = make_iam_name('b')
+    name3 = make_iam_name('c')
+    name4 = make_iam_name('d')
+
+    iam_root.create_role(RoleName=name1, Path=path, AssumeRolePolicyDocument=assume_role_policy)
+    iam_root.create_role(RoleName=name2, Path=path, AssumeRolePolicyDocument=assume_role_policy)
+    iam_root.create_role(RoleName=name3, Path=path+'a/', AssumeRolePolicyDocument=assume_role_policy)
+    iam_root.create_role(RoleName=name4, Path=path+'a/x/', AssumeRolePolicyDocument=assume_role_policy)
+
+    assert [name1, name2, name3, name4] == \
+            role_list_names(iam_root, PathPrefix=path)
+    assert [name1, name2, name3, name4] == \
+            role_list_names(iam_root, PathPrefix=path,
+                            PaginationConfig={'PageSize': 1})
+    assert [name3, name4] == \
+            role_list_names(iam_root, PathPrefix=path+'a')
+    assert [name3, name4] == \
+            role_list_names(iam_root, PathPrefix=path+'a',
+                            PaginationConfig={'PageSize': 1})
+    assert [name4] == \
+            role_list_names(iam_root, PathPrefix=path+'a/x')
+    assert [name4] == \
+            role_list_names(iam_root, PathPrefix=path+'a/x',
+                            PaginationConfig={'PageSize': 1})
+    assert [] == role_list_names(iam_root, PathPrefix=path+'a/x/d')
+
+@pytest.mark.iam_account
+@pytest.mark.iam_role
+def test_account_role_update(iam_root):
+    path = get_iam_path_prefix()
+    name = make_iam_name('a')
+    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
+        iam_root.update_role(RoleName=name)
+
+    iam_root.create_role(RoleName=name, Path=path, AssumeRolePolicyDocument=assume_role_policy)
+
+    response = iam_root.get_role(RoleName=name)
+    assert name == response['Role']['RoleName']
+    arn = response['Role']['Arn']
+    rid = response['Role']['RoleId']
+
+    desc = 'my role description'
+    iam_root.update_role(RoleName=name, Description=desc, MaxSessionDuration=43200)
+
+    response = iam_root.get_role(RoleName=name)
+    assert rid == response['Role']['RoleId']
+    assert arn == response['Role']['Arn']
+    assert desc == response['Role']['Description']
+    assert 43200 == response['Role']['MaxSessionDuration']
+
+
+role_policy = json.dumps({
+    'Version': '2012-10-17',
+    'Statement': [{
+        'Effect': 'Allow',
+        'Action': 's3:*',
+        "Resource": "*"
+        }]
+    })
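+# role_policy allows every s3 action on every resource; the RolePolicy tests below
+# use it as a representative inline policy document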
+
+# IAM RolePolicy APIs
+@pytest.mark.iam_account
+@pytest.mark.iam_role
+@pytest.mark.role_policy
+def test_account_role_policy(iam_root):
+    path = get_iam_path_prefix()
+    role_name = make_iam_name('r')
+    policy_name = 'MyPolicy'
+    policy2_name = 'AnotherPolicy'
+
+    # Get/Put/Delete fail on nonexistent RoleName
+    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
+        iam_root.get_role_policy(RoleName=role_name, PolicyName=policy_name)
+    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
+        iam_root.delete_role_policy(RoleName=role_name, PolicyName=policy_name)
+    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
+        iam_root.put_role_policy(RoleName=role_name, PolicyName=policy_name, PolicyDocument=role_policy)
+
+    iam_root.create_role(RoleName=role_name, Path=path, AssumeRolePolicyDocument=assume_role_policy)
+
+    # Get/Delete fail on nonexistent PolicyName
+    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
+        iam_root.get_role_policy(RoleName=role_name, PolicyName=policy_name)
+    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
+        iam_root.delete_role_policy(RoleName=role_name, PolicyName=policy_name)
+
+    iam_root.put_role_policy(RoleName=role_name, PolicyName=policy_name, PolicyDocument=role_policy)
+
+    response = iam_root.get_role_policy(RoleName=role_name, PolicyName=policy_name)
+    assert role_name == response['RoleName']
+    assert policy_name == response['PolicyName']
+    assert role_policy == json.dumps(response['PolicyDocument'])
+
+    response = iam_root.list_role_policies(RoleName=role_name)
+    assert [policy_name] == response['PolicyNames']
+
+    iam_root.put_role_policy(RoleName=role_name, PolicyName=policy2_name, PolicyDocument=role_policy)
+
+    response = iam_root.list_role_policies(RoleName=role_name)
+    assert [policy2_name, policy_name] == response['PolicyNames']
+
+    iam_root.delete_role_policy(RoleName=role_name, PolicyName=policy_name)
+    iam_root.delete_role_policy(RoleName=role_name, PolicyName=policy2_name)
+
+    # Get/Delete fail after Delete
+    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
+        iam_root.get_role_policy(RoleName=role_name, PolicyName=policy_name)
+    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
+        iam_root.delete_role_policy(RoleName=role_name, PolicyName=policy_name)
+
+@pytest.mark.role_policy
+@pytest.mark.iam_account
+def test_account_role_policy_managed(iam_root):
+    path = get_iam_path_prefix()
+    name = make_iam_name('name')
+    policy1 = 'arn:aws:iam::aws:policy/AmazonS3FullAccess'
+    policy2 = 'arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess'
+
+    # Attach/Detach/List fail on nonexistent RoleName
+    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
+        iam_root.attach_role_policy(RoleName=name, PolicyArn=policy1)
+    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
+        iam_root.detach_role_policy(RoleName=name, PolicyArn=policy1)
+    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
+        iam_root.list_attached_role_policies(RoleName=name)
+
+    iam_root.create_role(RoleName=name, Path=path, AssumeRolePolicyDocument=assume_role_policy)
+
+    # Detach fails on unattached PolicyArn
+    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
+        iam_root.detach_role_policy(RoleName=name, PolicyArn=policy1)
+
+    iam_root.attach_role_policy(RoleName=name, PolicyArn=policy1)
+    iam_root.attach_role_policy(RoleName=name, PolicyArn=policy1)
+
+    response = iam_root.list_attached_role_policies(RoleName=name)
+    assert len(response['AttachedPolicies']) == 1
+    assert 'AmazonS3FullAccess' == response['AttachedPolicies'][0]['PolicyName']
+    assert policy1 == response['AttachedPolicies'][0]['PolicyArn']
+
+    iam_root.attach_role_policy(RoleName=name, PolicyArn=policy2)
+
+    response = iam_root.list_attached_role_policies(RoleName=name)
+    policies = response['AttachedPolicies']
+    assert len(policies) == 2
+    names = [p['PolicyName'] for p in policies]
+    arns = [p['PolicyArn'] for p in policies]
+    assert 'AmazonS3FullAccess' in names
+    assert policy1 in arns
+    assert 'AmazonS3ReadOnlyAccess' in names
+    assert policy2 in arns
+
+    iam_root.detach_role_policy(RoleName=name, PolicyArn=policy2)
+
+    # Detach fails after Detach
+    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
+        iam_root.detach_role_policy(RoleName=name, PolicyArn=policy2)
+
+    response = iam_root.list_attached_role_policies(RoleName=name)
+    assert len(response['AttachedPolicies']) == 1
+    assert 'AmazonS3FullAccess' == response['AttachedPolicies'][0]['PolicyName']
+    assert policy1 == response['AttachedPolicies'][0]['PolicyArn']
+
+    # DeleteRole fails while policies are still attached
+    with pytest.raises(iam_root.exceptions.DeleteConflictException):
+        iam_root.delete_role(RoleName=name)
+
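+# user in the account assumes one of the account's roles; access is granted by an
+# inline role policy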
+@pytest.mark.iam_account
+@pytest.mark.iam_role
+@pytest.mark.role_policy
+def test_account_role_policy_allow(iam_root):
+    path = get_iam_path_prefix()
+    user_name = make_iam_name('MyUser')
+    role_name = make_iam_name('MyRole')
+    session_name = 'MySession'
+
+    user = iam_root.create_user(UserName=user_name, Path=path)['User']
+    user_arn = user['Arn']
+
+    trust_policy = json.dumps({
+        'Version': '2012-10-17',
+        'Statement': [{
+            'Effect': 'Allow',
+            'Action': 'sts:AssumeRole',
+            'Principal': {'AWS': user_arn}
+            }]
+        })
+    # returns MalformedPolicyDocument until the user arn starts working
+    role = retry_on('MalformedPolicyDocument', 10, iam_root.create_role,
+                    RoleName=role_name, Path=path, AssumeRolePolicyDocument=trust_policy)['Role']
+    role_arn = role['Arn']
+
+    key = iam_root.create_access_key(UserName=user_name)['AccessKey']
+    sts = get_sts_client(aws_access_key_id=key['AccessKeyId'],
+                         aws_secret_access_key=key['SecretAccessKey'])
+
+    # returns InvalidClientTokenId or AccessDenied until the access key starts working
+    response = retry_on(('InvalidClientTokenId', 'AccessDenied'), 10, sts.assume_role,
+                        RoleArn=role_arn, RoleSessionName=session_name)
+    creds = response['Credentials']
+
+    s3 = get_iam_s3client(aws_access_key_id = creds['AccessKeyId'],
+                          aws_secret_access_key = creds['SecretAccessKey'],
+                          aws_session_token = creds['SessionToken'])
+
+    # expect AccessDenied because no identity policy allows s3 actions
+    e = assert_raises(ClientError, s3.list_buckets)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 403
+    assert error_code == 'AccessDenied'
+
+    policy_name = 'AllowListAllMyBuckets'
+    policy = json.dumps({
+        'Version': '2012-10-17',
+        'Statement': [{
+            'Effect': 'Allow',
+            'Action': 's3:ListAllMyBuckets',
+            'Resource': '*'
+            }]
+        })
+    iam_root.put_role_policy(RoleName=role_name, PolicyName=policy_name, PolicyDocument=policy)
+
+    # the policy may take a bit to start working. retry until it returns
+    # something other than AccessDenied
+    retry_on('AccessDenied', 10, s3.list_buckets)
+
+# alt account user assumes main account role to access main account bucket
+@pytest.mark.iam_account
+@pytest.mark.iam_cross_account
+@pytest.mark.iam_role
+@pytest.mark.role_policy
+def test_same_account_role_policy_allow(iam_root, iam_alt_root):
+    path = get_iam_path_prefix()
+    user_name = make_iam_name('AltUser')
+    role_name = make_iam_name('MyRole')
+    session_name = 'MySession'
+    bucket_name = get_new_bucket_name()
+
+    user = iam_alt_root.create_user(UserName=user_name, Path=path)['User']
+    user_arn = user['Arn']
+    key = iam_alt_root.create_access_key(UserName=user_name)['AccessKey']
+
+    s3_main = get_iam_root_client(service_name='s3')
+    s3_main.create_bucket(Bucket=bucket_name)
+
+    trust_policy = json.dumps({
+        'Version': '2012-10-17',
+        'Statement': [{
+            'Effect': 'Allow',
+            'Action': 'sts:AssumeRole',
+            'Principal': {'AWS': user_arn}
+            }]
+        })
+    # returns MalformedPolicyDocument until the user arn starts working
+    role = retry_on('MalformedPolicyDocument', 10, iam_root.create_role,
+                    RoleName=role_name, Path=path, AssumeRolePolicyDocument=trust_policy)['Role']
+    role_arn = role['Arn']
+
+    sts = get_sts_client(aws_access_key_id=key['AccessKeyId'],
+                         aws_secret_access_key=key['SecretAccessKey'])
+
+    # returns InvalidClientTokenId or AccessDenied until the access key starts working
+    response = retry_on(('InvalidClientTokenId', 'AccessDenied'), 10, sts.assume_role,
+                        RoleArn=role_arn, RoleSessionName=session_name)
+    creds = response['Credentials']
+
+    s3 = get_iam_s3client(aws_access_key_id = creds['AccessKeyId'],
+                          aws_secret_access_key = creds['SecretAccessKey'],
+                          aws_session_token = creds['SessionToken'])
+
+    # expect AccessDenied because no identity policy allows s3 actions
+    e = assert_raises(ClientError, s3.list_objects, Bucket=bucket_name)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 403
+    assert error_code == 'AccessDenied'
+
+    policy_name = 'AllowListBucket'
+    policy = json.dumps({
+        'Version': '2012-10-17',
+        'Statement': [{
+            'Effect': 'Allow',
+            'Action': 's3:ListBucket',
+            'Resource': '*'
+            }]
+        })
+    iam_root.put_role_policy(RoleName=role_name, PolicyName=policy_name, PolicyDocument=policy)
+
+    # the policy may take a bit to start working. retry until it returns
+    # something other than AccessDenied
+    retry_on('AccessDenied', 10, s3.list_objects, Bucket=bucket_name)
+
+# alt account user assumes main account role to access alt account bucket
+@pytest.mark.iam_account
+@pytest.mark.iam_cross_account
+@pytest.mark.iam_role
+@pytest.mark.role_policy
+def test_cross_account_role_policy_allow(iam_root, iam_alt_root):
+    path = get_iam_path_prefix()
+    user_name = make_iam_name('AltUser')
+    role_name = make_iam_name('MyRole')
+    session_name = 'MySession'
+    bucket_name = get_new_bucket_name()
+
+    user = iam_alt_root.create_user(UserName=user_name, Path=path)['User']
+    user_arn = user['Arn']
+    key = iam_alt_root.create_access_key(UserName=user_name)['AccessKey']
+
+    s3_alt = get_iam_alt_root_client(service_name='s3')
+    s3_alt.create_bucket(Bucket=bucket_name)
+
+    trust_policy = json.dumps({
+        'Version': '2012-10-17',
+        'Statement': [{
+            'Effect': 'Allow',
+            'Action': 'sts:AssumeRole',
+            'Principal': {'AWS': user_arn}
+            }]
+        })
+    # returns MalformedPolicyDocument until the user arn starts working
+    role = retry_on('MalformedPolicyDocument', 10, iam_root.create_role,
+                    RoleName=role_name, Path=path, AssumeRolePolicyDocument=trust_policy)['Role']
+    role_arn = role['Arn']
+
+    sts = get_sts_client(aws_access_key_id=key['AccessKeyId'],
+                         aws_secret_access_key=key['SecretAccessKey'])
+
+    # returns InvalidClientTokenId or AccessDenied until the access key starts working
+    response = retry_on(('InvalidClientTokenId', 'AccessDenied'), 10, sts.assume_role,
+                        RoleArn=role_arn, RoleSessionName=session_name)
+    creds = response['Credentials']
+
+    s3 = get_iam_s3client(aws_access_key_id = creds['AccessKeyId'],
+                          aws_secret_access_key = creds['SecretAccessKey'],
+                          aws_session_token = creds['SessionToken'])
+
+    # expect AccessDenied because no identity policy allows s3 actions
+    e = assert_raises(ClientError, s3.list_objects, Bucket=bucket_name)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 403
+    assert error_code == 'AccessDenied'
+
+    policy_name = 'AllowListBucket'
+    policy = json.dumps({
+        'Version': '2012-10-17',
+        'Statement': [{
+            'Effect': 'Allow',
+            'Action': 's3:ListBucket',
+            'Resource': '*'
+            }]
+        })
+    iam_root.put_role_policy(RoleName=role_name, PolicyName=policy_name, PolicyDocument=policy)
+
+    # expect AccessDenied because no resource policy allows the main account
+    e = assert_raises(ClientError, s3.list_objects, Bucket=bucket_name)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 403
+    assert error_code == 'AccessDenied'
+
+    # add a bucket policy that allows s3:ListBucket for the main account's arn
+    main_arn = iam_root.get_user()['User']['Arn']
+    s3_alt.put_bucket_policy(Bucket=bucket_name, Policy=json.dumps({
+        'Version': '2012-10-17',
+        'Statement': [{
+            'Effect': 'Allow',
+            'Principal': {'AWS': main_arn},
+            'Action': 's3:ListBucket',
+            'Resource': f'arn:aws:s3:::{bucket_name}'
+            }]
+        }))
+
+    # the policy may take a bit to start working. retry until it returns
+    # something other than AccessDenied
+    retry_on('AccessDenied', 10, s3.list_objects, Bucket=bucket_name)
+
+# alt account user assumes main account role to create a bucket
+@pytest.mark.iam_account
+@pytest.mark.iam_cross_account
+@pytest.mark.iam_role
+@pytest.mark.role_policy
+def test_account_role_policy_allow_create_bucket(iam_root, iam_alt_root):
+    path = get_iam_path_prefix()
+    user_name = make_iam_name('AltUser')
+    role_name = make_iam_name('MyRole')
+    session_name = 'MySession'
+    bucket_name = get_new_bucket_name()
+
+    user = iam_alt_root.create_user(UserName=user_name, Path=path)['User']
+    user_arn = user['Arn']
+    key = iam_alt_root.create_access_key(UserName=user_name)['AccessKey']
+
+    trust_policy = json.dumps({
+        'Version': '2012-10-17',
+        'Statement': [{
+            'Effect': 'Allow',
+            'Action': 'sts:AssumeRole',
+            'Principal': {'AWS': user_arn}
+            }]
+        })
+    # returns MalformedPolicyDocument until the user arn starts working
+    role = retry_on('MalformedPolicyDocument', 10, iam_root.create_role,
+                    RoleName=role_name, Path=path, AssumeRolePolicyDocument=trust_policy)['Role']
+    role_arn = role['Arn']
+
+    sts = get_sts_client(aws_access_key_id=key['AccessKeyId'],
+                         aws_secret_access_key=key['SecretAccessKey'])
+
+    # returns InvalidClientTokenId or AccessDenied until the access key starts working
+    response = retry_on(('InvalidClientTokenId', 'AccessDenied'), 10, sts.assume_role,
+                        RoleArn=role_arn, RoleSessionName=session_name)
+    creds = response['Credentials']
+
+    s3 = get_iam_s3client(aws_access_key_id = creds['AccessKeyId'],
+                          aws_secret_access_key = creds['SecretAccessKey'],
+                          aws_session_token = creds['SessionToken'])
+
+    # expect AccessDenied because no identity policy allows s3 actions
+    e = assert_raises(ClientError, s3.create_bucket, Bucket=bucket_name, ObjectOwnership='ObjectWriter', ACL='private')
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 403
+    assert error_code == 'AccessDenied'
+
+    policy_name = 'AllowCreateBucket'
+    policy = json.dumps({
+        'Version': '2012-10-17',
+        'Statement': [{
+            'Effect': 'Allow',
+            'Action': ['s3:CreateBucket', 's3:PutBucketAcl'],
+            'Resource': '*'
+            }]
+        })
+    iam_root.put_role_policy(RoleName=role_name, PolicyName=policy_name, PolicyDocument=policy)
+
+    # the policy may take a bit to start working. retry until it returns
+    # something other than AccessDenied
+    retry_on('AccessDenied', 10, s3.create_bucket, Bucket=bucket_name, ObjectOwnership='ObjectWriter', ACL='private')
+
+    # verify that the bucket is owned by the role's account
+    s3_main = get_iam_root_client(service_name='s3')
+    response = s3_main.get_bucket_acl(Bucket=bucket_name)
+
+    main_arn = iam_root.get_user()['User']['Arn']
+    account_id = main_arn.removeprefix('arn:aws:iam::').removesuffix(':root')
+    assert response['Owner']['ID'] == account_id
+    assert response['Grants'][0]['Grantee']['ID'] == account_id
+
+# alt account user assumes main account role to read the role info
+@pytest.mark.iam_account
+@pytest.mark.iam_cross_account
+@pytest.mark.iam_role
+@pytest.mark.role_policy
+def test_account_role_policy_allow_get_role(iam_root, iam_alt_root):
+    path = get_iam_path_prefix()
+    user_name = make_iam_name('AltUser')
+    role_name = make_iam_name('MyRole')
+    session_name = 'MySession'
+    bucket_name = get_new_bucket_name()
+
+    user = iam_alt_root.create_user(UserName=user_name, Path=path)['User']
+    user_arn = user['Arn']
+    key = iam_alt_root.create_access_key(UserName=user_name)['AccessKey']
+
+    trust_policy = json.dumps({
+        'Version': '2012-10-17',
+        'Statement': [{
+            'Effect': 'Allow',
+            'Action': 'sts:AssumeRole',
+            'Principal': {'AWS': user_arn}
+            }]
+        })
+    # returns MalformedPolicyDocument until the user arn starts working
+    role = retry_on('MalformedPolicyDocument', 10, iam_root.create_role,
+                    RoleName=role_name, Path=path, AssumeRolePolicyDocument=trust_policy)['Role']
+    role_arn = role['Arn']
+
+    sts = get_sts_client(aws_access_key_id=key['AccessKeyId'],
+                         aws_secret_access_key=key['SecretAccessKey'])
+
+    # returns InvalidClientTokenId or AccessDenied until the access key starts working
+    response = retry_on(('InvalidClientTokenId', 'AccessDenied'), 10, sts.assume_role,
+                        RoleArn=role_arn, RoleSessionName=session_name)
+    creds = response['Credentials']
+
+    iam = get_iam_root_client(service_name='iam',
+                              aws_access_key_id = creds['AccessKeyId'],
+                              aws_secret_access_key = creds['SecretAccessKey'],
+                              aws_session_token = creds['SessionToken'])
+
+    # expect AccessDenied because no identity policy allows iam actions
+    e = assert_raises(ClientError, iam.get_role, RoleName=role_name)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 403
+    assert error_code == 'AccessDenied'
+
+    policy_name = 'AllowGetRole'
+    policy = json.dumps({
+        'Version': '2012-10-17',
+        'Statement': [{
+            'Effect': 'Allow',
+            'Action': 'iam:GetRole',
+            'Resource': '*'
+            }]
+        })
+    iam_root.put_role_policy(RoleName=role_name, PolicyName=policy_name, PolicyDocument=policy)
+
+    # the policy may take a bit to start working. retry until it returns
+    # something other than AccessDenied
+    retry_on('AccessDenied', 10, iam.get_role, RoleName=role_name)
+
+
+# IAM OpenIDConnectProvider apis
+@pytest.mark.iam_account
+def test_account_oidc_provider(iam_root):
+    url_host = get_iam_path_prefix()[1:] + 'example.com'
+    url = 'http://' + url_host
+
+    response = iam_root.create_open_id_connect_provider(
+            ClientIDList=['my-application-id'],
+            ThumbprintList=['3768084dfb3d2b68b7897bf5f565da8efEXAMPLE'],
+            Url=url)
+    arn = response['OpenIDConnectProviderArn']
+    assert arn.endswith(f':oidc-provider/{url_host}')
+
+    response = iam_root.list_open_id_connect_providers()
+    arns = [p['Arn'] for p in response['OpenIDConnectProviderList']]
+    assert arn in arns
+
+    response = iam_root.get_open_id_connect_provider(OpenIDConnectProviderArn=arn)
+    assert url == response['Url']
+    assert ['my-application-id'] == response['ClientIDList']
+    assert ['3768084dfb3d2b68b7897bf5f565da8efEXAMPLE'] == response['ThumbprintList']
+
+    iam_root.delete_open_id_connect_provider(OpenIDConnectProviderArn=arn)
+
+    response = iam_root.list_open_id_connect_providers()
+    arns = [p['Arn'] for p in response['OpenIDConnectProviderList']]
+    assert arn not in arns
+
+    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
+        iam_root.get_open_id_connect_provider(OpenIDConnectProviderArn=arn)
+    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
+        iam_root.delete_open_id_connect_provider(OpenIDConnectProviderArn=arn)
+
+
+# test cross-account access, adding user policy before the bucket policy
+def _test_cross_account_user_bucket_policy(roots3, alt_root, alt_name, alt_arn):
+    # add a user policy that allows s3 actions
+    alt_root.put_user_policy(UserName=alt_name, PolicyName='AllowStar', PolicyDocument=json.dumps({
+        'Version': '2012-10-17',
+        'Statement': [{
+            'Effect': 'Allow',
+            'Action': 's3:*',
+            'Resource': '*'
+            }]
+        }))
+
+    key = alt_root.create_access_key(UserName=alt_name)['AccessKey']
+    alts3 = get_iam_s3client(aws_access_key_id=key['AccessKeyId'],
+                             aws_secret_access_key=key['SecretAccessKey'])
+
+    # create a bucket with the root user
+    bucket = get_new_bucket(roots3)
+    try:
+        # the access key may take a bit to start working. retry until it returns
+        # something other than InvalidAccessKeyId
+        e = assert_raises(ClientError, retry_on, 'InvalidAccessKeyId', 10, alts3.list_objects, Bucket=bucket)
+        status, error_code = _get_status_and_error_code(e.response)
+        assert status == 403
+        assert error_code == 'AccessDenied'
+
+        # add a bucket policy that allows s3:ListBucket for the given alt principal (user arn, account arn, or account id)
+        roots3.put_bucket_policy(Bucket=bucket, Policy=json.dumps({
+            'Version': '2012-10-17',
+            'Statement': [{
+                'Effect': 'Allow',
+                'Principal': {'AWS': alt_arn},
+                'Action': 's3:ListBucket',
+                'Resource': f'arn:aws:s3:::{bucket}'
+                }]
+            }))
+
+        # verify that the iam user can eventually access it
+        retry_on('AccessDenied', 10, alts3.list_objects, Bucket=bucket)
+    finally:
+        roots3.delete_bucket(Bucket=bucket)
+
+# test cross-account access, adding bucket policy before the user policy
+def _test_cross_account_bucket_user_policy(roots3, alt_root, alt_name, alt_arn):
+    key = alt_root.create_access_key(UserName=alt_name)['AccessKey']
+    alts3 = get_iam_s3client(aws_access_key_id=key['AccessKeyId'],
+                             aws_secret_access_key=key['SecretAccessKey'])
+
+    # create a bucket with the root user
+    bucket = get_new_bucket(roots3)
+    try:
+        # add a bucket policy that allows s3:ListBucket for the given alt principal (user arn, account arn, or account id)
+        roots3.put_bucket_policy(Bucket=bucket, Policy=json.dumps({
+            'Version': '2012-10-17',
+            'Statement': [{
+                'Effect': 'Allow',
+                'Principal': {'AWS': alt_arn},
+                'Action': 's3:ListBucket',
+                'Resource': f'arn:aws:s3:::{bucket}'
+                }]
+            }))
+
+        # the access key may take a bit to start working. retry until it returns
+        # something other than InvalidAccessKeyId
+        e = assert_raises(ClientError, retry_on, 'InvalidAccessKeyId', 10, alts3.list_objects, Bucket=bucket)
+        status, error_code = _get_status_and_error_code(e.response)
+        assert status == 403
+        assert error_code == 'AccessDenied'
+
+        # add a user policy that allows s3 actions
+        alt_root.put_user_policy(UserName=alt_name, PolicyName='AllowStar', PolicyDocument=json.dumps({
+            'Version': '2012-10-17',
+            'Statement': [{
+                'Effect': 'Allow',
+                'Action': 's3:*',
+                'Resource': '*'
+                }]
+            }))
+
+        # verify that the iam user can eventually access it
+        retry_on('AccessDenied', 10, alts3.list_objects, Bucket=bucket)
+    finally:
+        roots3.delete_bucket(Bucket=bucket)
+
+@pytest.mark.iam_account
+@pytest.mark.iam_cross_account
+def test_cross_account_bucket_user_policy_allow_user_arn(iam_root, iam_alt_root):
+    roots3 = get_iam_root_client(service_name='s3')
+    path = get_iam_path_prefix()
+    user_name = make_iam_name('AltUser')
+    response = iam_alt_root.create_user(UserName=user_name, Path=path)
+    user_arn = response['User']['Arn']
+    _test_cross_account_bucket_user_policy(roots3, iam_alt_root, user_name, user_arn)
+
+@pytest.mark.iam_account
+@pytest.mark.iam_cross_account
+def test_cross_account_user_bucket_policy_allow_user_arn(iam_root, iam_alt_root):
+    roots3 = get_iam_root_client(service_name='s3')
+    path = get_iam_path_prefix()
+    user_name = make_iam_name('AltUser')
+    response = iam_alt_root.create_user(UserName=user_name, Path=path)
+    user_arn = response['User']['Arn']
+    _test_cross_account_user_bucket_policy(roots3, iam_alt_root, user_name, user_arn)
+
+@pytest.mark.iam_account
+@pytest.mark.iam_cross_account
+def test_cross_account_user_bucket_policy_allow_account_arn(iam_root, iam_alt_root):
+    roots3 = get_iam_root_client(service_name='s3')
+    path = get_iam_path_prefix()
+    user_name = make_iam_name('AltUser')
+    response = iam_alt_root.create_user(UserName=user_name, Path=path)
+    user_arn = response['User']['Arn']
+    account_arn = user_arn.replace(f':user{path}{user_name}', ':root')
+    _test_cross_account_user_bucket_policy(roots3, iam_alt_root, user_name, account_arn)
+
+@pytest.mark.iam_account
+@pytest.mark.iam_cross_account
+def test_cross_account_bucket_user_policy_allow_account_arn(iam_root, iam_alt_root):
+    roots3 = get_iam_root_client(service_name='s3')
+    path = get_iam_path_prefix()
+    user_name = make_iam_name('AltUser')
+    response = iam_alt_root.create_user(UserName=user_name, Path=path)
+    user_arn = response['User']['Arn']
+    account_arn = user_arn.replace(f':user{path}{user_name}', ':root')
+    _test_cross_account_bucket_user_policy(roots3, iam_alt_root, user_name, account_arn)
+
+@pytest.mark.iam_account
+@pytest.mark.iam_cross_account
+def test_cross_account_user_bucket_policy_allow_account_id(iam_root, iam_alt_root):
+    roots3 = get_iam_root_client(service_name='s3')
+    path = get_iam_path_prefix()
+    user_name = make_iam_name('AltUser')
+    response = iam_alt_root.create_user(UserName=user_name, Path=path)
+    user_arn = response['User']['Arn']
+    account_id = user_arn.removeprefix('arn:aws:iam::').removesuffix(f':user{path}{user_name}')
+    _test_cross_account_user_bucket_policy(roots3, iam_alt_root, user_name, account_id)
+
+@pytest.mark.iam_account
+@pytest.mark.iam_cross_account
+def test_cross_account_bucket_user_policy_allow_account_id(iam_root, iam_alt_root):
+    roots3 = get_iam_root_client(service_name='s3')
+    path = get_iam_path_prefix()
+    user_name = make_iam_name('AltUser')
+    response = iam_alt_root.create_user(UserName=user_name, Path=path)
+    user_arn = response['User']['Arn']
+    account_id = user_arn.removeprefix('arn:aws:iam::').removesuffix(f':user{path}{user_name}')
+    _test_cross_account_bucket_user_policy(roots3, iam_alt_root, user_name, account_id)
+
+
+# test cross-account access, adding user policy before the bucket acl
+def _test_cross_account_user_policy_bucket_acl(roots3, alt_root, alt_name, grantee):
+    # add a user policy that allows s3 actions
+    alt_root.put_user_policy(UserName=alt_name, PolicyName='AllowStar', PolicyDocument=json.dumps({
+        'Version': '2012-10-17',
+        'Statement': [{
+            'Effect': 'Allow',
+            'Action': 's3:*',
+            'Resource': '*'
+            }]
+        }))
+
+    key = alt_root.create_access_key(UserName=alt_name)['AccessKey']
+    alts3 = get_iam_s3client(aws_access_key_id=key['AccessKeyId'],
+                             aws_secret_access_key=key['SecretAccessKey'])
+
+    # create a bucket with the root user
+    bucket = get_new_bucket(roots3)
+    try:
+        # the access key may take a bit to start working. retry until it returns
+        # something other than InvalidAccessKeyId
+        e = assert_raises(ClientError, retry_on, 'InvalidAccessKeyId', 10, alts3.list_objects, Bucket=bucket)
+        status, error_code = _get_status_and_error_code(e.response)
+        assert status == 403
+        assert error_code == 'AccessDenied'
+
+        # add a bucket acl that grants READ access
+        roots3.put_bucket_acl(Bucket=bucket, GrantRead=grantee)
+
+        # verify that the iam user can eventually access it
+        retry_on('AccessDenied', 10, alts3.list_objects, Bucket=bucket)
+    finally:
+        roots3.delete_bucket(Bucket=bucket)
+
+# test cross-account access, adding bucket acl before the user policy
+def _test_cross_account_bucket_acl_user_policy(roots3, alt_root, alt_name, grantee):
+    key = alt_root.create_access_key(UserName=alt_name)['AccessKey']
+    alts3 = get_iam_s3client(aws_access_key_id=key['AccessKeyId'],
+                             aws_secret_access_key=key['SecretAccessKey'])
+
+    # create a bucket with the root user
+    bucket = get_new_bucket(roots3)
+    try:
+        # add a bucket acl that grants READ access
+        roots3.put_bucket_acl(Bucket=bucket, GrantRead=grantee)
+
+        # the access key may take a bit to start working. retry until it returns
+        # something other than InvalidAccessKeyId
+        e = assert_raises(ClientError, retry_on, 'InvalidAccessKeyId', 10, alts3.list_objects, Bucket=bucket)
+        status, error_code = _get_status_and_error_code(e.response)
+        assert status == 403
+        assert error_code == 'AccessDenied'
+
+        # add a user policy that allows s3 actions
+        alt_root.put_user_policy(UserName=alt_name, PolicyName='AllowStar', PolicyDocument=json.dumps({
+            'Version': '2012-10-17',
+            'Statement': [{
+                'Effect': 'Allow',
+                'Action': 's3:*',
+                'Resource': '*'
+                }]
+            }))
+
+        # verify that the iam user can eventually access it
+        retry_on('AccessDenied', 10, alts3.list_objects, Bucket=bucket)
+    finally:
+        roots3.delete_bucket(Bucket=bucket)
+
+@pytest.mark.iam_account
+@pytest.mark.iam_cross_account
+@pytest.mark.fails_on_aws # can't grant to individual users
+def test_cross_account_bucket_acl_user_policy_grant_user_id(iam_root, iam_alt_root):
+    roots3 = get_iam_root_client(service_name='s3')
+    path = get_iam_path_prefix()
+    user_name = make_iam_name('AltUser')
+    response = iam_alt_root.create_user(UserName=user_name, Path=path)
+    grantee = 'id=' + response['User']['UserId']
+    _test_cross_account_bucket_acl_user_policy(roots3, iam_alt_root, user_name, grantee)
+
+@pytest.mark.iam_account
+@pytest.mark.iam_cross_account
+@pytest.mark.fails_on_aws # can't grant to individual users
+def test_cross_account_user_policy_bucket_acl_grant_user_id(iam_root, iam_alt_root):
+    roots3 = get_iam_root_client(service_name='s3')
+    path = get_iam_path_prefix()
+    user_name = make_iam_name('AltUser')
+    response = iam_alt_root.create_user(UserName=user_name, Path=path)
+    grantee = 'id=' + response['User']['UserId']
+    _test_cross_account_user_policy_bucket_acl(roots3, iam_alt_root, user_name, grantee)
+
+@pytest.mark.iam_account
+@pytest.mark.iam_cross_account
+def test_cross_account_bucket_acl_user_policy_grant_canonical_id(iam_root, iam_alt_root):
+    roots3 = get_iam_root_client(service_name='s3')
+    path = get_iam_path_prefix()
+    user_name = make_iam_name('AltUser')
+    iam_alt_root.create_user(UserName=user_name, Path=path)
+    grantee = 'id=' + get_iam_alt_root_user_id()
+    _test_cross_account_bucket_acl_user_policy(roots3, iam_alt_root, user_name, grantee)
+
+@pytest.mark.iam_account
+@pytest.mark.iam_cross_account
+def test_cross_account_user_policy_bucket_acl_grant_canonical_id(iam_root, iam_alt_root):
+    roots3 = get_iam_root_client(service_name='s3')
+    path = get_iam_path_prefix()
+    user_name = make_iam_name('AltUser')
+    iam_alt_root.create_user(UserName=user_name, Path=path)
+    grantee = 'id=' + get_iam_alt_root_user_id()
+    _test_cross_account_user_policy_bucket_acl(roots3, iam_alt_root, user_name, grantee)
+
+@pytest.mark.iam_account
+@pytest.mark.iam_cross_account
+def test_cross_account_bucket_acl_user_policy_grant_account_email(iam_root, iam_alt_root):
+    roots3 = get_iam_root_client(service_name='s3')
+    path = get_iam_path_prefix()
+    user_name = make_iam_name('AltUser')
+    iam_alt_root.create_user(UserName=user_name, Path=path)
+    grantee = 'emailAddress=' + get_iam_alt_root_email()
+    _test_cross_account_bucket_acl_user_policy(roots3, iam_alt_root, user_name, grantee)
+
+@pytest.mark.iam_account
+@pytest.mark.iam_cross_account
+def test_cross_account_user_policy_bucket_acl_grant_account_email(iam_root, iam_alt_root):
+    roots3 = get_iam_root_client(service_name='s3')
+    path = get_iam_path_prefix()
+    user_name = make_iam_name('AltUser')
+    iam_alt_root.create_user(UserName=user_name, Path=path)
+    grantee = 'emailAddress=' + get_iam_alt_root_email()
+    _test_cross_account_user_policy_bucket_acl(roots3, iam_alt_root, user_name, grantee)
+
+
+# test root cross-account access with bucket policy
+def _test_cross_account_root_bucket_policy(roots3, alts3, alt_arn):
+    # create a bucket with the root user
+    bucket = get_new_bucket(roots3)
+    try:
+        e = assert_raises(ClientError, alts3.list_objects, Bucket=bucket)
+        status, error_code = _get_status_and_error_code(e.response)
+        assert status == 403
+        assert error_code == 'AccessDenied'
+
+        # add a bucket policy that allows s3:ListBucket for the alt account principal (account arn or account id)
+        roots3.put_bucket_policy(Bucket=bucket, Policy=json.dumps({
+            'Version': '2012-10-17',
+            'Statement': [{
+                'Effect': 'Allow',
+                'Principal': {'AWS': alt_arn},
+                'Action': 's3:ListBucket',
+                'Resource': f'arn:aws:s3:::{bucket}'
+                }]
+            }))
+
+        # verify that the alt account root user can eventually access it
+        retry_on('AccessDenied', 10, alts3.list_objects, Bucket=bucket)
+    finally:
+        roots3.delete_bucket(Bucket=bucket)
+
+@pytest.mark.iam_account
+@pytest.mark.iam_cross_account
+def test_cross_account_root_bucket_policy_allow_account_arn(iam_root, iam_alt_root):
+    roots3 = get_iam_root_client(service_name='s3')
+    alts3 = get_iam_alt_root_client(service_name='s3')
+    alt_arn = iam_alt_root.get_user()['User']['Arn']
+    _test_cross_account_root_bucket_policy(roots3, alts3, alt_arn)
+
+@pytest.mark.iam_account
+@pytest.mark.iam_cross_account
+def test_cross_account_root_bucket_policy_allow_account_id(iam_root, iam_alt_root):
+    roots3 = get_iam_root_client(service_name='s3')
+    alts3 = get_iam_alt_root_client(service_name='s3')
+    alt_arn = iam_alt_root.get_user()['User']['Arn']
+    account_id = alt_arn.removeprefix('arn:aws:iam::').removesuffix(':root')
+    _test_cross_account_root_bucket_policy(roots3, alts3, account_id)
+
+# test root cross-account access with bucket acls
+def _test_cross_account_root_bucket_acl(roots3, alts3, grantee):
+    # create a bucket with the root user
+    bucket = get_new_bucket(roots3)
+    try:
+        e = assert_raises(ClientError, alts3.list_objects, Bucket=bucket)
+        status, error_code = _get_status_and_error_code(e.response)
+        assert status == 403
+        assert error_code == 'AccessDenied'
+
+        # add a bucket acl that grants READ
+        roots3.put_bucket_acl(Bucket=bucket, GrantRead=grantee)
+
+        # verify that the alt account root user can eventually access it
+        retry_on('AccessDenied', 10, alts3.list_objects, Bucket=bucket)
+    finally:
+        roots3.delete_bucket(Bucket=bucket)
+
+@pytest.mark.iam_account
+@pytest.mark.iam_cross_account
+def test_cross_account_root_bucket_acl_grant_canonical_id(iam_root, iam_alt_root):
+    roots3 = get_iam_root_client(service_name='s3')
+    alts3 = get_iam_alt_root_client(service_name='s3')
+    grantee = 'id=' + get_iam_alt_root_user_id()
+    _test_cross_account_root_bucket_acl(roots3, alts3, grantee)
+
+@pytest.mark.iam_account
+@pytest.mark.iam_cross_account
+def test_cross_account_root_bucket_acl_grant_account_email(iam_root, iam_alt_root):
+    roots3 = get_iam_root_client(service_name='s3')
+    alts3 = get_iam_alt_root_client(service_name='s3')
+    grantee = 'emailAddress=' + get_iam_alt_root_email()
+    _test_cross_account_root_bucket_acl(roots3, alts3, grantee)
diff --git a/s3tests/functional/test_s3.py b/s3tests/functional/test_s3.py
new file mode 100644 (file)
index 0000000..a0c3fac
--- /dev/null
@@ -0,0 +1,13553 @@
+import boto3
+import botocore.session
+from botocore.exceptions import ClientError
+from botocore.exceptions import ParamValidationError
+import isodate
+import email.utils
+import datetime
+import threading
+import re
+import pytz
+from collections import OrderedDict
+import requests
+import json
+import base64
+import hmac
+import hashlib
+import xml.etree.ElementTree as ET
+import time
+import operator
+import pytest
+import os
+import string
+import random
+import socket
+import dateutil.parser
+import ssl
+from collections import namedtuple
+from collections import defaultdict
+from io import StringIO
+from io import BytesIO
+
+from email.header import decode_header
+
+from .utils import assert_raises
+from .utils import generate_random
+from .utils import _get_status_and_error_code
+from .utils import _get_status
+
+from .policy import Policy, Statement, make_json_policy
+
+from . import (
+    configfile,
+    setup_teardown,
+    get_client,
+    get_prefix,
+    get_unauthenticated_client,
+    get_bad_auth_client,
+    get_v2_client,
+    get_new_bucket,
+    get_new_bucket_name,
+    get_new_bucket_resource,
+    get_config_is_secure,
+    get_config_host,
+    get_config_port,
+    get_config_endpoint,
+    get_config_ssl_verify,
+    get_main_aws_access_key,
+    get_main_aws_secret_key,
+    get_main_display_name,
+    get_main_user_id,
+    get_main_email,
+    get_main_api_name,
+    get_alt_aws_access_key,
+    get_alt_aws_secret_key,
+    get_alt_display_name,
+    get_alt_user_id,
+    get_alt_email,
+    get_alt_client,
+    get_tenant_client,
+    get_tenant_iam_client,
+    get_tenant_user_id,
+    get_buckets_list,
+    get_objects_list,
+    get_main_kms_keyid,
+    get_secondary_kms_keyid,
+    get_svc_client,
+    get_cloud_storage_class,
+    get_cloud_retain_head_object,
+    get_cloud_regular_storage_class,
+    get_cloud_target_path,
+    get_cloud_target_storage_class,
+    get_cloud_client,
+    nuke_prefixed_buckets,
+    configured_storage_classes,
+    get_lc_debug_interval,
+    )
+
+
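+# return True if the given boto3 Bucket resource contains no objects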
+def _bucket_is_empty(bucket):
+    is_empty = True
+    for obj in bucket.objects.all():
+        is_empty = False
+        break
+    return is_empty
+
+def test_bucket_list_empty():
+    bucket = get_new_bucket_resource()
+    is_empty = _bucket_is_empty(bucket)
+    assert is_empty == True
+
+@pytest.mark.list_objects_v2
+def test_bucket_list_distinct():
+    bucket1 = get_new_bucket_resource()
+    bucket2 = get_new_bucket_resource()
+    obj = bucket1.put_object(Body='str', Key='asdf')
+    is_empty = _bucket_is_empty(bucket2)
+    assert is_empty == True
+
+def _create_objects(bucket=None, bucket_name=None, keys=[]):
+    """
+    Populate a (specified or new) bucket with objects with
+    specified names (and contents identical to their names).
+    """
+    if bucket_name is None:
+        bucket_name = get_new_bucket_name()
+    if bucket is None:
+        bucket = get_new_bucket_resource(name=bucket_name)
+
+    for key in keys:
+        obj = bucket.put_object(Body=key, Key=key)
+
+    return bucket_name
+
+def _get_keys(response):
+    """
+    return a list of the object key strings from a client.list_objects() response
+    """
+    keys = []
+    if 'Contents' in response:
+        objects_list = response['Contents']
+        keys = [obj['Key'] for obj in objects_list]
+    return keys
+
+def _get_prefixes(response):
+    """
+    return a list of the CommonPrefixes strings from a client.list_objects() response
+    """
+    prefixes = []
+    if 'CommonPrefixes' in response:
+        prefix_list = response['CommonPrefixes']
+        prefixes = [prefix['Prefix'] for prefix in prefix_list]
+    return prefixes
+
+@pytest.mark.fails_on_dbstore
+def test_bucket_list_many():
+    bucket_name = _create_objects(keys=['foo', 'bar', 'baz'])
+    client = get_client()
+
+    response = client.list_objects(Bucket=bucket_name, MaxKeys=2)
+    keys = _get_keys(response)
+    assert len(keys) == 2
+    assert keys == ['bar', 'baz']
+    assert response['IsTruncated'] == True
+
+    response = client.list_objects(Bucket=bucket_name, Marker='baz', MaxKeys=2)
+    keys = _get_keys(response)
+    assert len(keys) == 1
+    assert response['IsTruncated'] == False
+    assert keys == ['foo']
+
+@pytest.mark.list_objects_v2
+@pytest.mark.fails_on_dbstore
+def test_bucket_listv2_many():
+    bucket_name = _create_objects(keys=['foo', 'bar', 'baz'])
+    client = get_client()
+
+    response = client.list_objects_v2(Bucket=bucket_name, MaxKeys=2)
+    keys = _get_keys(response)
+    assert len(keys) == 2
+    assert keys == ['bar', 'baz']
+    assert response['IsTruncated'] == True
+
+    response = client.list_objects_v2(Bucket=bucket_name, StartAfter='baz', MaxKeys=2)
+    keys = _get_keys(response)
+    assert len(keys) == 1
+    assert response['IsTruncated'] == False
+    assert keys == ['foo']
+
+@pytest.mark.list_objects_v2
+def test_basic_key_count():
+    client = get_client()
+    bucket_names = []
+    bucket_name = get_new_bucket_name()
+    client.create_bucket(Bucket=bucket_name)
+    for j in range(5):
+        client.put_object(Bucket=bucket_name, Key=str(j))
+    response1 = client.list_objects_v2(Bucket=bucket_name)
+    assert response1['KeyCount'] == 5
+
+def test_bucket_list_delimiter_basic():
+    bucket_name = _create_objects(keys=['foo/bar', 'foo/bar/xyzzy', 'quux/thud', 'asdf'])
+    client = get_client()
+
+    response = client.list_objects(Bucket=bucket_name, Delimiter='/')
+    assert response['Delimiter'] == '/'
+    keys = _get_keys(response)
+    assert keys == ['asdf']
+
+    prefixes = _get_prefixes(response)
+    assert len(prefixes) == 2
+    assert prefixes == ['foo/', 'quux/']
+
+@pytest.mark.list_objects_v2
+def test_bucket_listv2_delimiter_basic():
+    bucket_name = _create_objects(keys=['foo/bar', 'foo/bar/xyzzy', 'quux/thud', 'asdf'])
+    client = get_client()
+
+    response = client.list_objects_v2(Bucket=bucket_name, Delimiter='/')
+    assert response['Delimiter'] == '/'
+    keys = _get_keys(response)
+    assert keys == ['asdf']
+
+    prefixes = _get_prefixes(response)
+    assert len(prefixes) == 2
+    assert prefixes == ['foo/', 'quux/']
+    assert response['KeyCount'] == len(prefixes) + len(keys)
+
+
+@pytest.mark.list_objects_v2
+def test_bucket_listv2_encoding_basic():
+    bucket_name = _create_objects(keys=['foo+1/bar', 'foo/bar/xyzzy', 'quux ab/thud', 'asdf+b'])
+    client = get_client()
+
+    response = client.list_objects_v2(Bucket=bucket_name, Delimiter='/', EncodingType='url')
+    assert response['Delimiter'] == '/'
+    keys = _get_keys(response)
+    assert keys == ['asdf%2Bb']
+
+    prefixes = _get_prefixes(response)
+    assert len(prefixes) == 3
+    assert prefixes == ['foo%2B1/', 'foo/', 'quux%20ab/']
+
+def test_bucket_list_encoding_basic():
+    bucket_name = _create_objects(keys=['foo+1/bar', 'foo/bar/xyzzy', 'quux ab/thud', 'asdf+b'])
+    client = get_client()
+
+    response = client.list_objects(Bucket=bucket_name, Delimiter='/', EncodingType='url')
+    assert response['Delimiter'] == '/'
+    keys = _get_keys(response)
+    assert keys == ['asdf%2Bb']
+
+    prefixes = _get_prefixes(response)
+    assert len(prefixes) == 3
+    assert prefixes == ['foo%2B1/', 'foo/', 'quux%20ab/']
+
+
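+# helper: list the bucket with the given prefix/delimiter/marker/max-keys and
+# assert that the returned keys, common prefixes, truncation flag and
+# NextMarker all match the expected values; returns NextMarker for paging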
+def validate_bucket_list(bucket_name, prefix, delimiter, marker, max_keys,
+                         is_truncated, check_objs, check_prefixes, next_marker):
+    client = get_client()
+
+    response = client.list_objects(Bucket=bucket_name, Delimiter=delimiter, Marker=marker, MaxKeys=max_keys, Prefix=prefix)
+    assert response['IsTruncated'] == is_truncated
+    if 'NextMarker' not in response:
+        response['NextMarker'] = None
+    assert response['NextMarker'] == next_marker
+
+    keys = _get_keys(response)
+    prefixes = _get_prefixes(response)
+
+    assert len(keys) == len(check_objs)
+    assert len(prefixes) == len(check_prefixes)
+    assert keys == check_objs
+    assert prefixes == check_prefixes
+
+    return response['NextMarker']
+
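+# helper: ListObjectsV2 counterpart of validate_bucket_list(), paging with
+# ContinuationToken (StartAfter='' on the first call) and returning
+# NextContinuationToken for the next page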
+def validate_bucket_listv2(bucket_name, prefix, delimiter, continuation_token, max_keys,
+                         is_truncated, check_objs, check_prefixes, last=False):
+    client = get_client()
+
+    params = dict(Bucket=bucket_name, Delimiter=delimiter, MaxKeys=max_keys, Prefix=prefix)
+    if continuation_token is not None:
+        params['ContinuationToken'] = continuation_token
+    else:
+        params['StartAfter'] = ''
+    response = client.list_objects_v2(**params)
+    assert response['IsTruncated'] == is_truncated
+    if 'NextContinuationToken' not in response:
+        response['NextContinuationToken'] = None
+    if last:
+        assert response['NextContinuationToken'] == None
+
+
+    keys = _get_keys(response)
+    prefixes = _get_prefixes(response)
+
+    assert len(keys) == len(check_objs)
+    assert len(prefixes) == len(check_prefixes)
+    assert keys == check_objs
+    assert prefixes == check_prefixes
+
+    return response['NextContinuationToken']
+
+@pytest.mark.fails_on_dbstore
+def test_bucket_list_delimiter_prefix():
+    bucket_name = _create_objects(keys=['asdf', 'boo/bar', 'boo/baz/xyzzy', 'cquux/thud', 'cquux/bla'])
+
+    delim = '/'
+    marker = ''
+    prefix = ''
+
+    marker = validate_bucket_list(bucket_name, prefix, delim, '', 1, True, ['asdf'], [], 'asdf')
+    marker = validate_bucket_list(bucket_name, prefix, delim, marker, 1, True, [], ['boo/'], 'boo/')
+    marker = validate_bucket_list(bucket_name, prefix, delim, marker, 1, False, [], ['cquux/'], None)
+
+    marker = validate_bucket_list(bucket_name, prefix, delim, '', 2, True, ['asdf'], ['boo/'], 'boo/')
+    marker = validate_bucket_list(bucket_name, prefix, delim, marker, 2, False, [], ['cquux/'], None)
+
+    prefix = 'boo/'
+
+    marker = validate_bucket_list(bucket_name, prefix, delim, '', 1, True, ['boo/bar'], [], 'boo/bar')
+    marker = validate_bucket_list(bucket_name, prefix, delim, marker, 1, False, [], ['boo/baz/'], None)
+
+    marker = validate_bucket_list(bucket_name, prefix, delim, '', 2, False, ['boo/bar'], ['boo/baz/'], None)
+
+@pytest.mark.list_objects_v2
+@pytest.mark.fails_on_dbstore
+def test_bucket_listv2_delimiter_prefix():
+    bucket_name = _create_objects(keys=['asdf', 'boo/bar', 'boo/baz/xyzzy', 'cquux/thud', 'cquux/bla'])
+
+    delim = '/'
+    continuation_token = ''
+    prefix = ''
+
+    continuation_token = validate_bucket_listv2(bucket_name, prefix, delim, None, 1, True, ['asdf'], [])
+    continuation_token = validate_bucket_listv2(bucket_name, prefix, delim, continuation_token, 1, True, [], ['boo/'])
+    continuation_token = validate_bucket_listv2(bucket_name, prefix, delim, continuation_token, 1, False, [], ['cquux/'], last=True)
+
+    continuation_token = validate_bucket_listv2(bucket_name, prefix, delim, None, 2, True, ['asdf'], ['boo/'])
+    continuation_token = validate_bucket_listv2(bucket_name, prefix, delim, continuation_token, 2, False, [], ['cquux/'], last=True)
+
+    prefix = 'boo/'
+
+    continuation_token = validate_bucket_listv2(bucket_name, prefix, delim, None, 1, True, ['boo/bar'], [])
+    continuation_token = validate_bucket_listv2(bucket_name, prefix, delim, continuation_token, 1, False, [], ['boo/baz/'], last=True)
+
+    continuation_token = validate_bucket_listv2(bucket_name, prefix, delim, None, 2, False, ['boo/bar'], ['boo/baz/'], last=True)
+
+
+@pytest.mark.list_objects_v2
+def test_bucket_listv2_delimiter_prefix_ends_with_delimiter():
+    bucket_name = _create_objects(keys=['asdf/'])
+    validate_bucket_listv2(bucket_name, 'asdf/', '/', None, 1000, False, ['asdf/'], [], last=True)
+
+def test_bucket_list_delimiter_prefix_ends_with_delimiter():
+    bucket_name = _create_objects(keys=['asdf/'])
+    validate_bucket_list(bucket_name, 'asdf/', '/', '', 1000, False, ['asdf/'], [], None)
+
+def test_bucket_list_delimiter_alt():
+    bucket_name = _create_objects(keys=['bar', 'baz', 'cab', 'foo'])
+    client = get_client()
+
+    response = client.list_objects(Bucket=bucket_name, Delimiter='a')
+    assert response['Delimiter'] == 'a'
+
+    keys = _get_keys(response)
+    # foo contains no 'a' and so is a complete key
+    assert keys == ['foo']
+
+    # bar, baz, and cab should be broken up by the 'a' delimiters
+    prefixes = _get_prefixes(response)
+    assert len(prefixes) == 2
+    assert prefixes == ['ba', 'ca']
+
+@pytest.mark.list_objects_v2
+def test_bucket_listv2_delimiter_alt():
+    bucket_name = _create_objects(keys=['bar', 'baz', 'cab', 'foo'])
+    client = get_client()
+
+    response = client.list_objects_v2(Bucket=bucket_name, Delimiter='a')
+    assert response['Delimiter'] == 'a'
+
+    keys = _get_keys(response)
+    # foo contains no 'a' and so is a complete key
+    assert keys == ['foo']
+
+    # bar, baz, and cab should be broken up by the 'a' delimiters
+    prefixes = _get_prefixes(response)
+    assert len(prefixes) == 2
+    assert prefixes == ['ba', 'ca']
+
+@pytest.mark.fails_on_dbstore
+def test_bucket_list_delimiter_prefix_underscore():
+    bucket_name = _create_objects(keys=['_obj1_','_under1/bar', '_under1/baz/xyzzy', '_under2/thud', '_under2/bla'])
+
+    delim = '/'
+    marker = ''
+    prefix = ''
+    marker = validate_bucket_list(bucket_name, prefix, delim, '', 1, True, ['_obj1_'], [], '_obj1_')
+    marker = validate_bucket_list(bucket_name, prefix, delim, marker, 1, True, [], ['_under1/'], '_under1/')
+    marker = validate_bucket_list(bucket_name, prefix, delim, marker, 1, False, [], ['_under2/'], None)
+
+    marker = validate_bucket_list(bucket_name, prefix, delim, '', 2, True, ['_obj1_'], ['_under1/'], '_under1/')
+    marker = validate_bucket_list(bucket_name, prefix, delim, marker, 2, False, [], ['_under2/'], None)
+
+    prefix = '_under1/'
+
+    marker = validate_bucket_list(bucket_name, prefix, delim, '', 1, True, ['_under1/bar'], [], '_under1/bar')
+    marker = validate_bucket_list(bucket_name, prefix, delim, marker, 1, False, [], ['_under1/baz/'], None)
+
+    marker = validate_bucket_list(bucket_name, prefix, delim, '', 2, False, ['_under1/bar'], ['_under1/baz/'], None)
+
+@pytest.mark.list_objects_v2
+@pytest.mark.fails_on_dbstore
+def test_bucket_listv2_delimiter_prefix_underscore():
+    bucket_name = _create_objects(keys=['_obj1_','_under1/bar', '_under1/baz/xyzzy', '_under2/thud', '_under2/bla'])
+
+    delim = '/'
+    continuation_token = ''
+    prefix = ''
+    continuation_token = validate_bucket_listv2(bucket_name, prefix, delim, None, 1, True, ['_obj1_'], [])
+    continuation_token = validate_bucket_listv2(bucket_name, prefix, delim, continuation_token, 1, True, [], ['_under1/'])
+    continuation_token = validate_bucket_listv2(bucket_name, prefix, delim, continuation_token, 1, False, [], ['_under2/'], last=True)
+
+    continuation_token = validate_bucket_listv2(bucket_name, prefix, delim, None, 2, True, ['_obj1_'], ['_under1/'])
+    continuation_token = validate_bucket_listv2(bucket_name, prefix, delim, continuation_token, 2, False, [], ['_under2/'], last=True)
+
+    prefix = '_under1/'
+
+    continuation_token = validate_bucket_listv2(bucket_name, prefix, delim, None, 1, True, ['_under1/bar'], [])
+    continuation_token = validate_bucket_listv2(bucket_name, prefix, delim, continuation_token, 1, False, [], ['_under1/baz/'], last=True)
+
+    continuation_token = validate_bucket_listv2(bucket_name, prefix, delim, None, 2, False, ['_under1/bar'], ['_under1/baz/'], last=True)
+
+
+def test_bucket_list_delimiter_percentage():
+    bucket_name = _create_objects(keys=['b%ar', 'b%az', 'c%ab', 'foo'])
+    client = get_client()
+
+    response = client.list_objects(Bucket=bucket_name, Delimiter='%')
+    assert response['Delimiter'] == '%'
+    keys = _get_keys(response)
+    # foo contains no '%' and so is a complete key
+    assert keys == ['foo']
+
+    prefixes = _get_prefixes(response)
+    assert len(prefixes) == 2
+    # b%ar, b%az, and c%ab should be broken up by the '%' delimiter
+    assert prefixes == ['b%', 'c%']
+
+@pytest.mark.list_objects_v2
+def test_bucket_listv2_delimiter_percentage():
+    bucket_name = _create_objects(keys=['b%ar', 'b%az', 'c%ab', 'foo'])
+    client = get_client()
+
+    response = client.list_objects_v2(Bucket=bucket_name, Delimiter='%')
+    assert response['Delimiter'] == '%'
+    keys = _get_keys(response)
+    # foo contains no '%' and so is a complete key
+    assert keys == ['foo']
+
+    prefixes = _get_prefixes(response)
+    assert len(prefixes) == 2
+    # b%ar, b%az, and c%ab should be broken up by the '%' delimiter
+    assert prefixes == ['b%', 'c%']
+
+def test_bucket_list_delimiter_whitespace():
+    bucket_name = _create_objects(keys=['b ar', 'b az', 'c ab', 'foo'])
+    client = get_client()
+
+    response = client.list_objects(Bucket=bucket_name, Delimiter=' ')
+    assert response['Delimiter'] == ' '
+    keys = _get_keys(response)
+    # foo contains no ' ' and so is a complete key
+    assert keys == ['foo']
+
+    prefixes = _get_prefixes(response)
+    assert len(prefixes) == 2
+    # 'b ar', 'b az', and 'c ab' should be broken up by the ' ' delimiter
+    assert prefixes == ['b ', 'c ']
+
+@pytest.mark.list_objects_v2
+def test_bucket_listv2_delimiter_whitespace():
+    bucket_name = _create_objects(keys=['b ar', 'b az', 'c ab', 'foo'])
+    client = get_client()
+
+    response = client.list_objects_v2(Bucket=bucket_name, Delimiter=' ')
+    assert response['Delimiter'] == ' '
+    keys = _get_keys(response)
+    # foo contains no ' ' and so is a complete key
+    assert keys == ['foo']
+
+    prefixes = _get_prefixes(response)
+    assert len(prefixes) == 2
+    # 'b ar', 'b az', and 'c ab' should be broken up by the ' ' delimiter
+    assert prefixes == ['b ', 'c ']
+
+def test_bucket_list_delimiter_dot():
+    bucket_name = _create_objects(keys=['b.ar', 'b.az', 'c.ab', 'foo'])
+    client = get_client()
+
+    response = client.list_objects(Bucket=bucket_name, Delimiter='.')
+    assert response['Delimiter'] == '.'
+    keys = _get_keys(response)
+    # foo contains no '.' and so is a complete key
+    assert keys == ['foo']
+
+    prefixes = _get_prefixes(response)
+    assert len(prefixes) == 2
+    # b.ar, b.az, and c.ab should be broken up by the '.' delimiter
+    assert prefixes == ['b.', 'c.']
+
+@pytest.mark.list_objects_v2
+def test_bucket_listv2_delimiter_dot():
+    bucket_name = _create_objects(keys=['b.ar', 'b.az', 'c.ab', 'foo'])
+    client = get_client()
+
+    response = client.list_objects_v2(Bucket=bucket_name, Delimiter='.')
+    assert response['Delimiter'] == '.'
+    keys = _get_keys(response)
+    # foo contains no '.' and so is a complete key
+    assert keys == ['foo']
+
+    prefixes = _get_prefixes(response)
+    assert len(prefixes) == 2
+    # b.ar, b.az, and c.ab should be broken up by the '.' delimiter
+    assert prefixes == ['b.', 'c.']
+
+def test_bucket_list_delimiter_unreadable():
+    key_names=['bar', 'baz', 'cab', 'foo']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response = client.list_objects(Bucket=bucket_name, Delimiter='\x0a')
+    assert response['Delimiter'] == '\x0a'
+
+    keys = _get_keys(response)
+    prefixes = _get_prefixes(response)
+    assert keys == key_names
+    assert prefixes == []
+
+@pytest.mark.list_objects_v2
+def test_bucket_listv2_delimiter_unreadable():
+    key_names=['bar', 'baz', 'cab', 'foo']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response = client.list_objects_v2(Bucket=bucket_name, Delimiter='\x0a')
+    assert response['Delimiter'] == '\x0a'
+
+    keys = _get_keys(response)
+    prefixes = _get_prefixes(response)
+    assert keys == key_names
+    assert prefixes == []
+
+def test_bucket_list_delimiter_empty():
+    key_names = ['bar', 'baz', 'cab', 'foo']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response = client.list_objects(Bucket=bucket_name, Delimiter='')
+    # an empty Delimiter is ignored and is not echoed back in the response
+    assert not 'Delimiter' in response
+
+    keys = _get_keys(response)
+    prefixes = _get_prefixes(response)
+    assert keys == key_names
+    assert prefixes == []
+
+@pytest.mark.list_objects_v2
+def test_bucket_listv2_delimiter_empty():
+    key_names = ['bar', 'baz', 'cab', 'foo']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response = client.list_objects_v2(Bucket=bucket_name, Delimiter='')
+    # an empty Delimiter is ignored and is not echoed back in the response
+    assert not 'Delimiter' in response
+
+    keys = _get_keys(response)
+    prefixes = _get_prefixes(response)
+    assert keys == key_names
+    assert prefixes == []
+
+def test_bucket_list_delimiter_none():
+    key_names = ['bar', 'baz', 'cab', 'foo']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response = client.list_objects(Bucket=bucket_name)
+    # when no Delimiter is specified, none is returned in the response
+    assert not 'Delimiter' in response
+
+    keys = _get_keys(response)
+    prefixes = _get_prefixes(response)
+    assert keys == key_names
+    assert prefixes == []
+
+@pytest.mark.list_objects_v2
+def test_bucket_listv2_delimiter_none():
+    key_names = ['bar', 'baz', 'cab', 'foo']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response = client.list_objects_v2(Bucket=bucket_name)
+    # when no Delimiter is specified, none is returned in the response
+    assert not 'Delimiter' in response
+
+    keys = _get_keys(response)
+    prefixes = _get_prefixes(response)
+    assert keys == key_names
+    assert prefixes == []
+
+@pytest.mark.list_objects_v2
+def test_bucket_listv2_fetchowner_notempty():
+    key_names = ['foo/bar', 'foo/baz', 'quux']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response = client.list_objects_v2(Bucket=bucket_name, FetchOwner=True)
+    objs_list = response['Contents']
+    assert 'Owner' in objs_list[0]
+
+@pytest.mark.list_objects_v2
+def test_bucket_listv2_fetchowner_defaultempty():
+    key_names = ['foo/bar', 'foo/baz', 'quux']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response = client.list_objects_v2(Bucket=bucket_name)
+    objs_list = response['Contents']
+    assert not 'Owner' in objs_list[0]
+
+@pytest.mark.list_objects_v2
+def test_bucket_listv2_fetchowner_empty():
+    key_names = ['foo/bar', 'foo/baz', 'quux']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response = client.list_objects_v2(Bucket=bucket_name, FetchOwner=False)
+    objs_list = response['Contents']
+    assert not 'Owner' in objs_list[0]
+
+def test_bucket_list_delimiter_not_exist():
+    key_names = ['bar', 'baz', 'cab', 'foo']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response = client.list_objects(Bucket=bucket_name, Delimiter='/')
+    # the delimiter is echoed back in the response even though no keys contain it
+    assert response['Delimiter'] == '/'
+
+    keys = _get_keys(response)
+    prefixes = _get_prefixes(response)
+    assert keys == key_names
+    assert prefixes == []
+
+@pytest.mark.list_objects_v2
+def test_bucket_listv2_delimiter_not_exist():
+    key_names = ['bar', 'baz', 'cab', 'foo']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response = client.list_objects_v2(Bucket=bucket_name, Delimiter='/')
+    # the delimiter is echoed back in the response even though no keys contain it
+    assert response['Delimiter'] == '/'
+
+    keys = _get_keys(response)
+    prefixes = _get_prefixes(response)
+    assert keys == key_names
+    assert prefixes == []
+
+
+@pytest.mark.fails_on_dbstore
+def test_bucket_list_delimiter_not_skip_special():
+    key_names = ['0/'] + ['0/%s' % i for i in range(1000, 1999)]
+    key_names2 = ['1999', '1999#', '1999+', '2000']
+    key_names += key_names2
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response = client.list_objects(Bucket=bucket_name, Delimiter='/')
+    assert response['Delimiter'] == '/'
+
+    keys = _get_keys(response)
+    prefixes = _get_prefixes(response)
+    assert keys == key_names2
+    assert prefixes == ['0/']
+
+def test_bucket_list_prefix_basic():
+    key_names = ['foo/bar', 'foo/baz', 'quux']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response = client.list_objects(Bucket=bucket_name, Prefix='foo/')
+    assert response['Prefix'] == 'foo/'
+
+    keys = _get_keys(response)
+    prefixes = _get_prefixes(response)
+    assert keys == ['foo/bar', 'foo/baz']
+    assert prefixes == []
+
+@pytest.mark.list_objects_v2
+def test_bucket_listv2_prefix_basic():
+    key_names = ['foo/bar', 'foo/baz', 'quux']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response = client.list_objects_v2(Bucket=bucket_name, Prefix='foo/')
+    assert response['Prefix'] == 'foo/'
+
+    keys = _get_keys(response)
+    prefixes = _get_prefixes(response)
+    assert keys == ['foo/bar', 'foo/baz']
+    assert prefixes == []
+
+# just testing that we can do the delimiter and prefix logic on non-slash characters
+def test_bucket_list_prefix_alt():
+    key_names = ['bar', 'baz', 'foo']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response = client.list_objects(Bucket=bucket_name, Prefix='ba')
+    assert response['Prefix'] == 'ba'
+
+    keys = _get_keys(response)
+    prefixes = _get_prefixes(response)
+    assert keys == ['bar', 'baz']
+    assert prefixes == []
+
+@pytest.mark.list_objects_v2
+def test_bucket_listv2_prefix_alt():
+    key_names = ['bar', 'baz', 'foo']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response = client.list_objects_v2(Bucket=bucket_name, Prefix='ba')
+    assert response['Prefix'] == 'ba'
+
+    keys = _get_keys(response)
+    prefixes = _get_prefixes(response)
+    assert keys == ['bar', 'baz']
+    assert prefixes == []
+
+def test_bucket_list_prefix_empty():
+    key_names = ['foo/bar', 'foo/baz', 'quux']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response = client.list_objects(Bucket=bucket_name, Prefix='')
+    assert response['Prefix'] == ''
+
+    keys = _get_keys(response)
+    prefixes = _get_prefixes(response)
+    assert keys == key_names
+    assert prefixes == []
+
+@pytest.mark.list_objects_v2
+def test_bucket_listv2_prefix_empty():
+    key_names = ['foo/bar', 'foo/baz', 'quux']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response = client.list_objects_v2(Bucket=bucket_name, Prefix='')
+    assert response['Prefix'] == ''
+
+    keys = _get_keys(response)
+    prefixes = _get_prefixes(response)
+    assert keys == key_names
+    assert prefixes == []
+
+def test_bucket_list_prefix_none():
+    key_names = ['foo/bar', 'foo/baz', 'quux']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response = client.list_objects(Bucket=bucket_name, Prefix='')
+    assert response['Prefix'] == ''
+
+    keys = _get_keys(response)
+    prefixes = _get_prefixes(response)
+    assert keys == key_names
+    assert prefixes == []
+
+@pytest.mark.list_objects_v2
+def test_bucket_listv2_prefix_none():
+    key_names = ['foo/bar', 'foo/baz', 'quux']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response = client.list_objects_v2(Bucket=bucket_name, Prefix='')
+    assert response['Prefix'] == ''
+
+    keys = _get_keys(response)
+    prefixes = _get_prefixes(response)
+    assert keys == key_names
+    assert prefixes == []
+
+def test_bucket_list_prefix_not_exist():
+    key_names = ['foo/bar', 'foo/baz', 'quux']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response = client.list_objects(Bucket=bucket_name, Prefix='d')
+    assert response['Prefix'] == 'd'
+
+    keys = _get_keys(response)
+    prefixes = _get_prefixes(response)
+    assert keys == []
+    assert prefixes == []
+
+@pytest.mark.list_objects_v2
+def test_bucket_listv2_prefix_not_exist():
+    key_names = ['foo/bar', 'foo/baz', 'quux']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response = client.list_objects_v2(Bucket=bucket_name, Prefix='d')
+    assert response['Prefix'] == 'd'
+
+    keys = _get_keys(response)
+    prefixes = _get_prefixes(response)
+    assert keys == []
+    assert prefixes == []
+
+def test_bucket_list_prefix_unreadable():
+    key_names = ['foo/bar', 'foo/baz', 'quux']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response = client.list_objects(Bucket=bucket_name, Prefix='\x0a')
+    assert response['Prefix'] == '\x0a'
+
+    keys = _get_keys(response)
+    prefixes = _get_prefixes(response)
+    assert keys == []
+    assert prefixes == []
+
+@pytest.mark.list_objects_v2
+def test_bucket_listv2_prefix_unreadable():
+    key_names = ['foo/bar', 'foo/baz', 'quux']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response = client.list_objects_v2(Bucket=bucket_name, Prefix='\x0a')
+    assert response['Prefix'] == '\x0a'
+
+    keys = _get_keys(response)
+    prefixes = _get_prefixes(response)
+    assert keys == []
+    assert prefixes == []
+
+def test_bucket_list_prefix_delimiter_basic():
+    key_names = ['foo/bar', 'foo/baz/xyzzy', 'quux/thud', 'asdf']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response = client.list_objects(Bucket=bucket_name, Delimiter='/', Prefix='foo/')
+    assert response['Prefix'] == 'foo/'
+    assert response['Delimiter'] == '/'
+
+    keys = _get_keys(response)
+    prefixes = _get_prefixes(response)
+    assert keys == ['foo/bar']
+    assert prefixes == ['foo/baz/']
+
+@pytest.mark.list_objects_v2
+def test_bucket_listv2_prefix_delimiter_basic():
+    key_names = ['foo/bar', 'foo/baz/xyzzy', 'quux/thud', 'asdf']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response = client.list_objects_v2(Bucket=bucket_name, Delimiter='/', Prefix='foo/')
+    assert response['Prefix'] == 'foo/'
+    assert response['Delimiter'] == '/'
+
+    keys = _get_keys(response)
+    prefixes = _get_prefixes(response)
+    assert keys == ['foo/bar']
+    assert prefixes == ['foo/baz/']
+
+def test_bucket_list_prefix_delimiter_alt():
+    key_names = ['bar', 'bazar', 'cab', 'foo']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response = client.list_objects(Bucket=bucket_name, Delimiter='a', Prefix='ba')
+    assert response['Prefix'] == 'ba'
+    assert response['Delimiter'] == 'a'
+
+    keys = _get_keys(response)
+    prefixes = _get_prefixes(response)
+    assert keys == ['bar']
+    assert prefixes == ['baza']
+
+@pytest.mark.list_objects_v2
+def test_bucket_listv2_prefix_delimiter_alt():
+    key_names = ['bar', 'bazar', 'cab', 'foo']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response = client.list_objects_v2(Bucket=bucket_name, Delimiter='a', Prefix='ba')
+    assert response['Prefix'] == 'ba'
+    assert response['Delimiter'] == 'a'
+
+    keys = _get_keys(response)
+    prefixes = _get_prefixes(response)
+    assert keys == ['bar']
+    assert prefixes == ['baza']
+
+def test_bucket_list_prefix_delimiter_prefix_not_exist():
+    key_names = ['b/a/r', 'b/a/c', 'b/a/g', 'g']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response = client.list_objects(Bucket=bucket_name, Delimiter='d', Prefix='/')
+
+    keys = _get_keys(response)
+    prefixes = _get_prefixes(response)
+    assert keys == []
+    assert prefixes == []
+
+@pytest.mark.list_objects_v2
+def test_bucket_listv2_prefix_delimiter_prefix_not_exist():
+    key_names = ['b/a/r', 'b/a/c', 'b/a/g', 'g']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response = client.list_objects_v2(Bucket=bucket_name, Delimiter='d', Prefix='/')
+
+    keys = _get_keys(response)
+    prefixes = _get_prefixes(response)
+    assert keys == []
+    assert prefixes == []
+
+def test_bucket_list_prefix_delimiter_delimiter_not_exist():
+    key_names = ['b/a/c', 'b/a/g', 'b/a/r', 'g']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response = client.list_objects(Bucket=bucket_name, Delimiter='z', Prefix='b')
+
+    keys = _get_keys(response)
+    prefixes = _get_prefixes(response)
+    assert keys == ['b/a/c', 'b/a/g', 'b/a/r']
+    assert prefixes == []
+
+@pytest.mark.list_objects_v2
+def test_bucket_listv2_prefix_delimiter_delimiter_not_exist():
+    key_names = ['b/a/c', 'b/a/g', 'b/a/r', 'g']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response = client.list_objects_v2(Bucket=bucket_name, Delimiter='z', Prefix='b')
+
+    keys = _get_keys(response)
+    prefixes = _get_prefixes(response)
+    assert keys == ['b/a/c', 'b/a/g', 'b/a/r']
+    assert prefixes == []
+
+def test_bucket_list_prefix_delimiter_prefix_delimiter_not_exist():
+    key_names = ['b/a/c', 'b/a/g', 'b/a/r', 'g']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response = client.list_objects(Bucket=bucket_name, Delimiter='z', Prefix='y')
+
+    keys = _get_keys(response)
+    prefixes = _get_prefixes(response)
+    assert keys == []
+    assert prefixes == []
+
+@pytest.mark.list_objects_v2
+def test_bucket_listv2_prefix_delimiter_prefix_delimiter_not_exist():
+    key_names = ['b/a/c', 'b/a/g', 'b/a/r', 'g']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response = client.list_objects_v2(Bucket=bucket_name, Delimiter='z', Prefix='y')
+
+    keys = _get_keys(response)
+    prefixes = _get_prefixes(response)
+    assert keys == []
+    assert prefixes == []
+
+@pytest.mark.fails_on_dbstore
+def test_bucket_list_maxkeys_one():
+    key_names = ['bar', 'baz', 'foo', 'quxx']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response = client.list_objects(Bucket=bucket_name, MaxKeys=1)
+    assert response['IsTruncated'] == True
+
+    keys = _get_keys(response)
+    assert keys == key_names[0:1]
+
+    response = client.list_objects(Bucket=bucket_name, Marker=key_names[0])
+    assert response['IsTruncated'] == False
+
+    keys = _get_keys(response)
+    assert keys == key_names[1:]
+
+@pytest.mark.list_objects_v2
+@pytest.mark.fails_on_dbstore
+def test_bucket_listv2_maxkeys_one():
+    key_names = ['bar', 'baz', 'foo', 'quxx']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response = client.list_objects_v2(Bucket=bucket_name, MaxKeys=1)
+    assert response['IsTruncated'] == True
+
+    keys = _get_keys(response)
+    assert keys == key_names[0:1]
+
+    response = client.list_objects_v2(Bucket=bucket_name, StartAfter=key_names[0])
+    assert response['IsTruncated'] == False
+
+    keys = _get_keys(response)
+    assert keys == key_names[1:]
+
+def test_bucket_list_maxkeys_zero():
+    key_names = ['bar', 'baz', 'foo', 'quxx']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response = client.list_objects(Bucket=bucket_name, MaxKeys=0)
+
+    assert response['IsTruncated'] == False
+    keys = _get_keys(response)
+    assert keys == []
+
+@pytest.mark.list_objects_v2
+def test_bucket_listv2_maxkeys_zero():
+    key_names = ['bar', 'baz', 'foo', 'quxx']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response = client.list_objects_v2(Bucket=bucket_name, MaxKeys=0)
+
+    assert response['IsTruncated'] == False
+    keys = _get_keys(response)
+    assert keys == []
+
+def test_bucket_list_maxkeys_none():
+    key_names = ['bar', 'baz', 'foo', 'quxx']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response = client.list_objects(Bucket=bucket_name)
+    assert response['IsTruncated'] == False
+    keys = _get_keys(response)
+    assert keys == key_names
+    assert response['MaxKeys'] == 1000
+
+@pytest.mark.list_objects_v2
+def test_bucket_listv2_maxkeys_none():
+    key_names = ['bar', 'baz', 'foo', 'quxx']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response = client.list_objects_v2(Bucket=bucket_name)
+    assert response['IsTruncated'] == False
+    keys = _get_keys(response)
+    assert keys == key_names
+    assert response['MaxKeys'] == 1000
+
+def get_http_response_body(**kwargs):
+    global http_response_body
+    http_response_body = kwargs['http_response'].__dict__['_content']
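+
+# Note: this hook (and get_http_response further below) leans on botocore
+# internals: the 'after-call' event is assumed to pass the raw AWSResponse as
+# kwargs['http_response'], with '_content' holding the undecoded body once the
+# response has been read. If those internals change, these helpers need updating.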
+
+def parseXmlToJson(xml):
+    response = {}
+
+    for child in list(xml):
+        if len(list(child)) > 0:
+            response[child.tag] = parseXmlToJson(child)
+        else:
+            response[child.tag] = child.text or ''
+
+        # one-liner equivalent:
+        # response[child.tag] = parseXmlToJson(child) if len(list(child)) > 0 else child.text or ''
+
+    return response
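+
+# For example (using the Summary fields that test_account_usage checks below),
+# the helper above turns RGW's usage XML into nested dicts keyed by tag name:
+#   xml = ET.fromstring('<Usage><Summary><QuotaMaxBuckets>1000'
+#                       '</QuotaMaxBuckets></Summary></Usage>')
+#   parseXmlToJson(xml)  # -> {'Summary': {'QuotaMaxBuckets': '1000'}}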
+
+@pytest.mark.fails_on_aws
+def test_account_usage():
+    # boto3.set_stream_logger(name='botocore')
+    client = get_client()
+    # appends RGW's non-standard ?usage query parameter to the ListBuckets request
+    def add_usage(**kwargs):
+        kwargs['params']['url'] += "?usage"
+    client.meta.events.register('before-call.s3.ListBuckets', add_usage)
+    client.meta.events.register('after-call.s3.ListBuckets', get_http_response_body)
+    client.list_buckets()
+    xml    = ET.fromstring(http_response_body.decode('utf-8'))
+    parsed = parseXmlToJson(xml)
+    summary = parsed['Summary']
+    assert summary['QuotaMaxBytes'] == '-1'
+    assert summary['QuotaMaxBuckets'] == '1000'
+    assert summary['QuotaMaxObjCount'] == '-1'
+    assert summary['QuotaMaxBytesPerBucket'] == '-1'
+    assert summary['QuotaMaxObjCountPerBucket'] == '-1'
+
+@pytest.mark.fails_on_aws
+@pytest.mark.fails_on_dbstore
+def test_head_bucket_usage():
+    # boto3.set_stream_logger(name='botocore')
+    client = get_client()
+    bucket_name = _create_objects(keys=['foo'])
+    # capture the raw HTTP response so the RGW usage headers can be checked
+    client.meta.events.register('after-call.s3.HeadBucket', get_http_response)
+    client.head_bucket(Bucket=bucket_name)
+    hdrs = http_response['headers']
+    assert hdrs['X-RGW-Object-Count'] == '1'
+    assert hdrs['X-RGW-Bytes-Used'] == '3'
+    assert hdrs['X-RGW-Quota-User-Size'] == '-1'
+    assert hdrs['X-RGW-Quota-User-Objects'] == '-1'
+    assert hdrs['X-RGW-Quota-Max-Buckets'] == '1000'
+    assert hdrs['X-RGW-Quota-Bucket-Size'] == '-1'
+    assert hdrs['X-RGW-Quota-Bucket-Objects'] == '-1'
+
+@pytest.mark.fails_on_aws
+@pytest.mark.fails_on_dbstore
+def test_bucket_list_unordered():
+    # boto3.set_stream_logger(name='botocore')
+    keys_in = ['ado', 'bot', 'cob', 'dog', 'emu', 'fez', 'gnu', 'hex',
+               'abc/ink', 'abc/jet', 'abc/kin', 'abc/lax', 'abc/mux',
+               'def/nim', 'def/owl', 'def/pie', 'def/qed', 'def/rye',
+               'ghi/sew', 'ghi/tor', 'ghi/uke', 'ghi/via', 'ghi/wit',
+               'xix', 'yak', 'zoo']
+    bucket_name = _create_objects(keys=keys_in)
+    client = get_client()
+
+    # adds the unordered query parameter
+    def add_unordered(**kwargs):
+        kwargs['params']['url'] += "&allow-unordered=true"
+    client.meta.events.register('before-call.s3.ListObjects', add_unordered)
+
+    # test simple retrieval
+    response = client.list_objects(Bucket=bucket_name, MaxKeys=1000)
+    unordered_keys_out = _get_keys(response)
+    assert len(keys_in) == len(unordered_keys_out)
+    assert sorted(keys_in) == sorted(unordered_keys_out)
+
+    # test retrieval with prefix
+    response = client.list_objects(Bucket=bucket_name,
+                                   MaxKeys=1000,
+                                   Prefix="abc/")
+    unordered_keys_out = _get_keys(response)
+    assert 5 == len(unordered_keys_out)
+
+    # test incremental retrieval with marker
+    response = client.list_objects(Bucket=bucket_name, MaxKeys=6)
+    unordered_keys_out = _get_keys(response)
+    assert 6 == len(unordered_keys_out)
+
+    # now get the next bunch
+    response = client.list_objects(Bucket=bucket_name,
+                                   MaxKeys=6,
+                                   Marker=unordered_keys_out[-1])
+    unordered_keys_out2 = _get_keys(response)
+    assert 6 == len(unordered_keys_out2)
+
+    # make sure there's no overlap between the incremental retrievals
+    intersect = set(unordered_keys_out).intersection(unordered_keys_out2)
+    assert 0 == len(intersect)
+
+    # verify that unordered used with delimiter results in error
+    e = assert_raises(ClientError,
+                      client.list_objects, Bucket=bucket_name, Delimiter="/")
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 400
+    assert error_code == 'InvalidArgument'
+
+@pytest.mark.fails_on_aws
+@pytest.mark.list_objects_v2
+@pytest.mark.fails_on_dbstore
+def test_bucket_listv2_unordered():
+    # boto3.set_stream_logger(name='botocore')
+    keys_in = ['ado', 'bot', 'cob', 'dog', 'emu', 'fez', 'gnu', 'hex',
+               'abc/ink', 'abc/jet', 'abc/kin', 'abc/lax', 'abc/mux',
+               'def/nim', 'def/owl', 'def/pie', 'def/qed', 'def/rye',
+               'ghi/sew', 'ghi/tor', 'ghi/uke', 'ghi/via', 'ghi/wit',
+               'xix', 'yak', 'zoo']
+    bucket_name = _create_objects(keys=keys_in)
+    client = get_client()
+
+    # adds the unordered query parameter
+    def add_unordered(**kwargs):
+        kwargs['params']['url'] += "&allow-unordered=true"
+    client.meta.events.register('before-call.s3.ListObjectsV2', add_unordered)
+
+    # test simple retrieval
+    response = client.list_objects_v2(Bucket=bucket_name, MaxKeys=1000)
+    unordered_keys_out = _get_keys(response)
+    assert len(keys_in) == len(unordered_keys_out)
+    assert sorted(keys_in) == sorted(unordered_keys_out)
+
+    # test retrieval with prefix
+    response = client.list_objects_v2(Bucket=bucket_name,
+                                   MaxKeys=1000,
+                                   Prefix="abc/")
+    unordered_keys_out = _get_keys(response)
+    assert 5 == len(unordered_keys_out)
+
+    # test incremental retrieval with start-after
+    response = client.list_objects_v2(Bucket=bucket_name, MaxKeys=6)
+    unordered_keys_out = _get_keys(response)
+    assert 6 == len(unordered_keys_out)
+
+    # now get the next bunch
+    response = client.list_objects_v2(Bucket=bucket_name,
+                                   MaxKeys=6,
+                                   StartAfter=unordered_keys_out[-1])
+    unordered_keys_out2 = _get_keys(response)
+    assert 6 == len(unordered_keys_out2)
+
+    # make sure there's no overlap between the incremental retrievals
+    intersect = set(unordered_keys_out).intersection(unordered_keys_out2)
+    assert 0 == len(intersect)
+
+    # verify that unordered used with delimiter results in error
+    e = assert_raises(ClientError,
+                      client.list_objects_v2, Bucket=bucket_name, Delimiter="/")
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 400
+    assert error_code == 'InvalidArgument'
+
+
+def test_bucket_list_maxkeys_invalid():
+    key_names = ['bar', 'baz', 'foo', 'quxx']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    # adds invalid max keys to url
+    # before list_objects is called
+    def add_invalid_maxkeys(**kwargs):
+        kwargs['params']['url'] += "&max-keys=blah"
+    client.meta.events.register('before-call.s3.ListObjects', add_invalid_maxkeys)
+
+    e = assert_raises(ClientError, client.list_objects, Bucket=bucket_name)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 400
+    assert error_code == 'InvalidArgument'
+
+
+
+def test_bucket_list_marker_none():
+    key_names = ['bar', 'baz', 'foo', 'quxx']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response = client.list_objects(Bucket=bucket_name)
+    assert response['Marker'] == ''
+
+
+def test_bucket_list_marker_empty():
+    key_names = ['bar', 'baz', 'foo', 'quxx']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response = client.list_objects(Bucket=bucket_name, Marker='')
+    assert response['Marker'] == ''
+    assert response['IsTruncated'] == False
+    keys = _get_keys(response)
+    assert keys == key_names
+
+@pytest.mark.list_objects_v2
+def test_bucket_listv2_continuationtoken_empty():
+    key_names = ['bar', 'baz', 'foo', 'quxx']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response = client.list_objects_v2(Bucket=bucket_name, ContinuationToken='')
+    assert response['ContinuationToken'] == ''
+    assert response['IsTruncated'] == False
+    keys = _get_keys(response)
+    assert keys == key_names
+
+@pytest.mark.list_objects_v2
+def test_bucket_listv2_continuationtoken():
+    key_names = ['bar', 'baz', 'foo', 'quxx']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response1 = client.list_objects_v2(Bucket=bucket_name, MaxKeys=1)
+    next_continuation_token = response1['NextContinuationToken']
+
+    response2 = client.list_objects_v2(Bucket=bucket_name, ContinuationToken=next_continuation_token)
+    assert response2['ContinuationToken'] == next_continuation_token
+    assert response2['IsTruncated'] == False
+    key_names2 = ['baz', 'foo', 'quxx']
+    keys = _get_keys(response2)
+    assert keys == key_names2
+
+@pytest.mark.list_objects_v2
+@pytest.mark.fails_on_dbstore
+def test_bucket_listv2_both_continuationtoken_startafter():
+    key_names = ['bar', 'baz', 'foo', 'quxx']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response1 = client.list_objects_v2(Bucket=bucket_name, StartAfter='bar', MaxKeys=1)
+    next_continuation_token = response1['NextContinuationToken']
+
+    response2 = client.list_objects_v2(Bucket=bucket_name, StartAfter='bar', ContinuationToken=next_continuation_token)
+    assert response2['ContinuationToken'] == next_continuation_token
+    assert response2['StartAfter'] == 'bar'
+    assert response2['IsTruncated'] == False
+    key_names2 = ['foo', 'quxx']
+    keys = _get_keys(response2)
+    assert keys == key_names2
+
+def test_bucket_list_marker_unreadable():
+    key_names = ['bar', 'baz', 'foo', 'quxx']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response = client.list_objects(Bucket=bucket_name, Marker='\x0a')
+    assert response['Marker'] == '\x0a'
+    assert response['IsTruncated'] == False
+    keys = _get_keys(response)
+    assert keys == key_names
+
+@pytest.mark.list_objects_v2
+def test_bucket_listv2_startafter_unreadable():
+    key_names = ['bar', 'baz', 'foo', 'quxx']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response = client.list_objects_v2(Bucket=bucket_name, StartAfter='\x0a')
+    assert response['StartAfter'] == '\x0a'
+    assert response['IsTruncated'] == False
+    keys = _get_keys(response)
+    assert keys == key_names
+
+def test_bucket_list_marker_not_in_list():
+    key_names = ['bar', 'baz', 'foo', 'quxx']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response = client.list_objects(Bucket=bucket_name, Marker='blah')
+    assert response['Marker'] == 'blah'
+    keys = _get_keys(response)
+    assert keys == [ 'foo','quxx']
+
+@pytest.mark.list_objects_v2
+def test_bucket_listv2_startafter_not_in_list():
+    key_names = ['bar', 'baz', 'foo', 'quxx']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response = client.list_objects_v2(Bucket=bucket_name, StartAfter='blah')
+    assert response['StartAfter'] == 'blah'
+    keys = _get_keys(response)
+    assert keys == ['foo', 'quxx']
+
+def test_bucket_list_marker_after_list():
+    key_names = ['bar', 'baz', 'foo', 'quxx']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response = client.list_objects(Bucket=bucket_name, Marker='zzz')
+    assert response['Marker'] == 'zzz'
+    keys = _get_keys(response)
+    assert response['IsTruncated'] == False
+    assert keys == []
+
+@pytest.mark.list_objects_v2
+def test_bucket_listv2_startafter_after_list():
+    key_names = ['bar', 'baz', 'foo', 'quxx']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response = client.list_objects_v2(Bucket=bucket_name, StartAfter='zzz')
+    assert response['StartAfter'] == 'zzz'
+    keys = _get_keys(response)
+    assert response['IsTruncated'] == False
+    assert keys == []
+
+def _compare_dates(datetime1, datetime2):
+    """
+    changes ms from datetime1 to 0, compares it to datetime2
+    """
+    # both times are in datetime format but datetime1 has
+    # microseconds and datetime2 does not
+    datetime1 = datetime1.replace(microsecond=0)
+    assert datetime1 == datetime2
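+
+# e.g. head_object may report 2015-10-21 07:28:00.123456+00:00 while the
+# bucket listing reports 2015-10-21 07:28:00+00:00 for the same object;
+# zeroing the microseconds makes the two timestamps comparable.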
+
+@pytest.mark.fails_on_dbstore
+def test_bucket_list_return_data():
+    key_names = ['bar', 'baz', 'foo']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    data = {}
+    for key_name in key_names:
+        obj_response = client.head_object(Bucket=bucket_name, Key=key_name)
+        acl_response = client.get_object_acl(Bucket=bucket_name, Key=key_name)
+        data.update({
+            key_name: {
+                'DisplayName': acl_response['Owner']['DisplayName'],
+                'ID': acl_response['Owner']['ID'],
+                'ETag': obj_response['ETag'],
+                'LastModified': obj_response['LastModified'],
+                'ContentLength': obj_response['ContentLength'],
+                }
+            })
+
+    response  = client.list_objects(Bucket=bucket_name)
+    objs_list = response['Contents']
+    for obj in objs_list:
+        key_name = obj['Key']
+        key_data = data[key_name]
+        assert obj['ETag'] == key_data['ETag']
+        assert obj['Size'] == key_data['ContentLength']
+        assert obj['Owner']['DisplayName'] == key_data['DisplayName']
+        assert obj['Owner']['ID'] == key_data['ID']
+        _compare_dates(obj['LastModified'],key_data['LastModified'])
+
+
+@pytest.mark.fails_on_dbstore
+def test_bucket_list_return_data_versioning():
+    bucket_name = get_new_bucket()
+    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
+    key_names = ['bar', 'baz', 'foo']
+    bucket_name = _create_objects(bucket_name=bucket_name,keys=key_names)
+
+    client = get_client()
+    data = {}
+
+    for key_name in key_names:
+        obj_response = client.head_object(Bucket=bucket_name, Key=key_name)
+        acl_response = client.get_object_acl(Bucket=bucket_name, Key=key_name)
+        data.update({
+            key_name: {
+                'ID': acl_response['Owner']['ID'],
+                'DisplayName': acl_response['Owner']['DisplayName'],
+                'ETag': obj_response['ETag'],
+                'LastModified': obj_response['LastModified'],
+                'ContentLength': obj_response['ContentLength'],
+                'VersionId': obj_response['VersionId']
+                }
+            })
+
+    response  = client.list_object_versions(Bucket=bucket_name)
+    objs_list = response['Versions']
+
+    for obj in objs_list:
+        key_name = obj['Key']
+        key_data = data[key_name]
+        assert obj['Owner']['DisplayName'] == key_data['DisplayName']
+        assert obj['ETag'] == key_data['ETag']
+        assert obj['Size'] == key_data['ContentLength']
+        assert obj['Owner']['ID'] == key_data['ID']
+        assert obj['VersionId'] == key_data['VersionId']
+        _compare_dates(obj['LastModified'],key_data['LastModified'])
+
+def test_bucket_list_objects_anonymous():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    client.put_bucket_acl(Bucket=bucket_name, ACL='public-read')
+
+    unauthenticated_client = get_unauthenticated_client()
+    unauthenticated_client.list_objects(Bucket=bucket_name)
+
+@pytest.mark.list_objects_v2
+def test_bucket_listv2_objects_anonymous():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    client.put_bucket_acl(Bucket=bucket_name, ACL='public-read')
+
+    unauthenticated_client = get_unauthenticated_client()
+    unauthenticated_client.list_objects_v2(Bucket=bucket_name)
+
+def test_bucket_list_objects_anonymous_fail():
+    bucket_name = get_new_bucket()
+
+    unauthenticated_client = get_unauthenticated_client()
+    e = assert_raises(ClientError, unauthenticated_client.list_objects, Bucket=bucket_name)
+
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 403
+    assert error_code == 'AccessDenied'
+
+@pytest.mark.list_objects_v2
+def test_bucket_listv2_objects_anonymous_fail():
+    bucket_name = get_new_bucket()
+
+    unauthenticated_client = get_unauthenticated_client()
+    e = assert_raises(ClientError, unauthenticated_client.list_objects_v2, Bucket=bucket_name)
+
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 403
+    assert error_code == 'AccessDenied'
+
+def test_bucket_notexist():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+
+    e = assert_raises(ClientError, client.list_objects, Bucket=bucket_name)
+
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 404
+    assert error_code == 'NoSuchBucket'
+
+@pytest.mark.list_objects_v2
+def test_bucketv2_notexist():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+
+    e = assert_raises(ClientError, client.list_objects_v2, Bucket=bucket_name)
+
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 404
+    assert error_code == 'NoSuchBucket'
+
+def test_bucket_delete_notexist():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+
+    e = assert_raises(ClientError, client.delete_bucket, Bucket=bucket_name)
+
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 404
+    assert error_code == 'NoSuchBucket'
+
+def test_bucket_delete_nonempty():
+    key_names = ['foo']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    e = assert_raises(ClientError, client.delete_bucket, Bucket=bucket_name)
+
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 409
+    assert error_code == 'BucketNotEmpty'
+
+def _do_set_bucket_canned_acl(client, bucket_name, canned_acl, i, results):
+    try:
+        client.put_bucket_acl(ACL=canned_acl, Bucket=bucket_name)
+        results[i] = True
+    except Exception:
+        results[i] = False
+
+def _do_set_bucket_canned_acl_concurrent(client, bucket_name, canned_acl, num, results):
+    t = []
+    for i in range(num):
+        thr = threading.Thread(target = _do_set_bucket_canned_acl, args=(client, bucket_name, canned_acl, i, results))
+        thr.start()
+        t.append(thr)
+    return t
+
+def _do_wait_completion(t):
+    for thr in t:
+        thr.join()
+
+def test_bucket_concurrent_set_canned_acl():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    num_threads = 50 # boto2's retry default was 5, so a thread needs to fail at least
+                     # 5 times; 50 seems like a large enough number to get through the
+                     # retries (if the bug exists)
+    results = [None] * num_threads
+
+    t = _do_set_bucket_canned_acl_concurrent(client, bucket_name, 'public-read', num_threads, results)
+    _do_wait_completion(t)
+
+    for r in results:
+        assert r == True
+
+def test_object_write_to_nonexist_bucket():
+    bucket_name = 'whatchutalkinboutwillis'
+    client = get_client()
+
+    e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key='foo', Body='foo')
+
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 404
+    assert error_code == 'NoSuchBucket'
+
+
+def _ev_add_te_header(request, **kwargs):
+    request.headers.add_header('Transfer-Encoding', 'chunked')
+
+def test_object_write_with_chunked_transfer_encoding():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    client.meta.events.register_first('before-sign.*.*', _ev_add_te_header)
+    response = client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
+
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+
+def test_bucket_create_delete():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    client.delete_bucket(Bucket=bucket_name)
+
+    e = assert_raises(ClientError, client.delete_bucket, Bucket=bucket_name)
+
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 404
+    assert error_code == 'NoSuchBucket'
+
+def test_object_read_not_exist():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key='bar')
+
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 404
+    assert error_code == 'NoSuchKey'
+
+http_response = None
+
+def get_http_response(**kwargs):
+    global http_response
+    http_response = kwargs['http_response'].__dict__
+
+@pytest.mark.fails_on_dbstore
+def test_object_requestid_matches_header_on_error():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    # get http response after failed request
+    client.meta.events.register('after-call.s3.GetObject', get_http_response)
+    e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key='bar')
+
+    response_body = http_response['_content']
+    resp_body_xml = ET.fromstring(response_body)
+    request_id = resp_body_xml.find('.//RequestId').text
+
+    assert request_id is not None
+    assert request_id == e.response['ResponseMetadata']['RequestId']
+
+def _make_objs_dict(key_names):
+    objs_list = []
+    for key in key_names:
+        obj_dict = {'Key': key}
+        objs_list.append(obj_dict)
+    objs_dict = {'Objects': objs_list}
+    return objs_dict
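+
+# e.g. _make_objs_dict(['key0', 'key1']) returns
+#   {'Objects': [{'Key': 'key0'}, {'Key': 'key1'}]}
+# which is the shape delete_objects() expects for its Delete= argument.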
+
+def test_versioning_concurrent_multi_object_delete():
+    num_objects = 5
+    num_threads = 5
+    bucket_name = get_new_bucket()
+
+    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
+
+    key_names = ["key_{:d}".format(x) for x in range(num_objects)]
+    bucket = _create_objects(bucket_name=bucket_name, keys=key_names)
+
+    client = get_client()
+    versions = client.list_object_versions(Bucket=bucket_name)['Versions']
+    assert len(versions) == num_objects
+    objs_dict = {'Objects': [dict((k, v[k]) for k in ["Key", "VersionId"]) for v in versions]}
+    results = [None] * num_threads
+
+    def do_request(n):
+        results[n] = client.delete_objects(Bucket=bucket_name, Delete=objs_dict)
+
+    t = []
+    for i in range(num_threads):
+        thr = threading.Thread(target = do_request, args=[i])
+        thr.start()
+        t.append(thr)
+    _do_wait_completion(t)
+
+    for response in results:
+        assert len(response['Deleted']) == num_objects
+        assert 'Errors' not in response
+
+    response = client.list_objects(Bucket=bucket_name)
+    assert 'Contents' not in response
+
+def test_multi_object_delete():
+    key_names = ['key0', 'key1', 'key2']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+    response = client.list_objects(Bucket=bucket_name)
+    assert len(response['Contents']) == 3
+
+    objs_dict = _make_objs_dict(key_names=key_names)
+    response = client.delete_objects(Bucket=bucket_name, Delete=objs_dict)
+
+    assert len(response['Deleted']) == 3
+    assert 'Errors' not in response
+    response = client.list_objects(Bucket=bucket_name)
+    assert 'Contents' not in response
+
+    response = client.delete_objects(Bucket=bucket_name, Delete=objs_dict)
+    assert len(response['Deleted']) == 3
+    assert 'Errors' not in response
+    response = client.list_objects(Bucket=bucket_name)
+    assert 'Contents' not in response
+
+@pytest.mark.list_objects_v2
+def test_multi_objectv2_delete():
+    key_names = ['key0', 'key1', 'key2']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+    response = client.list_objects_v2(Bucket=bucket_name)
+    assert len(response['Contents']) == 3
+
+    objs_dict = _make_objs_dict(key_names=key_names)
+    response = client.delete_objects(Bucket=bucket_name, Delete=objs_dict)
+
+    assert len(response['Deleted']) == 3
+    assert 'Errors' not in response
+    response = client.list_objects_v2(Bucket=bucket_name)
+    assert 'Contents' not in response
+
+    response = client.delete_objects(Bucket=bucket_name, Delete=objs_dict)
+    assert len(response['Deleted']) == 3
+    assert 'Errors' not in response
+    response = client.list_objects_v2(Bucket=bucket_name)
+    assert 'Contents' not in response
+
+def test_multi_object_delete_key_limit():
+    key_names = [f"key-{i}" for i in range(1001)]
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    paginator = client.get_paginator('list_objects')
+    pages = paginator.paginate(Bucket=bucket_name)
+    numKeys = 0
+    for page in pages:
+        numKeys += len(page['Contents'])
+    assert numKeys == 1001
+
+    objs_dict = _make_objs_dict(key_names=key_names)
+    e = assert_raises(ClientError,client.delete_objects,Bucket=bucket_name,Delete=objs_dict)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 400
+
+def test_multi_objectv2_delete_key_limit():
+    key_names = [f"key-{i}" for i in range(1001)]
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    paginator = client.get_paginator('list_objects_v2')
+    pages = paginator.paginate(Bucket=bucket_name)
+    numKeys = 0
+    for page in pages:
+        numKeys += len(page['Contents'])
+    assert numKeys == 1001
+
+    objs_dict = _make_objs_dict(key_names=key_names)
+    e = assert_raises(ClientError,client.delete_objects,Bucket=bucket_name,Delete=objs_dict)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 400
+
+def test_object_head_zero_bytes():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    client.put_object(Bucket=bucket_name, Key='foo', Body='')
+
+    response = client.head_object(Bucket=bucket_name, Key='foo')
+    assert response['ContentLength'] == 0
+
+def test_object_write_check_etag():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    response = client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+    assert response['ETag'] == '"37b51d194a7513e45b56f6524f2d51f2"'
+
+def test_object_write_cache_control():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    cache_control = 'public, max-age=14400'
+    client.put_object(Bucket=bucket_name, Key='foo', Body='bar', CacheControl=cache_control)
+
+    response = client.head_object(Bucket=bucket_name, Key='foo')
+    assert response['ResponseMetadata']['HTTPHeaders']['cache-control'] == cache_control
+
+def test_object_write_expires():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    utc = pytz.utc
+    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
+    client.put_object(Bucket=bucket_name, Key='foo', Body='bar', Expires=expires)
+
+    response = client.head_object(Bucket=bucket_name, Key='foo')
+    _compare_dates(expires, response['Expires'])
+
+def _get_body(response):
+    body = response['Body']
+    got = body.read()
+    if type(got) is bytes:
+        got = got.decode()
+    return got
+
+def test_object_write_read_update_read_delete():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    # Write
+    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
+    # Read
+    response = client.get_object(Bucket=bucket_name, Key='foo')
+    body = _get_body(response)
+    assert body == 'bar'
+    # Update
+    client.put_object(Bucket=bucket_name, Key='foo', Body='soup')
+    # Read
+    response = client.get_object(Bucket=bucket_name, Key='foo')
+    body = _get_body(response)
+    assert body == 'soup'
+    # Delete
+    client.delete_object(Bucket=bucket_name, Key='foo')
+
+def _set_get_metadata(metadata, bucket_name=None):
+    """
+    create a new bucket new or use an existing
+    name to create an object that bucket,
+    set the meta1 property to a specified, value,
+    and then re-read and return that property
+    """
+    if bucket_name is None:
+        bucket_name = get_new_bucket()
+
+    client = get_client()
+    metadata_dict = {'meta1': metadata}
+    client.put_object(Bucket=bucket_name, Key='foo', Body='bar', Metadata=metadata_dict)
+
+    response = client.get_object(Bucket=bucket_name, Key='foo')
+    return response['Metadata']['meta1']
+
+def test_object_set_get_metadata_none_to_good():
+    got = _set_get_metadata('mymeta')
+    assert got == 'mymeta'
+
+def test_object_set_get_metadata_none_to_empty():
+    got = _set_get_metadata('')
+    assert got == ''
+
+def test_object_set_get_metadata_overwrite_to_empty():
+    bucket_name = get_new_bucket()
+    got = _set_get_metadata('oldmeta', bucket_name)
+    assert got == 'oldmeta'
+    got = _set_get_metadata('', bucket_name)
+    assert got == ''
+
+# TODO: the decoding of this unicode metadata is not happening properly for unknown reasons
+@pytest.mark.fails_on_rgw
+def test_object_set_get_unicode_metadata():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    def set_unicode_metadata(**kwargs):
+        kwargs['params']['headers']['x-amz-meta-meta1'] = u"Hello World\xe9"
+
+    client.meta.events.register('before-call.s3.PutObject', set_unicode_metadata)
+    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
+
+    response = client.get_object(Bucket=bucket_name, Key='foo')
+    got = response['Metadata']['meta1']
+    print(got)
+    print(u"Hello World\xe9")
+    assert got == u"Hello World\xe9"
+
+def _set_get_metadata_unreadable(metadata, bucket_name=None):
+    """
+    set and then read back a meta-data value (which presumably
+    includes some interesting characters), and return a list
+    containing the stored value AND the encoding with which it
+    was returned.
+
+    This should return a 400 bad request because the webserver
+    rejects the request.
+    """
+    if bucket_name is None:
+        bucket_name = get_new_bucket()
+    client = get_client()
+    metadata_dict = {'meta1': metadata}
+    e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key='bar', Metadata=metadata_dict)
+    return e
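+
+# Illustrative usage of the helper above (not itself a test): a caller would
+# typically inspect the status carried by the returned ClientError, e.g.
+#   e = _set_get_metadata_unreadable(metadata='\x04mymeta')
+#   status, error_code = _get_status_and_error_code(e.response)
+#   assert status == 400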
+
+def test_object_metadata_replaced_on_put():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    metadata_dict = {'meta1': 'bar'}
+    client.put_object(Bucket=bucket_name, Key='foo', Body='bar', Metadata=metadata_dict)
+
+    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
+
+    response = client.get_object(Bucket=bucket_name, Key='foo')
+    got = response['Metadata']
+    assert got == {}
+
+def test_object_write_file():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    data_str = 'bar'
+    data = bytes(data_str, 'utf-8')
+    client.put_object(Bucket=bucket_name, Key='foo', Body=data)
+    response = client.get_object(Bucket=bucket_name, Key='foo')
+    body = _get_body(response)
+    assert body == 'bar'
+
+def _get_post_url(bucket_name):
+    endpoint = get_config_endpoint()
+    return '{endpoint}/{bucket_name}'.format(endpoint=endpoint, bucket_name=bucket_name)
+
+def test_post_object_anonymous_request():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+    url = _get_post_url(bucket_name)
+    payload = OrderedDict([("key" , "foo.txt"),("acl" , "public-read"),\
+    ("Content-Type" , "text/plain"),('file', ('bar'))])
+
+    client.create_bucket(ACL='public-read-write', Bucket=bucket_name)
+    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
+    assert r.status_code == 204
+    response = client.get_object(Bucket=bucket_name, Key='foo.txt')
+    body = _get_body(response)
+    assert body == 'bar'
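+
+# A minimal sketch of the signing steps the authenticated browser-POST tests
+# below repeat inline: the policy document is JSON-encoded, base64-encoded, and
+# signed with HMAC-SHA1 over the account's secret key (AWS signature v2 for
+# form POST uploads). The helper name _sign_post_policy is illustrative only;
+# the tests keep these steps inline.
+def _sign_post_policy(policy_document, aws_secret_access_key):
+    json_policy_document = json.JSONEncoder().encode(policy_document)
+    policy = base64.b64encode(bytes(json_policy_document, 'utf-8'))
+    signature = base64.b64encode(
+        hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
+    return policy, signature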
+
+def test_post_object_authenticated_request():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    url = _get_post_url(bucket_name)
+    utc = pytz.utc
+    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
+
+    policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
+    "conditions": [\
+    {"bucket": bucket_name},\
+    ["starts-with", "$key", "foo"],\
+    {"acl": "private"},\
+    ["starts-with", "$Content-Type", "text/plain"],\
+    ["content-length-range", 0, 1024]\
+    ]\
+    }
+
+
+    json_policy_document = json.JSONEncoder().encode(policy_document)
+    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
+    policy = base64.b64encode(bytes_json_policy_document)
+    aws_secret_access_key = get_main_aws_secret_key()
+    aws_access_key_id = get_main_aws_access_key()
+
+    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
+
+    payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
+    ("acl" , "private"),("signature" , signature),("policy" , policy),\
+    ("Content-Type" , "text/plain"),('file', ('bar'))])
+
+    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
+    assert r.status_code == 204
+    response = client.get_object(Bucket=bucket_name, Key='foo.txt')
+    body = _get_body(response)
+    assert body == 'bar'
+
+def test_post_object_authenticated_no_content_type():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+    client.create_bucket(ACL='public-read-write', Bucket=bucket_name)
+
+
+    url = _get_post_url(bucket_name)
+    utc = pytz.utc
+    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
+
+    policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
+    "conditions": [\
+    {"bucket": bucket_name},\
+    ["starts-with", "$key", "foo"],\
+    {"acl": "private"},\
+    ["content-length-range", 0, 1024]\
+    ]\
+    }
+
+    json_policy_document = json.JSONEncoder().encode(policy_document)
+    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
+    policy = base64.b64encode(bytes_json_policy_document)
+    aws_secret_access_key = get_main_aws_secret_key()
+    aws_access_key_id = get_main_aws_access_key()
+
+    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
+
+    payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
+    ("acl" , "private"),("signature" , signature),("policy" , policy),\
+    ('file', ('bar'))])
+
+    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
+    assert r.status_code == 204
+    response = client.get_object(Bucket=bucket_name, Key="foo.txt")
+    body = _get_body(response)
+    assert body == 'bar'
+
+def test_post_object_authenticated_request_bad_access_key():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+    client.create_bucket(ACL='public-read-write', Bucket=bucket_name)
+
+    url = _get_post_url(bucket_name)
+    utc = pytz.utc
+    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
+
+    policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
+    "conditions": [\
+    {"bucket": bucket_name},\
+    ["starts-with", "$key", "foo"],\
+    {"acl": "private"},\
+    ["starts-with", "$Content-Type", "text/plain"],\
+    ["content-length-range", 0, 1024]\
+    ]\
+    }
+
+
+    json_policy_document = json.JSONEncoder().encode(policy_document)
+    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
+    policy = base64.b64encode(bytes_json_policy_document)
+    aws_secret_access_key = get_main_aws_secret_key()
+    aws_access_key_id = get_main_aws_access_key()
+
+    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
+
+    payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , 'foo'),\
+    ("acl" , "private"),("signature" , signature),("policy" , policy),\
+    ("Content-Type" , "text/plain"),('file', ('bar'))])
+
+    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
+    assert r.status_code == 403
+
+def test_post_object_set_success_code():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+    client.create_bucket(ACL='public-read-write', Bucket=bucket_name)
+
+    url = _get_post_url(bucket_name)
+    payload = OrderedDict([("key" , "foo.txt"),("acl" , "public-read"),\
+    ("success_action_status" , "201"),\
+    ("Content-Type" , "text/plain"),('file', ('bar'))])
+
+    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
+    assert r.status_code == 201
+    message = ET.fromstring(r.content).find('Key')
+    assert message.text == 'foo.txt'
+
+def test_post_object_set_invalid_success_code():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+    client.create_bucket(ACL='public-read-write', Bucket=bucket_name)
+
+    url = _get_post_url(bucket_name)
+    payload = OrderedDict([("key" , "foo.txt"),("acl" , "public-read"),\
+    ("success_action_status" , "404"),\
+    ("Content-Type" , "text/plain"),('file', ('bar'))])
+
+    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
+    assert r.status_code == 204
+    content = r.content.decode()
+    assert content == ''
+
+def test_post_object_upload_larger_than_chunk():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    url = _get_post_url(bucket_name)
+    utc = pytz.utc
+    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
+
+    policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
+    "conditions": [\
+    {"bucket": bucket_name},\
+    ["starts-with", "$key", "foo"],\
+    {"acl": "private"},\
+    ["starts-with", "$Content-Type", "text/plain"],\
+    ["content-length-range", 0, 5*1024*1024]\
+    ]\
+    }
+
+
+    json_policy_document = json.JSONEncoder().encode(policy_document)
+    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
+    policy = base64.b64encode(bytes_json_policy_document)
+    aws_secret_access_key = get_main_aws_secret_key()
+    aws_access_key_id = get_main_aws_access_key()
+
+    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
+
+    foo_string = 'foo' * 1024*1024
+
+    payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
+    ("acl" , "private"),("signature" , signature),("policy" , policy),\
+    ("Content-Type" , "text/plain"),('file', foo_string)])
+
+    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
+    assert r.status_code == 204
+    response = client.get_object(Bucket=bucket_name, Key='foo.txt')
+    body = _get_body(response)
+    assert body == foo_string
+
+def test_post_object_set_key_from_filename():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    url = _get_post_url(bucket_name)
+    utc = pytz.utc
+    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
+
+    policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
+    "conditions": [\
+    {"bucket": bucket_name},\
+    ["starts-with", "$key", "foo"],\
+    {"acl": "private"},\
+    ["starts-with", "$Content-Type", "text/plain"],\
+    ["content-length-range", 0, 1024]\
+    ]\
+    }
+
+    json_policy_document = json.JSONEncoder().encode(policy_document)
+    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
+    policy = base64.b64encode(bytes_json_policy_document)
+    aws_secret_access_key = get_main_aws_secret_key()
+    aws_access_key_id = get_main_aws_access_key()
+
+    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
+
+    payload = OrderedDict([ ("key" , "${filename}"),("AWSAccessKeyId" , aws_access_key_id),\
+    ("acl" , "private"),("signature" , signature),("policy" , policy),\
+    ("Content-Type" , "text/plain"),('file', ('foo.txt', 'bar'))])
+
+    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
+    assert r.status_code == 204
+    response = client.get_object(Bucket=bucket_name, Key='foo.txt')
+    body = _get_body(response)
+    assert body == 'bar'
+
+def test_post_object_ignored_header():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    url = _get_post_url(bucket_name)
+    utc = pytz.utc
+    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
+
+    policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
+    "conditions": [\
+    {"bucket": bucket_name},\
+    ["starts-with", "$key", "foo"],\
+    {"acl": "private"},\
+    ["starts-with", "$Content-Type", "text/plain"],\
+    ["content-length-range", 0, 1024]\
+    ]\
+    }
+
+
+    json_policy_document = json.JSONEncoder().encode(policy_document)
+    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
+    policy = base64.b64encode(bytes_json_policy_document)
+    aws_secret_access_key = get_main_aws_secret_key()
+    aws_access_key_id = get_main_aws_access_key()
+
+    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
+
+    payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
+    ("acl" , "private"),("signature" , signature),("policy" , policy),\
+    ("Content-Type" , "text/plain"),("x-ignore-foo" , "bar"),('file', ('bar'))])
+
+    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
+    assert r.status_code == 204
+
+def test_post_object_case_insensitive_condition_fields():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    url = _get_post_url(bucket_name)
+    utc = pytz.utc
+    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
+
+    policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
+    "conditions": [\
+    {"bUcKeT": bucket_name},\
+    ["StArTs-WiTh", "$KeY", "foo"],\
+    {"AcL": "private"},\
+    ["StArTs-WiTh", "$CoNtEnT-TyPe", "text/plain"],\
+    ["content-length-range", 0, 1024]\
+    ]\
+    }
+
+    json_policy_document = json.JSONEncoder().encode(policy_document)
+    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
+    policy = base64.b64encode(bytes_json_policy_document)
+    aws_secret_access_key = get_main_aws_secret_key()
+    aws_access_key_id = get_main_aws_access_key()
+
+    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
+
+    payload = OrderedDict([ ("kEy" , "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
+    ("aCl" , "private"),("signature" , signature),("pOLICy" , policy),\
+    ("Content-Type" , "text/plain"),('file', ('bar'))])
+
+    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
+    assert r.status_code == 204
+
+def test_post_object_escaped_field_values():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    url = _get_post_url(bucket_name)
+    utc = pytz.utc
+    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
+
+    policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
+    "conditions": [\
+    {"bucket": bucket_name},\
+    ["starts-with", "$key", "\$foo"],\
+    {"acl": "private"},\
+    ["starts-with", "$Content-Type", "text/plain"],\
+    ["content-length-range", 0, 1024]\
+    ]\
+    }
+
+    json_policy_document = json.JSONEncoder().encode(policy_document)
+    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
+    policy = base64.b64encode(bytes_json_policy_document)
+    aws_secret_access_key = get_main_aws_secret_key()
+    aws_access_key_id = get_main_aws_access_key()
+
+    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
+
+    payload = OrderedDict([ ("key" , "\$foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
+    ("acl" , "private"),("signature" , signature),("policy" , policy),\
+    ("Content-Type" , "text/plain"),('file', ('bar'))])
+
+    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
+    assert r.status_code == 204
+    response = client.get_object(Bucket=bucket_name, Key='\$foo.txt')
+    body = _get_body(response)
+    assert body == 'bar'
+
+def test_post_object_success_redirect_action():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+    client.create_bucket(ACL='public-read-write', Bucket=bucket_name)
+
+    url = _get_post_url(bucket_name)
+    redirect_url = _get_post_url(bucket_name)
+
+    utc = pytz.utc
+    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
+
+    policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
+    "conditions": [\
+    {"bucket": bucket_name},\
+    ["starts-with", "$key", "foo"],\
+    {"acl": "private"},\
+    ["starts-with", "$Content-Type", "text/plain"],\
+    ["eq", "$success_action_redirect", redirect_url],\
+    ["content-length-range", 0, 1024]\
+    ]\
+    }
+
+    json_policy_document = json.JSONEncoder().encode(policy_document)
+    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
+    policy = base64.b64encode(bytes_json_policy_document)
+    aws_secret_access_key = get_main_aws_secret_key()
+    aws_access_key_id = get_main_aws_access_key()
+
+    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
+
+    payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
+    ("acl" , "private"),("signature" , signature),("policy" , policy),\
+    ("Content-Type" , "text/plain"),("success_action_redirect" , redirect_url),\
+    ('file', ('bar'))])
+
+    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
+    assert r.status_code == 200
+    url = r.url
+    response = client.get_object(Bucket=bucket_name, Key='foo.txt')
+    assert url == '{rurl}?bucket={bucket}&key={key}&etag=%22{etag}%22'.format(\
+    rurl = redirect_url, bucket = bucket_name, key = 'foo.txt', etag = response['ETag'].strip('"'))
+
+def test_post_object_invalid_signature():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    url = _get_post_url(bucket_name)
+    utc = pytz.utc
+    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
+
+    policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
+    "conditions": [\
+    {"bucket": bucket_name},\
+    ["starts-with", "$key", "\$foo"],\
+    {"acl": "private"},\
+    ["starts-with", "$Content-Type", "text/plain"],\
+    ["content-length-range", 0, 1024]\
+    ]\
+    }
+
+    json_policy_document = json.JSONEncoder().encode(policy_document)
+    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
+    policy = base64.b64encode(bytes_json_policy_document)
+    aws_secret_access_key = get_main_aws_secret_key()
+    aws_access_key_id = get_main_aws_access_key()
+
+    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())[::-1]
+
+    payload = OrderedDict([ ("key" , "\$foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
+    ("acl" , "private"),("signature" , signature),("policy" , policy),\
+    ("Content-Type" , "text/plain"),('file', ('bar'))])
+
+    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
+    assert r.status_code == 403
+
+def test_post_object_invalid_access_key():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    url = _get_post_url(bucket_name)
+    utc = pytz.utc
+    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
+
+    policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
+    "conditions": [\
+    {"bucket": bucket_name},\
+    ["starts-with", "$key", "\$foo"],\
+    {"acl": "private"},\
+    ["starts-with", "$Content-Type", "text/plain"],\
+    ["content-length-range", 0, 1024]\
+    ]\
+    }
+
+    json_policy_document = json.JSONEncoder().encode(policy_document)
+    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
+    policy = base64.b64encode(bytes_json_policy_document)
+    aws_secret_access_key = get_main_aws_secret_key()
+    aws_access_key_id = get_main_aws_access_key()
+
+    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
+
+    payload = OrderedDict([ ("key" , "\$foo.txt"),("AWSAccessKeyId" , aws_access_key_id[::-1]),\
+    ("acl" , "private"),("signature" , signature),("policy" , policy),\
+    ("Content-Type" , "text/plain"),('file', ('bar'))])
+
+    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
+    assert r.status_code == 403
+
+def test_post_object_invalid_date_format():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    url = _get_post_url(bucket_name)
+    utc = pytz.utc
+    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
+
+    policy_document = {"expiration": str(expires),\
+    "conditions": [\
+    {"bucket": bucket_name},\
+    ["starts-with", "$key", "\$foo"],\
+    {"acl": "private"},\
+    ["starts-with", "$Content-Type", "text/plain"],\
+    ["content-length-range", 0, 1024]\
+    ]\
+    }
+
+    json_policy_document = json.JSONEncoder().encode(policy_document)
+    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
+    policy = base64.b64encode(bytes_json_policy_document)
+    aws_secret_access_key = get_main_aws_secret_key()
+    aws_access_key_id = get_main_aws_access_key()
+
+    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
+
+    payload = OrderedDict([ ("key" , "\$foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
+    ("acl" , "private"),("signature" , signature),("policy" , policy),\
+    ("Content-Type" , "text/plain"),('file', ('bar'))])
+
+    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
+    assert r.status_code == 400
+
+def test_post_object_no_key_specified():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    url = _get_post_url(bucket_name)
+    utc = pytz.utc
+    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
+
+    policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
+    "conditions": [\
+    {"bucket": bucket_name},\
+    {"acl": "private"},\
+    ["starts-with", "$Content-Type", "text/plain"],\
+    ["content-length-range", 0, 1024]\
+    ]\
+    }
+
+    json_policy_document = json.JSONEncoder().encode(policy_document)
+    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
+    policy = base64.b64encode(bytes_json_policy_document)
+    aws_secret_access_key = get_main_aws_secret_key()
+    aws_access_key_id = get_main_aws_access_key()
+
+    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
+
+    payload = OrderedDict([ ("AWSAccessKeyId" , aws_access_key_id),\
+    ("acl" , "private"),("signature" , signature),("policy" , policy),\
+    ("Content-Type" , "text/plain"),('file', ('bar'))])
+
+    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
+    assert r.status_code == 400
+
+def test_post_object_missing_signature():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    url = _get_post_url(bucket_name)
+    utc = pytz.utc
+    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
+
+    policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
+    "conditions": [\
+    {"bucket": bucket_name},\
+    ["starts-with", "$key", "\$foo"],\
+    {"acl": "private"},\
+    ["starts-with", "$Content-Type", "text/plain"],\
+    ["content-length-range", 0, 1024]\
+    ]\
+    }
+
+    json_policy_document = json.JSONEncoder().encode(policy_document)
+    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
+    policy = base64.b64encode(bytes_json_policy_document)
+    aws_secret_access_key = get_main_aws_secret_key()
+    aws_access_key_id = get_main_aws_access_key()
+
+    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
+
+    payload = OrderedDict([ ("key", "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
+    ("acl" , "private"),("policy" , policy),\
+    ("Content-Type" , "text/plain"),('file', ('bar'))])
+
+    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
+    assert r.status_code == 400
+
+def test_post_object_missing_policy_condition():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    url = _get_post_url(bucket_name)
+    utc = pytz.utc
+    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
+
+    policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
+    "conditions": [\
+    ["starts-with", "$key", "\$foo"],\
+    {"acl": "private"},\
+    ["starts-with", "$Content-Type", "text/plain"],\
+    ["content-length-range", 0, 1024]\
+    ]\
+    }
+
+    json_policy_document = json.JSONEncoder().encode(policy_document)
+    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
+    policy = base64.b64encode(bytes_json_policy_document)
+    aws_secret_access_key = get_main_aws_secret_key()
+    aws_access_key_id = get_main_aws_access_key()
+
+    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
+
+    payload = OrderedDict([ ("key", "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
+    ("acl" , "private"),("signature" , signature),("policy" , policy),\
+    ("Content-Type" , "text/plain"),('file', ('bar'))])
+
+    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
+    assert r.status_code == 403
+
+def test_post_object_user_specified_header():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    url = _get_post_url(bucket_name)
+    utc = pytz.utc
+    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
+
+    policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
+    "conditions": [\
+    {"bucket": bucket_name},\
+    ["starts-with", "$key", "foo"],\
+    {"acl": "private"},\
+    ["starts-with", "$Content-Type", "text/plain"],\
+    ["content-length-range", 0, 1024],\
+    ["starts-with", "$x-amz-meta-foo",  "bar"]
+    ]\
+    }
+
+    json_policy_document = json.JSONEncoder().encode(policy_document)
+    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
+    policy = base64.b64encode(bytes_json_policy_document)
+    aws_secret_access_key = get_main_aws_secret_key()
+    aws_access_key_id = get_main_aws_access_key()
+
+    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
+
+    payload = OrderedDict([ ("key", "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
+    ("acl" , "private"),("signature" , signature),("policy" , policy),\
+    ("Content-Type" , "text/plain"),('x-amz-meta-foo' , 'barclamp'),('file', ('bar'))])
+
+    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
+    assert r.status_code == 204
+    response = client.get_object(Bucket=bucket_name, Key='foo.txt')
+    assert response['Metadata']['foo'] == 'barclamp'
+
+def test_post_object_request_missing_policy_specified_field():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    url = _get_post_url(bucket_name)
+    utc = pytz.utc
+    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
+
+    policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
+    "conditions": [\
+    {"bucket": bucket_name},\
+    ["starts-with", "$key", "foo"],\
+    {"acl": "private"},\
+    ["starts-with", "$Content-Type", "text/plain"],\
+    ["content-length-range", 0, 1024],\
+    ["starts-with", "$x-amz-meta-foo",  "bar"]
+    ]\
+    }
+
+    json_policy_document = json.JSONEncoder().encode(policy_document)
+    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
+    policy = base64.b64encode(bytes_json_policy_document)
+    aws_secret_access_key = get_main_aws_secret_key()
+    aws_access_key_id = get_main_aws_access_key()
+
+    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
+
+    payload = OrderedDict([ ("key", "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
+    ("acl" , "private"),("signature" , signature),("policy" , policy),\
+    ("Content-Type" , "text/plain"),('file', ('bar'))])
+
+    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
+    assert r.status_code == 403
+
+def test_post_object_condition_is_case_sensitive():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    url = _get_post_url(bucket_name)
+    utc = pytz.utc
+    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
+
+    policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
+    "CONDITIONS": [\
+    {"bucket": bucket_name},\
+    ["starts-with", "$key", "foo"],\
+    {"acl": "private"},\
+    ["starts-with", "$Content-Type", "text/plain"],\
+    ["content-length-range", 0, 1024],\
+    ]\
+    }
+
+    json_policy_document = json.JSONEncoder().encode(policy_document)
+    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
+    policy = base64.b64encode(bytes_json_policy_document)
+    aws_secret_access_key = get_main_aws_secret_key()
+    aws_access_key_id = get_main_aws_access_key()
+
+    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
+
+    payload = OrderedDict([ ("key", "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
+    ("acl" , "private"),("signature" , signature),("policy" , policy),\
+    ("Content-Type" , "text/plain"),('file', ('bar'))])
+
+    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
+    assert r.status_code == 400
+
+def test_post_object_expires_is_case_sensitive():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    url = _get_post_url(bucket_name)
+    utc = pytz.utc
+    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
+
+    policy_document = {"EXPIRATION": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
+    "conditions": [\
+    {"bucket": bucket_name},\
+    ["starts-with", "$key", "foo"],\
+    {"acl": "private"},\
+    ["starts-with", "$Content-Type", "text/plain"],\
+    ["content-length-range", 0, 1024],\
+    ]\
+    }
+
+    json_policy_document = json.JSONEncoder().encode(policy_document)
+    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
+    policy = base64.b64encode(bytes_json_policy_document)
+    aws_secret_access_key = get_main_aws_secret_key()
+    aws_access_key_id = get_main_aws_access_key()
+
+    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
+
+    payload = OrderedDict([ ("key", "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
+    ("acl" , "private"),("signature" , signature),("policy" , policy),\
+    ("Content-Type" , "text/plain"),('file', ('bar'))])
+
+    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
+    assert r.status_code == 400
+
+def test_post_object_expired_policy():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    url = _get_post_url(bucket_name)
+    utc = pytz.utc
+    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=-6000)
+
+    policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
+    "conditions": [\
+    {"bucket": bucket_name},\
+    ["starts-with", "$key", "foo"],\
+    {"acl": "private"},\
+    ["starts-with", "$Content-Type", "text/plain"],\
+    ["content-length-range", 0, 1024],\
+    ]\
+    }
+
+    json_policy_document = json.JSONEncoder().encode(policy_document)
+    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
+    policy = base64.b64encode(bytes_json_policy_document)
+    aws_secret_access_key = get_main_aws_secret_key()
+    aws_access_key_id = get_main_aws_access_key()
+
+    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
+
+    payload = OrderedDict([ ("key", "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
+    ("acl" , "private"),("signature" , signature),("policy" , policy),\
+    ("Content-Type" , "text/plain"),('file', ('bar'))])
+
+    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
+    assert r.status_code == 403
+
+def test_post_object_wrong_bucket():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    utc = pytz.utc
+    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
+
+    policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
+    "conditions": [\
+    {"bucket": bucket_name},\
+    ["starts-with", "$key", "foo"],\
+    {"acl": "private"},\
+    ["starts-with", "$Content-Type", "text/plain"],\
+    ["content-length-range", 0, 1024]\
+    ]\
+    }
+
+    json_policy_document = json.JSONEncoder().encode(policy_document)
+    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
+    policy = base64.b64encode(bytes_json_policy_document)
+    aws_secret_access_key = get_main_aws_secret_key()
+    aws_access_key_id = get_main_aws_access_key()
+
+    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
+
+    payload = OrderedDict([ ("key" , "${filename}"),('bucket', bucket_name),\
+    ("AWSAccessKeyId" , aws_access_key_id),\
+    ("acl" , "private"),("signature" , signature),("policy" , policy),\
+    ("Content-Type" , "text/plain"),('file', ('foo.txt', 'bar'))])
+
+    bad_bucket_name = get_new_bucket()
+    url = _get_post_url(bad_bucket_name)
+
+    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
+    assert r.status_code == 403
+
+def test_post_object_invalid_request_field_value():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    url = _get_post_url(bucket_name)
+    utc = pytz.utc
+    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
+
+    policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
+    "conditions": [\
+    {"bucket": bucket_name},\
+    ["starts-with", "$key", "foo"],\
+    {"acl": "private"},\
+    ["starts-with", "$Content-Type", "text/plain"],\
+    ["content-length-range", 0, 1024],\
+    ["eq", "$x-amz-meta-foo",  ""]
+    ]\
+    }
+
+    json_policy_document = json.JSONEncoder().encode(policy_document)
+    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
+    policy = base64.b64encode(bytes_json_policy_document)
+    aws_secret_access_key = get_main_aws_secret_key()
+    aws_access_key_id = get_main_aws_access_key()
+
+    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
+    payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
+    ("acl" , "private"),("signature" , signature),("policy" , policy),\
+    ("Content-Type" , "text/plain"),('x-amz-meta-foo' , 'barclamp'),('file', ('bar'))])
+
+    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
+    assert r.status_code == 403
+
+def test_post_object_missing_expires_condition():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    url = _get_post_url(bucket_name)
+    utc = pytz.utc
+    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
+
+    policy_document = {\
+    "conditions": [\
+    {"bucket": bucket_name},\
+    ["starts-with", "$key", "foo"],\
+    {"acl": "private"},\
+    ["starts-with", "$Content-Type", "text/plain"],\
+    ["content-length-range", 0, 1024],\
+    ]\
+    }
+
+    json_policy_document = json.JSONEncoder().encode(policy_document)
+    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
+    policy = base64.b64encode(bytes_json_policy_document)
+    aws_secret_access_key = get_main_aws_secret_key()
+    aws_access_key_id = get_main_aws_access_key()
+
+    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
+
+    payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
+    ("acl" , "private"),("signature" , signature),("policy" , policy),\
+    ("Content-Type" , "text/plain"),('file', ('bar'))])
+
+    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
+    assert r.status_code == 400
+
+def test_post_object_missing_conditions_list():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    url = _get_post_url(bucket_name)
+    utc = pytz.utc
+    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
+
+    policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ")}
+
+    json_policy_document = json.JSONEncoder().encode(policy_document)
+    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
+    policy = base64.b64encode(bytes_json_policy_document)
+    aws_secret_access_key = get_main_aws_secret_key()
+    aws_access_key_id = get_main_aws_access_key()
+
+    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
+
+    payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
+    ("acl" , "private"),("signature" , signature),("policy" , policy),\
+    ("Content-Type" , "text/plain"),('file', ('bar'))])
+
+    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
+    assert r.status_code == 400
+
+def test_post_object_upload_size_limit_exceeded():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    url = _get_post_url(bucket_name)
+    utc = pytz.utc
+    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
+
+    policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
+    "conditions": [\
+    {"bucket": bucket_name},\
+    ["starts-with", "$key", "foo"],\
+    {"acl": "private"},\
+    ["starts-with", "$Content-Type", "text/plain"],\
+    ["content-length-range", 0, 0],\
+    ]\
+    }
+
+    json_policy_document = json.JSONEncoder().encode(policy_document)
+    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
+    policy = base64.b64encode(bytes_json_policy_document)
+    aws_secret_access_key = get_main_aws_secret_key()
+    aws_access_key_id = get_main_aws_access_key()
+
+    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
+
+    payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
+    ("acl" , "private"),("signature" , signature),("policy" , policy),\
+    ("Content-Type" , "text/plain"),('file', ('bar'))])
+
+    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
+    assert r.status_code == 400
+
+def test_post_object_missing_content_length_argument():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    url = _get_post_url(bucket_name)
+    utc = pytz.utc
+    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
+
+    policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
+    "conditions": [\
+    {"bucket": bucket_name},\
+    ["starts-with", "$key", "foo"],\
+    {"acl": "private"},\
+    ["starts-with", "$Content-Type", "text/plain"],\
+    ["content-length-range", 0],\
+    ]\
+    }
+
+    json_policy_document = json.JSONEncoder().encode(policy_document)
+    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
+    policy = base64.b64encode(bytes_json_policy_document)
+    aws_secret_access_key = get_main_aws_secret_key()
+    aws_access_key_id = get_main_aws_access_key()
+
+    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
+
+    payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
+    ("acl" , "private"),("signature" , signature),("policy" , policy),\
+    ("Content-Type" , "text/plain"),('file', ('bar'))])
+
+    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
+    assert r.status_code == 400
+
+def test_post_object_invalid_content_length_argument():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    url = _get_post_url(bucket_name)
+    utc = pytz.utc
+    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
+
+    policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
+    "conditions": [\
+    {"bucket": bucket_name},\
+    ["starts-with", "$key", "foo"],\
+    {"acl": "private"},\
+    ["starts-with", "$Content-Type", "text/plain"],\
+    ["content-length-range", -1, 0],\
+    ]\
+    }
+
+    json_policy_document = json.JSONEncoder().encode(policy_document)
+    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
+    policy = base64.b64encode(bytes_json_policy_document)
+    aws_secret_access_key = get_main_aws_secret_key()
+    aws_access_key_id = get_main_aws_access_key()
+
+    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
+
+    payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
+    ("acl" , "private"),("signature" , signature),("policy" , policy),\
+    ("Content-Type" , "text/plain"),('file', ('bar'))])
+
+    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
+    assert r.status_code == 400
+
+def test_post_object_upload_size_below_minimum():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    url = _get_post_url(bucket_name)
+    utc = pytz.utc
+    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
+
+    policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
+    "conditions": [\
+    {"bucket": bucket_name},\
+    ["starts-with", "$key", "foo"],\
+    {"acl": "private"},\
+    ["starts-with", "$Content-Type", "text/plain"],\
+    ["content-length-range", 512, 1000],\
+    ]\
+    }
+
+    json_policy_document = json.JSONEncoder().encode(policy_document)
+    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
+    policy = base64.b64encode(bytes_json_policy_document)
+    aws_secret_access_key = get_main_aws_secret_key()
+    aws_access_key_id = get_main_aws_access_key()
+
+    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
+
+    payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
+    ("acl" , "private"),("signature" , signature),("policy" , policy),\
+    ("Content-Type" , "text/plain"),('file', ('bar'))])
+
+    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
+    assert r.status_code == 400
+
+def test_post_object_upload_size_rgw_chunk_size_bug():
+    # Test for https://tracker.ceph.com/issues/58627
+    # TODO: if this value is different in Teuthology runs, this would need tuning
+    # https://github.com/ceph/ceph/blob/main/qa/suites/rgw/verify/striping%24/stripe-greater-than-chunk.yaml
+    _rgw_max_chunk_size = 4 * 2**20 # 4MiB
+    min_size = _rgw_max_chunk_size
+    max_size = _rgw_max_chunk_size * 3
+    # payload layout: one full chunk followed by a small remainder
+    test_payload_size = _rgw_max_chunk_size + 200 # extra bit to push it over the chunk boundary
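+    # with a 4 MiB chunk: min_size = 4194304, test_payload_size = 4194504, max_size = 12582912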
+    # it should be valid when we run this test!
+    assert test_payload_size > min_size
+    assert test_payload_size < max_size
+
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    url = _get_post_url(bucket_name)
+    utc = pytz.utc
+    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
+
+    policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
+    "conditions": [\
+    {"bucket": bucket_name},\
+    ["starts-with", "$key", "foo"],\
+    {"acl": "private"},\
+    ["starts-with", "$Content-Type", "text/plain"],\
+    ["content-length-range", min_size, max_size],\
+    ]\
+    }
+
+    test_payload = 'x' * test_payload_size
+
+    json_policy_document = json.JSONEncoder().encode(policy_document)
+    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
+    policy = base64.b64encode(bytes_json_policy_document)
+    aws_secret_access_key = get_main_aws_secret_key()
+    aws_access_key_id = get_main_aws_access_key()
+
+    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
+
+    payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
+    ("acl" , "private"),("signature" , signature),("policy" , policy),\
+    ("Content-Type" , "text/plain"),('file', (test_payload))])
+
+    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
+    assert r.status_code == 204
+
+def test_post_object_empty_conditions():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    url = _get_post_url(bucket_name)
+    utc = pytz.utc
+    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
+
+    policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
+    "conditions": [\
+    { }\
+    ]\
+    }
+
+    json_policy_document = json.JSONEncoder().encode(policy_document)
+    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
+    policy = base64.b64encode(bytes_json_policy_document)
+    aws_secret_access_key = get_main_aws_secret_key()
+    aws_access_key_id = get_main_aws_access_key()
+
+    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
+
+    payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
+    ("acl" , "private"),("signature" , signature),("policy" , policy),\
+    ("Content-Type" , "text/plain"),('file', ('bar'))])
+
+    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
+    assert r.status_code == 400
+
+def test_get_object_ifmatch_good():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    response = client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
+    etag = response['ETag']
+
+    response = client.get_object(Bucket=bucket_name, Key='foo', IfMatch=etag)
+    body = _get_body(response)
+    assert body == 'bar'
+
+def test_get_object_ifmatch_failed():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
+
+    e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key='foo', IfMatch='"ABCORZ"')
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 412
+    assert error_code == 'PreconditionFailed'
+
+def test_get_object_ifnonematch_good():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    response = client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
+    etag = response['ETag']
+
+    e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key='foo', IfNoneMatch=etag)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 304
+    assert e.response['Error']['Message'] == 'Not Modified'
+    assert e.response['ResponseMetadata']['HTTPHeaders']['etag'] == etag
+
+def test_get_object_ifnonematch_failed():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
+
+    response = client.get_object(Bucket=bucket_name, Key='foo', IfNoneMatch='ABCORZ')
+    body = _get_body(response)
+    assert body == 'bar'
+
+def test_get_object_ifmodifiedsince_good():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
+
+    response = client.get_object(Bucket=bucket_name, Key='foo', IfModifiedSince='Sat, 29 Oct 1994 19:43:31 GMT')
+    body = _get_body(response)
+    assert body == 'bar'
+
+@pytest.mark.fails_on_dbstore
+def test_get_object_ifmodifiedsince_failed():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
+    response = client.get_object(Bucket=bucket_name, Key='foo')
+    etag = response['ETag']
+    last_modified = str(response['LastModified'])
+
+    last_modified = last_modified.split('+')[0]
+    mtime = datetime.datetime.strptime(last_modified, '%Y-%m-%d %H:%M:%S')
+
+    after = mtime + datetime.timedelta(seconds=1)
+    after_str = time.strftime("%a, %d %b %Y %H:%M:%S GMT", after.timetuple())
+
+    time.sleep(1)
+
+    e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key='foo', IfModifiedSince=after_str)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 304
+    assert e.response['Error']['Message'] == 'Not Modified'
+    assert e.response['ResponseMetadata']['HTTPHeaders']['etag'] == etag
+
+@pytest.mark.fails_on_dbstore
+def test_get_object_ifunmodifiedsince_good():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
+
+    e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key='foo', IfUnmodifiedSince='Sat, 29 Oct 1994 19:43:31 GMT')
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 412
+    assert error_code == 'PreconditionFailed'
+
+def test_get_object_ifunmodifiedsince_failed():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
+
+    response = client.get_object(Bucket=bucket_name, Key='foo', IfUnmodifiedSince='Sat, 29 Oct 2100 19:43:31 GMT')
+    body = _get_body(response)
+    assert body == 'bar'
+
+
+@pytest.mark.fails_on_aws
+def test_put_object_ifmatch_good():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
+
+    response = client.get_object(Bucket=bucket_name, Key='foo')
+    body = _get_body(response)
+    assert body == 'bar'
+
+    etag = response['ETag'].replace('"', '')
+
+    # pass in custom header 'If-Match' before PutObject call
+    lf = (lambda **kwargs: kwargs['params']['headers'].update({'If-Match': etag}))
+    client.meta.events.register('before-call.s3.PutObject', lf)
+    response = client.put_object(Bucket=bucket_name,Key='foo', Body='zar')
+
+    response = client.get_object(Bucket=bucket_name, Key='foo')
+    body = _get_body(response)
+    assert body == 'zar'
+
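+# Conditional PUTs have no put_object() parameter for If-Match/If-None-Match,
+# so the tests around here inject the raw header through a boto3 'before-call'
+# event hook. A minimal sketch of that pattern (the helper name and the
+# unregister step are illustrative; the tests use inline lambdas and a fresh
+# client per test instead):
+def _put_object_with_header(client, bucket, key, body, header, value):
+    def add_header(**kwargs):
+        kwargs['params']['headers'][header] = value
+    client.meta.events.register('before-call.s3.PutObject', add_header)
+    try:
+        return client.put_object(Bucket=bucket, Key=key, Body=body)
+    finally:
+        client.meta.events.unregister('before-call.s3.PutObject', add_header)
+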
+@pytest.mark.fails_on_dbstore
+def test_put_object_ifmatch_failed():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
+    response = client.get_object(Bucket=bucket_name, Key='foo')
+    body = _get_body(response)
+    assert body == 'bar'
+
+    # pass in custom header 'If-Match' before PutObject call
+    lf = (lambda **kwargs: kwargs['params']['headers'].update({'If-Match': '"ABCORZ"'}))
+    client.meta.events.register('before-call.s3.PutObject', lf)
+
+    e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key='foo', Body='zar')
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 412
+    assert error_code == 'PreconditionFailed'
+
+    response = client.get_object(Bucket=bucket_name, Key='foo')
+    body = _get_body(response)
+    assert body == 'bar'
+
+@pytest.mark.fails_on_aws
+def test_put_object_ifmatch_overwrite_existed_good():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
+    response = client.get_object(Bucket=bucket_name, Key='foo')
+    body = _get_body(response)
+    assert body == 'bar'
+
+    lf = (lambda **kwargs: kwargs['params']['headers'].update({'If-Match': '*'}))
+    client.meta.events.register('before-call.s3.PutObject', lf)
+    response = client.put_object(Bucket=bucket_name,Key='foo', Body='zar')
+
+    response = client.get_object(Bucket=bucket_name, Key='foo')
+    body = _get_body(response)
+    assert body == 'zar'
+
+@pytest.mark.fails_on_aws
+@pytest.mark.fails_on_dbstore
+def test_put_object_ifmatch_nonexisted_failed():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    lf = (lambda **kwargs: kwargs['params']['headers'].update({'If-Match': '*'}))
+    client.meta.events.register('before-call.s3.PutObject', lf)
+    e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key='foo', Body='bar')
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 412
+    assert error_code == 'PreconditionFailed'
+
+    e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key='foo')
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 404
+    assert error_code == 'NoSuchKey'
+
+@pytest.mark.fails_on_aws
+def test_put_object_ifnonmatch_good():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
+    response = client.get_object(Bucket=bucket_name, Key='foo')
+    body = _get_body(response)
+    assert body == 'bar'
+
+    lf = (lambda **kwargs: kwargs['params']['headers'].update({'If-None-Match': 'ABCORZ'}))
+    client.meta.events.register('before-call.s3.PutObject', lf)
+    response = client.put_object(Bucket=bucket_name,Key='foo', Body='zar')
+
+    response = client.get_object(Bucket=bucket_name, Key='foo')
+    body = _get_body(response)
+    assert body == 'zar'
+
+@pytest.mark.fails_on_aws
+@pytest.mark.fails_on_dbstore
+def test_put_object_ifnonmatch_failed():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
+
+    response = client.get_object(Bucket=bucket_name, Key='foo')
+    body = _get_body(response)
+    assert body == 'bar'
+
+    etag = response['ETag'].replace('"', '')
+
+    lf = (lambda **kwargs: kwargs['params']['headers'].update({'If-None-Match': etag}))
+    client.meta.events.register('before-call.s3.PutObject', lf)
+    e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key='foo', Body='zar')
+
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 412
+    assert error_code == 'PreconditionFailed'
+
+    response = client.get_object(Bucket=bucket_name, Key='foo')
+    body = _get_body(response)
+    assert body == 'bar'
+
+@pytest.mark.fails_on_aws
+def test_put_object_ifnonmatch_nonexisted_good():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    lf = (lambda **kwargs: kwargs['params']['headers'].update({'If-None-Match': '*'}))
+    client.meta.events.register('before-call.s3.PutObject', lf)
+    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
+
+    response = client.get_object(Bucket=bucket_name, Key='foo')
+    body = _get_body(response)
+    assert body == 'bar'
+
+@pytest.mark.fails_on_aws
+@pytest.mark.fails_on_dbstore
+def test_put_object_ifnonmatch_overwrite_existed_failed():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
+
+    response = client.get_object(Bucket=bucket_name, Key='foo')
+    body = _get_body(response)
+    assert body == 'bar'
+
+    lf = (lambda **kwargs: kwargs['params']['headers'].update({'If-None-Match': '*'}))
+    client.meta.events.register('before-call.s3.PutObject', lf)
+    e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key='foo', Body='zar')
+
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 412
+    assert error_code == 'PreconditionFailed'
+
+    response = client.get_object(Bucket=bucket_name, Key='foo')
+    body = _get_body(response)
+    assert body == 'bar'
+
+def _setup_bucket_object_acl(bucket_acl, object_acl, client=None):
+    """
+    create a new bucket with the specified bucket acl, then add
+    a 'foo' key with the specified object acl; returns the bucket name.
+    """
+    if client is None:
+        client = get_client()
+    bucket_name = get_new_bucket_name()
+    client.create_bucket(ACL=bucket_acl, Bucket=bucket_name)
+    client.put_object(ACL=object_acl, Bucket=bucket_name, Key='foo')
+
+    return bucket_name
+
+def _setup_bucket_acl(bucket_acl=None):
+    """
+    set up a new bucket with specified acl
+    """
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+    client.create_bucket(ACL=bucket_acl, Bucket=bucket_name)
+
+    return bucket_name
+
+def test_object_raw_get():
+    bucket_name = _setup_bucket_object_acl('public-read', 'public-read')
+
+    unauthenticated_client = get_unauthenticated_client()
+    response = unauthenticated_client.get_object(Bucket=bucket_name, Key='foo')
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+def test_object_raw_get_bucket_gone():
+    bucket_name = _setup_bucket_object_acl('public-read', 'public-read')
+    client = get_client()
+
+    client.delete_object(Bucket=bucket_name, Key='foo')
+    client.delete_bucket(Bucket=bucket_name)
+
+    unauthenticated_client = get_unauthenticated_client()
+
+    e = assert_raises(ClientError, unauthenticated_client.get_object, Bucket=bucket_name, Key='foo')
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 404
+    assert error_code == 'NoSuchBucket'
+
+def test_object_delete_key_bucket_gone():
+    bucket_name = _setup_bucket_object_acl('public-read', 'public-read')
+    client = get_client()
+
+    client.delete_object(Bucket=bucket_name, Key='foo')
+    client.delete_bucket(Bucket=bucket_name)
+
+    unauthenticated_client = get_unauthenticated_client()
+
+    e = assert_raises(ClientError, unauthenticated_client.delete_object, Bucket=bucket_name, Key='foo')
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 404
+    assert error_code == 'NoSuchBucket'
+
+def test_object_raw_get_object_gone():
+    bucket_name = _setup_bucket_object_acl('public-read', 'public-read')
+    client = get_client()
+
+    client.delete_object(Bucket=bucket_name, Key='foo')
+
+    unauthenticated_client = get_unauthenticated_client()
+
+    e = assert_raises(ClientError, unauthenticated_client.get_object, Bucket=bucket_name, Key='foo')
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 404
+    assert error_code == 'NoSuchKey'
+
+def test_bucket_head():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    response = client.head_bucket(Bucket=bucket_name)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+def test_bucket_head_notexist():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+
+    e = assert_raises(ClientError, client.head_bucket, Bucket=bucket_name)
+
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 404
+    # n.b., RGW does not send a response document for this operation,
+    # which seems consistent with
+    # https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadBucket.html
+    #assert error_code == 'NoSuchKey'
+
+@pytest.mark.fails_on_aws
+@pytest.mark.fails_on_dbstore
+def test_bucket_head_extended():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    response = client.head_bucket(Bucket=bucket_name)
+    assert int(response['ResponseMetadata']['HTTPHeaders']['x-rgw-object-count']) == 0
+    assert int(response['ResponseMetadata']['HTTPHeaders']['x-rgw-bytes-used']) == 0
+
+    _create_objects(bucket_name=bucket_name, keys=['foo','bar','baz'])
+    response = client.head_bucket(Bucket=bucket_name)
+
+    assert int(response['ResponseMetadata']['HTTPHeaders']['x-rgw-object-count']) == 3
+    assert int(response['ResponseMetadata']['HTTPHeaders']['x-rgw-bytes-used']) == 9
+
+def test_object_raw_get_bucket_acl():
+    bucket_name = _setup_bucket_object_acl('private', 'public-read')
+
+    unauthenticated_client = get_unauthenticated_client()
+    response = unauthenticated_client.get_object(Bucket=bucket_name, Key='foo')
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+def test_object_raw_get_object_acl():
+    bucket_name = _setup_bucket_object_acl('public-read', 'private')
+
+    unauthenticated_client = get_unauthenticated_client()
+    e = assert_raises(ClientError, unauthenticated_client.get_object, Bucket=bucket_name, Key='foo')
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 403
+    assert error_code == 'AccessDenied'
+
+def test_object_put_acl_mtime():
+    key = 'foo'
+    bucket_name = get_new_bucket()
+    # Enable versioning
+    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
+    client = get_client()
+
+    content = 'foooz'
+    client.put_object(Bucket=bucket_name, Key=key, Body=content)
+    
+    obj_response = client.head_object(Bucket=bucket_name, Key=key)
+    create_mtime = obj_response['LastModified']
+
+    response  = client.list_objects(Bucket=bucket_name)
+    obj_list = response['Contents'][0]
+    _compare_dates(obj_list['LastModified'],create_mtime)
+
+    response  = client.list_object_versions(Bucket=bucket_name)
+    obj_list = response['Versions'][0]
+    _compare_dates(obj_list['LastModified'],create_mtime)
+
+    # set acl
+    time.sleep(2)
+    client.put_object_acl(ACL='private',Bucket=bucket_name, Key=key)
+    
+    # mtime should match with create mtime
+    obj_response = client.head_object(Bucket=bucket_name, Key=key)
+    _compare_dates(create_mtime,obj_response['LastModified'])
+
+    response  = client.list_objects(Bucket=bucket_name)
+    obj_list = response['Contents'][0]
+    _compare_dates(obj_list['LastModified'],create_mtime)
+
+    response  = client.list_object_versions(Bucket=bucket_name)
+    obj_list = response['Versions'][0]
+    _compare_dates(obj_list['LastModified'],create_mtime)
+
+def test_object_raw_authenticated():
+    bucket_name = _setup_bucket_object_acl('public-read', 'public-read')
+
+    client = get_client()
+    response = client.get_object(Bucket=bucket_name, Key='foo')
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+def test_object_raw_response_headers():
+    bucket_name = _setup_bucket_object_acl('private', 'private')
+
+    client = get_client()
+
+    response = client.get_object(Bucket=bucket_name, Key='foo', ResponseCacheControl='no-cache', ResponseContentDisposition='bla', ResponseContentEncoding='aaa', ResponseContentLanguage='esperanto', ResponseContentType='foo/bar', ResponseExpires='123')
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+    assert response['ResponseMetadata']['HTTPHeaders']['content-type'] == 'foo/bar'
+    assert response['ResponseMetadata']['HTTPHeaders']['content-disposition'] == 'bla'
+    assert response['ResponseMetadata']['HTTPHeaders']['content-language'] == 'esperanto'
+    assert response['ResponseMetadata']['HTTPHeaders']['content-encoding'] == 'aaa'
+    assert response['ResponseMetadata']['HTTPHeaders']['cache-control'] == 'no-cache'
+
+def test_object_raw_authenticated_bucket_acl():
+    bucket_name = _setup_bucket_object_acl('private', 'public-read')
+
+    client = get_client()
+    response = client.get_object(Bucket=bucket_name, Key='foo')
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+def test_object_raw_authenticated_object_acl():
+    bucket_name = _setup_bucket_object_acl('public-read', 'private')
+
+    client = get_client()
+    response = client.get_object(Bucket=bucket_name, Key='foo')
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+def test_object_raw_authenticated_bucket_gone():
+    bucket_name = _setup_bucket_object_acl('public-read', 'public-read')
+    client = get_client()
+
+    client.delete_object(Bucket=bucket_name, Key='foo')
+    client.delete_bucket(Bucket=bucket_name)
+
+    e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key='foo')
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 404
+    assert error_code == 'NoSuchBucket'
+
+def test_object_raw_authenticated_object_gone():
+    bucket_name = _setup_bucket_object_acl('public-read', 'public-read')
+    client = get_client()
+
+    client.delete_object(Bucket=bucket_name, Key='foo')
+
+    e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key='foo')
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 404
+    assert error_code == 'NoSuchKey'
+
+def _test_object_raw_get_x_amz_expires_not_expired(client):
+    bucket_name = _setup_bucket_object_acl('public-read', 'public-read', client=client)
+    params = {'Bucket': bucket_name, 'Key': 'foo'}
+
+    url = client.generate_presigned_url(ClientMethod='get_object', Params=params, ExpiresIn=100000, HttpMethod='GET')
+
+    res = requests.options(url, verify=get_config_ssl_verify()).__dict__
+    assert res['status_code'] == 400
+
+    res = requests.get(url, verify=get_config_ssl_verify()).__dict__
+    assert res['status_code'] == 200
+
+def test_object_raw_get_x_amz_expires_not_expired():
+    _test_object_raw_get_x_amz_expires_not_expired(client=get_client())
+
+def test_object_raw_get_x_amz_expires_not_expired_tenant():
+    _test_object_raw_get_x_amz_expires_not_expired(client=get_tenant_client())
+
+def test_object_raw_get_x_amz_expires_out_range_zero():
+    bucket_name = _setup_bucket_object_acl('public-read', 'public-read')
+    client = get_client()
+    params = {'Bucket': bucket_name, 'Key': 'foo'}
+
+    url = client.generate_presigned_url(ClientMethod='get_object', Params=params, ExpiresIn=0, HttpMethod='GET')
+
+    res = requests.get(url, verify=get_config_ssl_verify()).__dict__
+    assert res['status_code'] == 403
+
+def test_object_raw_get_x_amz_expires_out_max_range():
+    bucket_name = _setup_bucket_object_acl('public-read', 'public-read')
+    client = get_client()
+    params = {'Bucket': bucket_name, 'Key': 'foo'}
+
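+    # 609901 seconds is presumably chosen to exceed the 604800-second (7-day) maximum for presigned URLs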
+    url = client.generate_presigned_url(ClientMethod='get_object', Params=params, ExpiresIn=609901, HttpMethod='GET')
+
+    res = requests.get(url, verify=get_config_ssl_verify()).__dict__
+    assert res['status_code'] == 403
+
+def test_object_raw_get_x_amz_expires_out_positive_range():
+    bucket_name = _setup_bucket_object_acl('public-read', 'public-read')
+    client = get_client()
+    params = {'Bucket': bucket_name, 'Key': 'foo'}
+
+    url = client.generate_presigned_url(ClientMethod='get_object', Params=params, ExpiresIn=-7, HttpMethod='GET')
+
+    res = requests.get(url, verify=get_config_ssl_verify()).__dict__
+    assert res['status_code'] == 403
+
+
+def test_object_anon_put():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    client.put_object(Bucket=bucket_name, Key='foo')
+
+    unauthenticated_client = get_unauthenticated_client()
+
+    e = assert_raises(ClientError, unauthenticated_client.put_object, Bucket=bucket_name, Key='foo', Body='foo')
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 403
+    assert error_code == 'AccessDenied'
+
+def test_object_anon_put_write_access():
+    bucket_name = _setup_bucket_acl('public-read-write')
+    client = get_client()
+    client.put_object(Bucket=bucket_name, Key='foo')
+
+    unauthenticated_client = get_unauthenticated_client()
+
+    response = unauthenticated_client.put_object(Bucket=bucket_name, Key='foo', Body='foo')
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+def test_object_put_authenticated():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    response = client.put_object(Bucket=bucket_name, Key='foo', Body='foo')
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+def _test_object_presigned_put_object_with_acl(client=None):
+    if client is None:
+        client = get_client()
+
+    bucket_name = get_new_bucket(client)
+    key = 'foo'
+
+    params = {'Bucket': bucket_name, 'Key': key, 'ACL': 'private'}
+    url = client.generate_presigned_url(ClientMethod='put_object', Params=params, HttpMethod='PUT')
+
+    data = b'hello world'
+    headers = {'x-amz-acl': 'private'}
+    res = requests.put(url, data=data, headers=headers, verify=get_config_ssl_verify())
+    assert res.status_code == 200
+
+    params = {'Bucket': bucket_name, 'Key': key}
+    url = client.generate_presigned_url(ClientMethod='get_object', Params=params, HttpMethod='GET')
+
+    res = requests.get(url, verify=get_config_ssl_verify())
+    assert res.status_code == 200
+    assert res.text == 'hello world'
+
+def test_object_presigned_put_object_with_acl():
+    _test_object_presigned_put_object_with_acl(
+        client=get_client())
+
+def test_object_presigned_put_object_with_acl_tenant():
+    _test_object_presigned_put_object_with_acl(
+        client=get_tenant_client())
+
+def test_object_raw_put_authenticated_expired():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    client.put_object(Bucket=bucket_name, Key='foo')
+
+    params = {'Bucket': bucket_name, 'Key': 'foo'}
+    url = client.generate_presigned_url(ClientMethod='put_object', Params=params, ExpiresIn=-1000, HttpMethod='PUT')
+
+    # params wouldn't take a 'Body' parameter so we're passing it in here
+    res = requests.put(url, data="foo", verify=get_config_ssl_verify()).__dict__
+    assert res['status_code'] == 403
+
+def check_bad_bucket_name(bucket_name):
+    """
+    Attempt to create a bucket with a specified name, and confirm
+    that the request fails because of an invalid bucket name.
+    """
+    client = get_client()
+    e = assert_raises(ClientError, client.create_bucket, Bucket=bucket_name)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 400
+    assert error_code == 'InvalidBucketName'
+
+
+# AWS does not enforce all documented bucket restrictions.
+# http://docs.amazonwebservices.com/AmazonS3/2006-03-01/dev/index.html?BucketRestrictions.html
+@pytest.mark.fails_on_aws
+# Breaks DNS with SubdomainCallingFormat
+def test_bucket_create_naming_bad_starts_nonalpha():
+    bucket_name = get_new_bucket_name()
+    check_bad_bucket_name('_' + bucket_name)
+
+def check_invalid_bucketname(invalid_name):
+    """
+    Send a create bucket request with an invalid bucket name,
+    bypassing the ParamValidationError that would be raised if the
+    invalid name were passed to create_bucket() directly.
+    Returns the status and error code from the failure.
+    """
+    client = get_client()
+    valid_bucket_name = get_new_bucket_name()
+    def replace_bucketname_from_url(**kwargs):
+        url = kwargs['params']['url']
+        new_url = url.replace(valid_bucket_name, invalid_name)
+        kwargs['params']['url'] = new_url
+    client.meta.events.register('before-call.s3.CreateBucket', replace_bucketname_from_url)
+    e = assert_raises(ClientError, client.create_bucket, Bucket=invalid_name)
+    status, error_code = _get_status_and_error_code(e.response)
+    return (status, error_code)
+
+def test_bucket_create_naming_bad_short_one():
+    check_bad_bucket_name('a')
+
+def test_bucket_create_naming_bad_short_two():
+    check_bad_bucket_name('aa')
+
+def check_good_bucket_name(name, _prefix=None):
+    """
+    Attempt to create a bucket with a specified name
+    and (specified or default) prefix, and assert that the
+    request succeeds.
+    """
+    # tests using this with the default prefix must *not* rely on
+    # being able to set the initial character, or exceed the max len
+
+    # tests using this with a custom prefix are responsible for doing
+    # their own setup/teardown nukes, with their custom prefix; this
+    # should be very rare
+    if _prefix is None:
+        _prefix = get_prefix()
+    bucket_name = '{prefix}{name}'.format(
+            prefix=_prefix,
+            name=name,
+            )
+    client = get_client()
+    response = client.create_bucket(Bucket=bucket_name)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+def _test_bucket_create_naming_good_long(length):
+    """
+    Attempt to create a bucket whose name (including the
+    prefix) is of a specified length.
+    """
+    # tests using this with the default prefix must *not* rely on
+    # being able to set the initial character, or exceed the max len
+
+    # tests using this with a custom prefix are responsible for doing
+    # their own setup/teardown nukes, with their custom prefix; this
+    # should be very rare
+    prefix = get_new_bucket_name()
+    assert len(prefix) < 63
+    num = length - len(prefix)
+    name=num*'a'
+
+    bucket_name = '{prefix}{name}'.format(
+            prefix=prefix,
+            name=name,
+            )
+    client = get_client()
+    response = client.create_bucket(Bucket=bucket_name)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+# Breaks DNS with SubdomainCallingFormat
+@pytest.mark.fails_on_aws
+# Should now pass on AWS even though it has 'fails_on_aws' attr.
+def test_bucket_create_naming_good_long_60():
+    _test_bucket_create_naming_good_long(60)
+
+# Breaks DNS with SubdomainCallingFormat
+@pytest.mark.fails_on_aws
+# Should now pass on AWS even though it has 'fails_on_aws' attr.
+def test_bucket_create_naming_good_long_61():
+    _test_bucket_create_naming_good_long(61)
+
+# Breaks DNS with SubdomainCallingFormat
+@pytest.mark.fails_on_aws
+# Should now pass on AWS even though it has 'fails_on_aws' attr.
+def test_bucket_create_naming_good_long_62():
+    _test_bucket_create_naming_good_long(62)
+
+
+# Breaks DNS with SubdomainCallingFormat
+def test_bucket_create_naming_good_long_63():
+    _test_bucket_create_naming_good_long(63)
+
+
+# Breaks DNS with SubdomainCallingFormat
+@pytest.mark.fails_on_aws
+# Should now pass on AWS even though it has 'fails_on_aws' attr.
+def test_bucket_list_long_name():
+    prefix = get_new_bucket_name()
+    length = 61
+    num = length - len(prefix)
+    name=num*'a'
+
+    bucket_name = '{prefix}{name}'.format(
+            prefix=prefix,
+            name=name,
+            )
+    bucket = get_new_bucket_resource(name=bucket_name)
+    is_empty = _bucket_is_empty(bucket)
+    assert is_empty
+
+# AWS does not enforce all documented bucket restrictions.
+# http://docs.amazonwebservices.com/AmazonS3/2006-03-01/dev/index.html?BucketRestrictions.html
+@pytest.mark.fails_on_aws
+def test_bucket_create_naming_bad_ip():
+    check_bad_bucket_name('192.168.5.123')
+
+# test_bucket_create_naming_dns_* are valid but not recommended
+@pytest.mark.fails_on_aws
+# Should now pass on AWS even though it has 'fails_on_aws' attr.
+def test_bucket_create_naming_dns_underscore():
+    invalid_bucketname = 'foo_bar'
+    status, error_code = check_invalid_bucketname(invalid_bucketname)
+    assert status == 400
+    assert error_code == 'InvalidBucketName'
+
+# Breaks DNS with SubdomainCallingFormat
+@pytest.mark.fails_on_aws
+def test_bucket_create_naming_dns_long():
+    prefix = get_prefix()
+    assert len(prefix) < 50
+    num = 63 - len(prefix)
+    check_good_bucket_name(num * 'a')
+
+# Breaks DNS with SubdomainCallingFormat
+@pytest.mark.fails_on_aws
+# Should now pass on AWS even though it has 'fails_on_aws' attr.
+def test_bucket_create_naming_dns_dash_at_end():
+    invalid_bucketname = 'foo-'
+    status, error_code = check_invalid_bucketname(invalid_bucketname)
+    assert status == 400
+    assert error_code == 'InvalidBucketName'
+
+
+# Breaks DNS with SubdomainCallingFormat
+@pytest.mark.fails_on_aws
+# Should now pass on AWS even though it has 'fails_on_aws' attr.
+def test_bucket_create_naming_dns_dot_dot():
+    invalid_bucketname = 'foo..bar'
+    status, error_code = check_invalid_bucketname(invalid_bucketname)
+    assert status == 400
+    assert error_code == 'InvalidBucketName'
+
+
+# Breaks DNS with SubdomainCallingFormat
+@pytest.mark.fails_on_aws
+# Should now pass on AWS even though it has 'fails_on_aws' attr.
+def test_bucket_create_naming_dns_dot_dash():
+    invalid_bucketname = 'foo.-bar'
+    status, error_code = check_invalid_bucketname(invalid_bucketname)
+    assert status == 400
+    assert error_code == 'InvalidBucketName'
+
+
+# Breaks DNS with SubdomainCallingFormat
+@pytest.mark.fails_on_aws
+# Should now pass on AWS even though it has 'fails_on_aws' attr.
+def test_bucket_create_naming_dns_dash_dot():
+    invalid_bucketname = 'foo-.bar'
+    status, error_code = check_invalid_bucketname(invalid_bucketname)
+    assert status == 400
+    assert error_code == 'InvalidBucketName'
+
+def test_bucket_create_exists():
+    # aws-s3 default region allows recreation of buckets
+    # but all other regions fail with BucketAlreadyOwnedByYou.
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+
+    client.create_bucket(Bucket=bucket_name)
+    try:
+        response = client.create_bucket(Bucket=bucket_name)
+    except ClientError as e:
+        status, error_code = _get_status_and_error_code(e.response)
+        assert status == 409
+        assert error_code == 'BucketAlreadyOwnedByYou'
+
+@pytest.mark.fails_on_dbstore
+def test_bucket_get_location():
+    location_constraint = get_main_api_name()
+    if not location_constraint:
+        pytest.skip('no api_name configured')
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+
+    client.create_bucket(Bucket=bucket_name, CreateBucketConfiguration={'LocationConstraint': location_constraint})
+
+    response = client.get_bucket_location(Bucket=bucket_name)
+    if location_constraint == "":
+        location_constraint = None
+    assert response['LocationConstraint'] == location_constraint
+
+@pytest.mark.fails_on_dbstore
+def test_bucket_create_exists_nonowner():
+    # Names are shared across a global namespace. As such, no two
+    # users can create a bucket with that same name.
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+
+    alt_client = get_alt_client()
+
+    client.create_bucket(Bucket=bucket_name)
+    e = assert_raises(ClientError, alt_client.create_bucket, Bucket=bucket_name)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 409
+    assert error_code == 'BucketAlreadyExists'
+
+@pytest.mark.fails_on_dbstore
+def test_bucket_recreate_overwrite_acl():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+
+    client.create_bucket(Bucket=bucket_name, ACL='public-read')
+    e = assert_raises(ClientError, client.create_bucket, Bucket=bucket_name)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 409
+    assert error_code == 'BucketAlreadyExists'
+
+@pytest.mark.fails_on_dbstore
+def test_bucket_recreate_new_acl():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+
+    client.create_bucket(Bucket=bucket_name)
+    e = assert_raises(ClientError, client.create_bucket, Bucket=bucket_name, ACL='public-read')
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 409
+    assert error_code == 'BucketAlreadyExists'
+
+def check_access_denied(fn, *args, **kwargs):
+    e = assert_raises(ClientError, fn, *args, **kwargs)
+    status = _get_status(e.response)
+    assert status == 403
+
+
+def check_grants(got, want):
+    """
+    Check that grants list in got matches the dictionaries in want,
+    in any order.
+    """
+    assert len(got) == len(want)
+
+    # got may not match want purely because the items are in a different order
+    if got[0]["Grantee"].get("DisplayName"):
+        got.sort(key=lambda x: x["Grantee"].get("DisplayName"))
+        want.sort(key=lambda x: x["DisplayName"])
+
+    for g, w in zip(got, want):
+        w = dict(w)
+        g = dict(g)
+        assert g.pop('Permission', None) == w['Permission']
+        assert g['Grantee'].pop('DisplayName', None) == w['DisplayName']
+        assert g['Grantee'].pop('ID', None) == w['ID']
+        assert g['Grantee'].pop('Type', None) == w['Type']
+        assert g['Grantee'].pop('URI', None) == w['URI']
+        assert g['Grantee'].pop('EmailAddress', None) == w['EmailAddress']
+        assert g == {'Grantee': {}}
+
+
+def test_bucket_acl_default():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    response = client.get_bucket_acl(Bucket=bucket_name)
+
+    display_name = get_main_display_name()
+    user_id = get_main_user_id()
+
+    assert response['Owner']['DisplayName'] == display_name
+    assert response['Owner']['ID'] == user_id
+
+    grants = response['Grants']
+    check_grants(
+        grants,
+        [
+            dict(
+                Permission='FULL_CONTROL',
+                ID=user_id,
+                DisplayName=display_name,
+                URI=None,
+                EmailAddress=None,
+                Type='CanonicalUser',
+                ),
+            ],
+        )
+
+@pytest.mark.fails_on_aws
+def test_bucket_acl_canned_during_create():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+    client.create_bucket(ACL='public-read', Bucket=bucket_name)
+    response = client.get_bucket_acl(Bucket=bucket_name)
+
+    display_name = get_main_display_name()
+    user_id = get_main_user_id()
+
+    grants = response['Grants']
+    check_grants(
+        grants,
+        [
+            dict(
+                Permission='READ',
+                ID=None,
+                DisplayName=None,
+                URI='http://acs.amazonaws.com/groups/global/AllUsers',
+                EmailAddress=None,
+                Type='Group',
+                ),
+            dict(
+                Permission='FULL_CONTROL',
+                ID=user_id,
+                DisplayName=display_name,
+                URI=None,
+                EmailAddress=None,
+                Type='CanonicalUser',
+                ),
+            ],
+        )
+
+def test_bucket_acl_canned():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+    client.create_bucket(ACL='public-read', Bucket=bucket_name)
+    response = client.get_bucket_acl(Bucket=bucket_name)
+
+    display_name = get_main_display_name()
+    user_id = get_main_user_id()
+
+    grants = response['Grants']
+    check_grants(
+        grants,
+        [
+            dict(
+                Permission='READ',
+                ID=None,
+                DisplayName=None,
+                URI='http://acs.amazonaws.com/groups/global/AllUsers',
+                EmailAddress=None,
+                Type='Group',
+                ),
+            dict(
+                Permission='FULL_CONTROL',
+                ID=user_id,
+                DisplayName=display_name,
+                URI=None,
+                EmailAddress=None,
+                Type='CanonicalUser',
+                ),
+            ],
+        )
+
+    client.put_bucket_acl(ACL='private', Bucket=bucket_name)
+    response = client.get_bucket_acl(Bucket=bucket_name)
+
+    grants = response['Grants']
+    check_grants(
+        grants,
+        [
+            dict(
+                Permission='FULL_CONTROL',
+                ID=user_id,
+                DisplayName=display_name,
+                URI=None,
+                EmailAddress=None,
+                Type='CanonicalUser',
+                ),
+            ],
+        )
+
+def test_bucket_acl_canned_publicreadwrite():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+    client.create_bucket(ACL='public-read-write', Bucket=bucket_name)
+    response = client.get_bucket_acl(Bucket=bucket_name)
+
+    display_name = get_main_display_name()
+    user_id = get_main_user_id()
+    grants = response['Grants']
+    check_grants(
+        grants,
+        [
+            dict(
+                Permission='READ',
+                ID=None,
+                DisplayName=None,
+                URI='http://acs.amazonaws.com/groups/global/AllUsers',
+                EmailAddress=None,
+                Type='Group',
+                ),
+            dict(
+                Permission='WRITE',
+                ID=None,
+                DisplayName=None,
+                URI='http://acs.amazonaws.com/groups/global/AllUsers',
+                EmailAddress=None,
+                Type='Group',
+                ),
+            dict(
+                Permission='FULL_CONTROL',
+                ID=user_id,
+                DisplayName=display_name,
+                URI=None,
+                EmailAddress=None,
+                Type='CanonicalUser',
+                ),
+            ],
+        )
+
+def test_bucket_acl_canned_authenticatedread():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+    client.create_bucket(ACL='authenticated-read', Bucket=bucket_name)
+    response = client.get_bucket_acl(Bucket=bucket_name)
+
+    display_name = get_main_display_name()
+    user_id = get_main_user_id()
+
+    grants = response['Grants']
+    check_grants(
+        grants,
+        [
+            dict(
+                Permission='READ',
+                ID=None,
+                DisplayName=None,
+                URI='http://acs.amazonaws.com/groups/global/AuthenticatedUsers',
+                EmailAddress=None,
+                Type='Group',
+                ),
+            dict(
+                Permission='FULL_CONTROL',
+                ID=user_id,
+                DisplayName=display_name,
+                URI=None,
+                EmailAddress=None,
+                Type='CanonicalUser',
+                ),
+            ],
+        )
+
+def test_put_bucket_acl_grant_group_read():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    display_name = get_main_display_name()
+    user_id = get_main_user_id()
+
+    grant = {'Grantee': {'Type': 'Group', 'URI': 'http://acs.amazonaws.com/groups/global/AllUsers'}, 'Permission': 'READ'}
+    policy = add_bucket_user_grant(bucket_name, grant)
+
+    client.put_bucket_acl(Bucket=bucket_name, AccessControlPolicy=policy)
+
+    response = client.get_bucket_acl(Bucket=bucket_name)
+
+    check_grants(
+        response['Grants'],
+        [
+            dict(
+                Permission='READ',
+                ID=None,
+                DisplayName=None,
+                URI='http://acs.amazonaws.com/groups/global/AllUsers',
+                EmailAddress=None,
+                Type='Group',
+                ),
+            dict(
+                Permission='FULL_CONTROL',
+                ID=user_id,
+                DisplayName=display_name,
+                URI=None,
+                EmailAddress=None,
+                Type='CanonicalUser',
+                ),
+            ],
+        )
+
+def test_object_acl_default():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
+    response = client.get_object_acl(Bucket=bucket_name, Key='foo')
+
+    display_name = get_main_display_name()
+    user_id = get_main_user_id()
+
+
+    grants = response['Grants']
+    check_grants(
+        grants,
+        [
+            dict(
+                Permission='FULL_CONTROL',
+                ID=user_id,
+                DisplayName=display_name,
+                URI=None,
+                EmailAddress=None,
+                Type='CanonicalUser',
+                ),
+            ],
+        )
+
+def test_object_acl_canned_during_create():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    client.put_object(ACL='public-read', Bucket=bucket_name, Key='foo', Body='bar')
+    response = client.get_object_acl(Bucket=bucket_name, Key='foo')
+
+    display_name = get_main_display_name()
+    user_id = get_main_user_id()
+
+
+    grants = response['Grants']
+    check_grants(
+        grants,
+        [
+            dict(
+                Permission='READ',
+                ID=None,
+                DisplayName=None,
+                URI='http://acs.amazonaws.com/groups/global/AllUsers',
+                EmailAddress=None,
+                Type='Group',
+                ),
+            dict(
+                Permission='FULL_CONTROL',
+                ID=user_id,
+                DisplayName=display_name,
+                URI=None,
+                EmailAddress=None,
+                Type='CanonicalUser',
+                ),
+            ],
+        )
+
+def test_object_acl_canned():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    # Since it defaults to private, set it public-read first
+    client.put_object(ACL='public-read', Bucket=bucket_name, Key='foo', Body='bar')
+    response = client.get_object_acl(Bucket=bucket_name, Key='foo')
+
+    display_name = get_main_display_name()
+    user_id = get_main_user_id()
+
+    grants = response['Grants']
+    check_grants(
+        grants,
+        [
+            dict(
+                Permission='READ',
+                ID=None,
+                DisplayName=None,
+                URI='http://acs.amazonaws.com/groups/global/AllUsers',
+                EmailAddress=None,
+                Type='Group',
+                ),
+            dict(
+                Permission='FULL_CONTROL',
+                ID=user_id,
+                DisplayName=display_name,
+                URI=None,
+                EmailAddress=None,
+                Type='CanonicalUser',
+                ),
+            ],
+        )
+
+    # Then back to private.
+    client.put_object_acl(ACL='private', Bucket=bucket_name, Key='foo')
+    response = client.get_object_acl(Bucket=bucket_name, Key='foo')
+    grants = response['Grants']
+
+    check_grants(
+        grants,
+        [
+            dict(
+                Permission='FULL_CONTROL',
+                ID=user_id,
+                DisplayName=display_name,
+                URI=None,
+                EmailAddress=None,
+                Type='CanonicalUser',
+                ),
+            ],
+        )
+
+def test_object_acl_canned_publicreadwrite():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    client.put_object(ACL='public-read-write', Bucket=bucket_name, Key='foo', Body='bar')
+    response = client.get_object_acl(Bucket=bucket_name, Key='foo')
+
+    display_name = get_main_display_name()
+    user_id = get_main_user_id()
+
+    grants = response['Grants']
+    check_grants(
+        grants,
+        [
+            dict(
+                Permission='READ',
+                ID=None,
+                DisplayName=None,
+                URI='http://acs.amazonaws.com/groups/global/AllUsers',
+                EmailAddress=None,
+                Type='Group',
+                ),
+            dict(
+                Permission='WRITE',
+                ID=None,
+                DisplayName=None,
+                URI='http://acs.amazonaws.com/groups/global/AllUsers',
+                EmailAddress=None,
+                Type='Group',
+                ),
+            dict(
+                Permission='FULL_CONTROL',
+                ID=user_id,
+                DisplayName=display_name,
+                URI=None,
+                EmailAddress=None,
+                Type='CanonicalUser',
+                ),
+            ],
+        )
+
+def test_object_acl_canned_authenticatedread():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    client.put_object(ACL='authenticated-read', Bucket=bucket_name, Key='foo', Body='bar')
+    response = client.get_object_acl(Bucket=bucket_name, Key='foo')
+
+    display_name = get_main_display_name()
+    user_id = get_main_user_id()
+
+    grants = response['Grants']
+    check_grants(
+        grants,
+        [
+            dict(
+                Permission='READ',
+                ID=None,
+                DisplayName=None,
+                URI='http://acs.amazonaws.com/groups/global/AuthenticatedUsers',
+                EmailAddress=None,
+                Type='Group',
+                ),
+            dict(
+                Permission='FULL_CONTROL',
+                ID=user_id,
+                DisplayName=display_name,
+                URI=None,
+                EmailAddress=None,
+                Type='CanonicalUser',
+                ),
+            ],
+        )
+
+def test_object_acl_canned_bucketownerread():
+    bucket_name = get_new_bucket_name()
+    main_client = get_client()
+    alt_client = get_alt_client()
+
+    main_client.create_bucket(Bucket=bucket_name, ACL='public-read-write')
+
+    alt_client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
+
+    bucket_acl_response = main_client.get_bucket_acl(Bucket=bucket_name)
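+    # the bucket is public-read-write, so Grants[2] is expected to be the
+    # owner's FULL_CONTROL entry (after the AllUsers READ and WRITE grants)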
+    bucket_owner_id = bucket_acl_response['Grants'][2]['Grantee']['ID']
+    bucket_owner_display_name = bucket_acl_response['Grants'][2]['Grantee']['DisplayName']
+
+    alt_client.put_object(ACL='bucket-owner-read', Bucket=bucket_name, Key='foo')
+    response = alt_client.get_object_acl(Bucket=bucket_name, Key='foo')
+
+    alt_display_name = get_alt_display_name()
+    alt_user_id = get_alt_user_id()
+
+    grants = response['Grants']
+    check_grants(
+        grants,
+        [
+            dict(
+                Permission='FULL_CONTROL',
+                ID=alt_user_id,
+                DisplayName=alt_display_name,
+                URI=None,
+                EmailAddress=None,
+                Type='CanonicalUser',
+                ),
+            dict(
+                Permission='READ',
+                ID=bucket_owner_id,
+                DisplayName=bucket_owner_display_name,
+                URI=None,
+                EmailAddress=None,
+                Type='CanonicalUser',
+                ),
+            ],
+        )
+
+def test_object_acl_canned_bucketownerfullcontrol():
+    bucket_name = get_new_bucket_name()
+    main_client = get_client()
+    alt_client = get_alt_client()
+
+    main_client.create_bucket(Bucket=bucket_name, ACL='public-read-write')
+
+    alt_client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
+
+    bucket_acl_response = main_client.get_bucket_acl(Bucket=bucket_name)
+    bucket_owner_id = bucket_acl_response['Grants'][2]['Grantee']['ID']
+    bucket_owner_display_name = bucket_acl_response['Grants'][2]['Grantee']['DisplayName']
+
+    alt_client.put_object(ACL='bucket-owner-full-control', Bucket=bucket_name, Key='foo')
+    response = alt_client.get_object_acl(Bucket=bucket_name, Key='foo')
+
+    alt_display_name = get_alt_display_name()
+    alt_user_id = get_alt_user_id()
+
+    grants = response['Grants']
+    check_grants(
+        grants,
+        [
+            dict(
+                Permission='FULL_CONTROL',
+                ID=alt_user_id,
+                DisplayName=alt_display_name,
+                URI=None,
+                EmailAddress=None,
+                Type='CanonicalUser',
+                ),
+            dict(
+                Permission='FULL_CONTROL',
+                ID=bucket_owner_id,
+                DisplayName=bucket_owner_display_name,
+                URI=None,
+                EmailAddress=None,
+                Type='CanonicalUser',
+                ),
+            ],
+        )
+
+@pytest.mark.fails_on_aws
+def test_object_acl_full_control_verify_owner():
+    bucket_name = get_new_bucket_name()
+    main_client = get_client()
+    alt_client = get_alt_client()
+
+    main_client.create_bucket(Bucket=bucket_name, ACL='public-read-write')
+
+    main_client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
+
+    alt_user_id = get_alt_user_id()
+    alt_display_name = get_alt_display_name()
+
+    main_user_id = get_main_user_id()
+    main_display_name = get_main_display_name()
+
+    grant = { 'Grants': [{'Grantee': {'ID': alt_user_id, 'Type': 'CanonicalUser' }, 'Permission': 'FULL_CONTROL'}], 'Owner': {'DisplayName': main_display_name, 'ID': main_user_id}}
+
+    main_client.put_object_acl(Bucket=bucket_name, Key='foo', AccessControlPolicy=grant)
+
+    grant = { 'Grants': [{'Grantee': {'ID': alt_user_id, 'Type': 'CanonicalUser' }, 'Permission': 'READ_ACP'}], 'Owner': {'DisplayName': main_display_name, 'ID': main_user_id}}
+
+    alt_client.put_object_acl(Bucket=bucket_name, Key='foo', AccessControlPolicy=grant)
+
+    response = alt_client.get_object_acl(Bucket=bucket_name, Key='foo')
+    assert response['Owner']['ID'] == main_user_id
+
+def add_obj_user_grant(bucket_name, key, grant):
+    """
+    Adds a grant to the existing grants, meant to be passed into the
+    AccessControlPolicy argument of put_object_acl for an object
+    owned by the main user, not the alt user.
+    A grant is a dictionary in the form of:
+    {u'Grantee': {u'Type': 'type', u'DisplayName': 'name', u'ID': 'id'}, u'Permission': 'PERM'}
+
+    """
+    client = get_client()
+    main_user_id = get_main_user_id()
+    main_display_name = get_main_display_name()
+
+    response = client.get_object_acl(Bucket=bucket_name, Key=key)
+
+    grants = response['Grants']
+    grants.append(grant)
+
+    grant = {'Grants': grants, 'Owner': {'DisplayName': main_display_name, 'ID': main_user_id}}
+
+    return grant
+
+def test_object_acl_full_control_verify_attributes():
+    bucket_name = get_new_bucket_name()
+    main_client = get_client()
+    alt_client = get_alt_client()
+
+    main_client.create_bucket(Bucket=bucket_name, ACL='public-read-write')
+
+    header = {'x-amz-foo': 'bar'}
+    # lambda used as an event handler to add the custom header to the request
+    add_header = (lambda **kwargs: kwargs['params']['headers'].update(header))
+
+    main_client.meta.events.register('before-call.s3.PutObject', add_header)
+    main_client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
+
+    response = main_client.get_object(Bucket=bucket_name, Key='foo')
+    content_type = response['ContentType']
+    etag = response['ETag']
+
+    alt_user_id = get_alt_user_id()
+
+    grant = {'Grantee': {'ID': alt_user_id, 'Type': 'CanonicalUser' }, 'Permission': 'FULL_CONTROL'}
+
+    grants = add_obj_user_grant(bucket_name, 'foo', grant)
+
+    main_client.put_object_acl(Bucket=bucket_name, Key='foo', AccessControlPolicy=grants)
+
+    response = main_client.get_object(Bucket=bucket_name, Key='foo')
+    assert content_type == response['ContentType']
+    assert etag == response['ETag']
+
+def test_bucket_acl_canned_private_to_private():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    response = client.put_bucket_acl(Bucket=bucket_name, ACL='private')
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+def add_bucket_user_grant(bucket_name, grant):
+    """
+    Adds a grant to the existing grants, meant to be passed into the
+    AccessControlPolicy argument of put_bucket_acl for a bucket
+    owned by the main user, not the alt user.
+    A grant is a dictionary in the form of:
+    {u'Grantee': {u'Type': 'type', u'DisplayName': 'name', u'ID': 'id'}, u'Permission': 'PERM'}
+    """
+    client = get_client()
+    main_user_id = get_main_user_id()
+    main_display_name = get_main_display_name()
+
+    response = client.get_bucket_acl(Bucket=bucket_name)
+
+    grants = response['Grants']
+    grants.append(grant)
+
+    grant = {'Grants': grants, 'Owner': {'DisplayName': main_display_name, 'ID': main_user_id}}
+
+    return grant
+
+def _check_object_acl(permission):
+    """
+    Sets the permission on an object then checks to see
+    if it was set
+    """
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
+
+    response = client.get_object_acl(Bucket=bucket_name, Key='foo')
+
+    policy = {}
+    policy['Owner'] = response['Owner']
+    policy['Grants'] = response['Grants']
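+    # replace the owner's default FULL_CONTROL grant with the requested permission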
+    policy['Grants'][0]['Permission'] = permission
+
+    client.put_object_acl(Bucket=bucket_name, Key='foo', AccessControlPolicy=policy)
+
+    response = client.get_object_acl(Bucket=bucket_name, Key='foo')
+    grants = response['Grants']
+
+    main_user_id = get_main_user_id()
+    main_display_name = get_main_display_name()
+
+    check_grants(
+        grants,
+        [
+            dict(
+                Permission=permission,
+                ID=main_user_id,
+                DisplayName=main_display_name,
+                URI=None,
+                EmailAddress=None,
+                Type='CanonicalUser',
+                ),
+            ],
+        )
+
+
+@pytest.mark.fails_on_aws
+def test_object_acl():
+    _check_object_acl('FULL_CONTROL')
+
+@pytest.mark.fails_on_aws
+def test_object_acl_write():
+    _check_object_acl('WRITE')
+
+@pytest.mark.fails_on_aws
+def test_object_acl_writeacp():
+    _check_object_acl('WRITE_ACP')
+
+
+@pytest.mark.fails_on_aws
+def test_object_acl_read():
+    _check_object_acl('READ')
+
+
+@pytest.mark.fails_on_aws
+def test_object_acl_readacp():
+    _check_object_acl('READ_ACP')
+
+
+def _bucket_acl_grant_userid(permission):
+    """
+    create a new bucket, grant a specific user the specified
+    permission, read back the acl and verify correct setting
+    """
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    main_user_id = get_main_user_id()
+    main_display_name = get_main_display_name()
+
+    alt_user_id = get_alt_user_id()
+    alt_display_name = get_alt_display_name()
+
+    grant = {'Grantee': {'ID': alt_user_id, 'Type': 'CanonicalUser' }, 'Permission': permission}
+
+    grant = add_bucket_user_grant(bucket_name, grant)
+
+    client.put_bucket_acl(Bucket=bucket_name, AccessControlPolicy=grant)
+
+    response = client.get_bucket_acl(Bucket=bucket_name)
+
+    grants = response['Grants']
+    check_grants(
+        grants,
+        [
+            dict(
+                Permission=permission,
+                ID=alt_user_id,
+                DisplayName=alt_display_name,
+                URI=None,
+                EmailAddress=None,
+                Type='CanonicalUser',
+                ),
+            dict(
+                Permission='FULL_CONTROL',
+                ID=main_user_id,
+                DisplayName=main_display_name,
+                URI=None,
+                EmailAddress=None,
+                Type='CanonicalUser',
+                ),
+            ],
+        )
+
+    return bucket_name
+
+def _check_bucket_acl_grant_can_read(bucket_name):
+    """
+    verify ability to read the specified bucket
+    """
+    alt_client = get_alt_client()
+    response = alt_client.head_bucket(Bucket=bucket_name)
+
+def _check_bucket_acl_grant_cant_read(bucket_name):
+    """
+    verify inability to read the specified bucket
+    """
+    alt_client = get_alt_client()
+    check_access_denied(alt_client.head_bucket, Bucket=bucket_name)
+
+def _check_bucket_acl_grant_can_readacp(bucket_name):
+    """
+    verify ability to read acls on specified bucket
+    """
+    alt_client = get_alt_client()
+    alt_client.get_bucket_acl(Bucket=bucket_name)
+
+def _check_bucket_acl_grant_cant_readacp(bucket_name):
+    """
+    verify inability to read acls on specified bucket
+    """
+    alt_client = get_alt_client()
+    check_access_denied(alt_client.get_bucket_acl, Bucket=bucket_name)
+
+def _check_bucket_acl_grant_can_write(bucket_name):
+    """
+    verify ability to write the specified bucket
+    """
+    alt_client = get_alt_client()
+    alt_client.put_object(Bucket=bucket_name, Key='foo-write', Body='bar')
+
+def _check_bucket_acl_grant_cant_write(bucket_name):
+    """
+    verify inability to write the specified bucket
+    """
+    alt_client = get_alt_client()
+    check_access_denied(alt_client.put_object, Bucket=bucket_name, Key='foo-write', Body='bar')
+
+def _check_bucket_acl_grant_can_writeacp(bucket_name):
+    """
+    verify ability to set acls on the specified bucket
+    """
+    alt_client = get_alt_client()
+    alt_client.put_bucket_acl(Bucket=bucket_name, ACL='public-read')
+
+def _check_bucket_acl_grant_cant_writeacp(bucket_name):
+    """
+    verify inability to set acls on the specified bucket
+    """
+    alt_client = get_alt_client()
+    check_access_denied(alt_client.put_bucket_acl, Bucket=bucket_name, ACL='public-read')
+
+@pytest.mark.fails_on_aws
+def test_bucket_acl_grant_userid_fullcontrol():
+    bucket_name = _bucket_acl_grant_userid('FULL_CONTROL')
+
+    # alt user can read
+    _check_bucket_acl_grant_can_read(bucket_name)
+    # can read acl
+    _check_bucket_acl_grant_can_readacp(bucket_name)
+    # can write
+    _check_bucket_acl_grant_can_write(bucket_name)
+    # can write acl
+    _check_bucket_acl_grant_can_writeacp(bucket_name)
+
+    client = get_client()
+
+    bucket_acl_response = client.get_bucket_acl(Bucket=bucket_name)
+    owner_id = bucket_acl_response['Owner']['ID']
+    owner_display_name = bucket_acl_response['Owner']['DisplayName']
+
+    main_display_name = get_main_display_name()
+    main_user_id = get_main_user_id()
+
+    assert owner_id == main_user_id
+    assert owner_display_name == main_display_name
+
+@pytest.mark.fails_on_aws
+def test_bucket_acl_grant_userid_read():
+    bucket_name = _bucket_acl_grant_userid('READ')
+
+    # alt user can read
+    _check_bucket_acl_grant_can_read(bucket_name)
+    # can't read acl
+    _check_bucket_acl_grant_cant_readacp(bucket_name)
+    # can't write
+    _check_bucket_acl_grant_cant_write(bucket_name)
+    # can't write acl
+    _check_bucket_acl_grant_cant_writeacp(bucket_name)
+
+@pytest.mark.fails_on_aws
+def test_bucket_acl_grant_userid_readacp():
+    bucket_name = _bucket_acl_grant_userid('READ_ACP')
+
+    # alt user can't read
+    _check_bucket_acl_grant_cant_read(bucket_name)
+    # can read acl
+    _check_bucket_acl_grant_can_readacp(bucket_name)
+    # can't write
+    _check_bucket_acl_grant_cant_write(bucket_name)
+    # can't write acl
+    _check_bucket_acl_grant_cant_writeacp(bucket_name)
+
+@pytest.mark.fails_on_aws
+def test_bucket_acl_grant_userid_write():
+    bucket_name = _bucket_acl_grant_userid('WRITE')
+
+    # alt user can't read
+    _check_bucket_acl_grant_cant_read(bucket_name)
+    # can't read acl
+    _check_bucket_acl_grant_cant_readacp(bucket_name)
+    # can write
+    _check_bucket_acl_grant_can_write(bucket_name)
+    # can't write acl
+    _check_bucket_acl_grant_cant_writeacp(bucket_name)
+
+@pytest.mark.fails_on_aws
+def test_bucket_acl_grant_userid_writeacp():
+    bucket_name = _bucket_acl_grant_userid('WRITE_ACP')
+
+    # alt user can't read
+    _check_bucket_acl_grant_cant_read(bucket_name)
+    # can't read acl
+    _check_bucket_acl_grant_cant_readacp(bucket_name)
+    # can't write
+    _check_bucket_acl_grant_cant_write(bucket_name)
+    # can write acl
+    _check_bucket_acl_grant_can_writeacp(bucket_name)
+
+def test_bucket_acl_grant_nonexist_user():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    bad_user_id = '_foo'
+
+    grant = {'Grantee': {'ID': bad_user_id, 'Type': 'CanonicalUser' }, 'Permission': 'FULL_CONTROL'}
+
+    grant = add_bucket_user_grant(bucket_name, grant)
+
+    e = assert_raises(ClientError, client.put_bucket_acl, Bucket=bucket_name, AccessControlPolicy=grant)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 400
+    assert error_code == 'InvalidArgument'
+
+def _get_acl_header(user_id=None, perms=None):
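+    # build a list of (header name, value) tuples that grant the given user the
+    # listed permissions via x-amz-grant-* headers; default to the alt user and
+    # all permissions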
+    all_headers = ["read", "write", "read-acp", "write-acp", "full-control"]
+    headers = []
+
+    if user_id is None:
+        user_id = get_alt_user_id()
+
+    if perms is not None:
+        for perm in perms:
+            header = ("x-amz-grant-{perm}".format(perm=perm), "id={uid}".format(uid=user_id))
+            headers.append(header)
+
+    else:
+        for perm in all_headers:
+            header = ("x-amz-grant-{perm}".format(perm=perm), "id={uid}".format(uid=user_id))
+            headers.append(header)
+
+    return headers
+
+@pytest.mark.fails_on_dho
+@pytest.mark.fails_on_aws
+def test_object_header_acl_grants():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    alt_user_id = get_alt_user_id()
+    alt_display_name = get_alt_display_name()
+
+    headers = _get_acl_header()
+
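+    # append the x-amz-grant-* headers to the raw request just before signing
+    # so that they are covered by the request signature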
+    def add_headers_before_sign(**kwargs):
+        updated_headers = (kwargs['request'].__dict__['headers'].__dict__['_headers'] + headers)
+        kwargs['request'].__dict__['headers'].__dict__['_headers'] = updated_headers
+
+    client.meta.events.register('before-sign.s3.PutObject', add_headers_before_sign)
+
+    client.put_object(Bucket=bucket_name, Key='foo_key', Body='bar')
+
+    response = client.get_object_acl(Bucket=bucket_name, Key='foo_key')
+
+    grants = response['Grants']
+    check_grants(
+        grants,
+        [
+            dict(
+                Permission='READ',
+                ID=alt_user_id,
+                DisplayName=alt_display_name,
+                URI=None,
+                EmailAddress=None,
+                Type='CanonicalUser',
+                ),
+            dict(
+                Permission='WRITE',
+                ID=alt_user_id,
+                DisplayName=alt_display_name,
+                URI=None,
+                EmailAddress=None,
+                Type='CanonicalUser',
+                ),
+            dict(
+                Permission='READ_ACP',
+                ID=alt_user_id,
+                DisplayName=alt_display_name,
+                URI=None,
+                EmailAddress=None,
+                Type='CanonicalUser',
+                ),
+            dict(
+                Permission='WRITE_ACP',
+                ID=alt_user_id,
+                DisplayName=alt_display_name,
+                URI=None,
+                EmailAddress=None,
+                Type='CanonicalUser',
+                ),
+            dict(
+                Permission='FULL_CONTROL',
+                ID=alt_user_id,
+                DisplayName=alt_display_name,
+                URI=None,
+                EmailAddress=None,
+                Type='CanonicalUser',
+                ),
+            ],
+        )
+
+@pytest.mark.fails_on_dho
+@pytest.mark.fails_on_aws
+def test_bucket_header_acl_grants():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+
+    headers = _get_acl_header()
+
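+    # append the x-amz-grant-* headers to the raw request just before signing
+    # so that they are covered by the request signature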
+    def add_headers_before_sign(**kwargs):
+        updated_headers = (kwargs['request'].__dict__['headers'].__dict__['_headers'] + headers)
+        kwargs['request'].__dict__['headers'].__dict__['_headers'] = updated_headers
+
+    client.meta.events.register('before-sign.s3.CreateBucket', add_headers_before_sign)
+
+    client.create_bucket(Bucket=bucket_name)
+
+    response = client.get_bucket_acl(Bucket=bucket_name)
+
+    grants = response['Grants']
+    alt_user_id = get_alt_user_id()
+    alt_display_name = get_alt_display_name()
+
+    check_grants(
+        grants,
+        [
+            dict(
+                Permission='READ',
+                ID=alt_user_id,
+                DisplayName=alt_display_name,
+                URI=None,
+                EmailAddress=None,
+                Type='CanonicalUser',
+                ),
+            dict(
+                Permission='WRITE',
+                ID=alt_user_id,
+                DisplayName=alt_display_name,
+                URI=None,
+                EmailAddress=None,
+                Type='CanonicalUser',
+                ),
+            dict(
+                Permission='READ_ACP',
+                ID=alt_user_id,
+                DisplayName=alt_display_name,
+                URI=None,
+                EmailAddress=None,
+                Type='CanonicalUser',
+                ),
+            dict(
+                Permission='WRITE_ACP',
+                ID=alt_user_id,
+                DisplayName=alt_display_name,
+                URI=None,
+                EmailAddress=None,
+                Type='CanonicalUser',
+                ),
+            dict(
+                Permission='FULL_CONTROL',
+                ID=alt_user_id,
+                DisplayName=alt_display_name,
+                URI=None,
+                EmailAddress=None,
+                Type='CanonicalUser',
+                ),
+            ],
+        )
+
+    alt_client = get_alt_client()
+
+    alt_client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
+
+    # set bucket acl to public-read-write so that teardown can work
+    alt_client.put_bucket_acl(Bucket=bucket_name, ACL='public-read-write')
+
+
+# This test will fail on DH Objects. DHO allows multiple users with one account, which
+# would violate the uniqueness requirement of a user's email. As such, DHO users are
+# created without an email.
+@pytest.mark.fails_on_aws
+def test_bucket_acl_grant_email():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    alt_user_id = get_alt_user_id()
+    alt_display_name = get_alt_display_name()
+    alt_email_address = get_alt_email()
+
+    main_user_id = get_main_user_id()
+    main_display_name = get_main_display_name()
+
+    grant = {'Grantee': {'EmailAddress': alt_email_address, 'Type': 'AmazonCustomerByEmail' }, 'Permission': 'FULL_CONTROL'}
+
+    grant = add_bucket_user_grant(bucket_name, grant)
+
+    client.put_bucket_acl(Bucket=bucket_name, AccessControlPolicy=grant)
+
+    response = client.get_bucket_acl(Bucket=bucket_name)
+
+    grants = response['Grants']
+    check_grants(
+        grants,
+        [
+            dict(
+                Permission='FULL_CONTROL',
+                ID=alt_user_id,
+                DisplayName=alt_display_name,
+                URI=None,
+                EmailAddress=None,
+                Type='CanonicalUser',
+                ),
+            dict(
+                Permission='FULL_CONTROL',
+                ID=main_user_id,
+                DisplayName=main_display_name,
+                URI=None,
+                EmailAddress=None,
+                Type='CanonicalUser',
+                ),
+        ]
+    )
+
+def test_bucket_acl_grant_email_not_exist():
+    # behavior not documented by amazon
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    alt_user_id = get_alt_user_id()
+    alt_display_name = get_alt_display_name()
+    alt_email_address = get_alt_email()
+
+    NONEXISTENT_EMAIL = 'doesnotexist@dreamhost.com.invalid'
+    grant = {'Grantee': {'EmailAddress': NONEXISTENT_EMAIL, 'Type': 'AmazonCustomerByEmail'}, 'Permission': 'FULL_CONTROL'}
+
+    grant = add_bucket_user_grant(bucket_name, grant)
+
+    e = assert_raises(ClientError, client.put_bucket_acl, Bucket=bucket_name, AccessControlPolicy=grant)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 400
+    assert error_code == 'UnresolvableGrantByEmailAddress'
+
+def test_bucket_acl_revoke_all():
+    # revoke all access, including the owner's access
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
+    response = client.get_bucket_acl(Bucket=bucket_name)
+    old_grants = response['Grants']
+    policy = {}
+    policy['Owner'] = response['Owner']
+    # clear grants
+    policy['Grants'] = []
+
+    # remove read/write permission for everyone
+    client.put_bucket_acl(Bucket=bucket_name, AccessControlPolicy=policy)
+
+    response = client.get_bucket_acl(Bucket=bucket_name)
+
+    assert len(response['Grants']) == 0
+
+    # set policy back to original so that bucket can be cleaned up
+    policy['Grants'] = old_grants
+    client.put_bucket_acl(Bucket=bucket_name, AccessControlPolicy=policy)
+
+# TODO rgw log_bucket.set_as_logging_target() gives 403 Forbidden
+# http://tracker.newdream.net/issues/984
+@pytest.mark.fails_on_rgw
+def test_logging_toggle():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    main_display_name = get_main_display_name()
+    main_user_id = get_main_user_id()
+
+    status = {'LoggingEnabled': {
+        'TargetBucket': bucket_name,
+        'TargetGrants': [{
+            'Grantee': {'DisplayName': main_display_name, 'ID': main_user_id, 'Type': 'CanonicalUser'},
+            'Permission': 'FULL_CONTROL',
+        }],
+        'TargetPrefix': 'foologgingprefix',
+    }}
+
+    client.put_bucket_logging(Bucket=bucket_name, BucketLoggingStatus=status)
+    client.get_bucket_logging(Bucket=bucket_name)
+    status = {'LoggingEnabled': {}}
+    client.put_bucket_logging(Bucket=bucket_name, BucketLoggingStatus=status)
+    # NOTE: this does not actually test whether or not logging works
+
+def _setup_access(bucket_acl, object_acl):
+    """
+    Simple test fixture: create a bucket with given ACL, with objects:
+    - a: owning user, given ACL
+    - a2: same object accessed by some other user
+    - b: owning user, default ACL in bucket w/given ACL
+    - b2: same object accessed by some other user
+    """
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    key1 = 'foo'
+    key2 = 'bar'
+    newkey = 'new'
+
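+    # key1 gets the explicit object ACL, key2 keeps the default object ACL, and
+    # newkey is never created (only used for write attempts)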
+    client.put_bucket_acl(Bucket=bucket_name, ACL=bucket_acl)
+    client.put_object(Bucket=bucket_name, Key=key1, Body='foocontent')
+    client.put_object_acl(Bucket=bucket_name, Key=key1, ACL=object_acl)
+    client.put_object(Bucket=bucket_name, Key=key2, Body='barcontent')
+
+    return bucket_name, key1, key2, newkey
+
+def get_bucket_key_names(bucket_name):
+    objs_list = get_objects_list(bucket_name)
+    return frozenset(obj for obj in objs_list)
+
+def list_bucket_storage_class(client, bucket_name):
+    result = defaultdict(list)
+    response  = client.list_object_versions(Bucket=bucket_name)
+    for k in response['Versions']:
+        result[k['StorageClass']].append(k)
+
+    return result
+
+def list_bucket_versions(client, bucket_name):
+    result = defaultdict(list)
+    response  = client.list_object_versions(Bucket=bucket_name)
+    for k in response['Versions']:
+        result[response['Name']].append(k)
+
+    return result
+
+def test_access_bucket_private_object_private():
+    # all the test_access_* tests follow this template
+    bucket_name, key1, key2, newkey = _setup_access(bucket_acl='private', object_acl='private')
+
+    alt_client = get_alt_client()
+    # acled object read fail
+    check_access_denied(alt_client.get_object, Bucket=bucket_name, Key=key1)
+    # default object read fail
+    check_access_denied(alt_client.get_object, Bucket=bucket_name, Key=key2)
+    # bucket read fail
+    check_access_denied(alt_client.list_objects, Bucket=bucket_name)
+
+    # acled object write fail
+    check_access_denied(alt_client.put_object, Bucket=bucket_name, Key=key1, Body='barcontent')
+    # NOTE: The put above causes the connection to go bad, so the client can't be
+    # used anymore. This can be solved either by:
+    # 1) putting an empty string ('') in the 'Body' field of those put_object calls
+    # 2) getting a new client, hence the creation of alt_client{2,3} for the checks below
+    # TODO: Test it from another host and on AWS; report this to Amazon if findings are identical
+
+    alt_client2 = get_alt_client()
+    # default object write fail
+    check_access_denied(alt_client2.put_object, Bucket=bucket_name, Key=key2, Body='baroverwrite')
+    # bucket write fail
+    alt_client3 = get_alt_client()
+    check_access_denied(alt_client3.put_object, Bucket=bucket_name, Key=newkey, Body='newcontent')
+
+@pytest.mark.list_objects_v2
+def test_access_bucket_private_objectv2_private():
+    # all the test_access_* tests follow this template
+    bucket_name, key1, key2, newkey = _setup_access(bucket_acl='private', object_acl='private')
+
+    alt_client = get_alt_client()
+    # acled object read fail
+    check_access_denied(alt_client.get_object, Bucket=bucket_name, Key=key1)
+    # default object read fail
+    check_access_denied(alt_client.get_object, Bucket=bucket_name, Key=key2)
+    # bucket read fail
+    check_access_denied(alt_client.list_objects_v2, Bucket=bucket_name)
+
+    # acled object write fail
+    check_access_denied(alt_client.put_object, Bucket=bucket_name, Key=key1, Body='barcontent')
+    # NOTE: The put above causes the connection to go bad, so the client can't be
+    # used anymore. This can be solved either by:
+    # 1) putting an empty string ('') in the 'Body' field of those put_object calls
+    # 2) getting a new client, hence the creation of alt_client{2,3} for the checks below
+    # TODO: Test it from another host and on AWS; report this to Amazon if findings are identical
+
+    alt_client2 = get_alt_client()
+    # default object write fail
+    check_access_denied(alt_client2.put_object, Bucket=bucket_name, Key=key2, Body='baroverwrite')
+    # bucket write fail
+    alt_client3 = get_alt_client()
+    check_access_denied(alt_client3.put_object, Bucket=bucket_name, Key=newkey, Body='newcontent')
+
+def test_access_bucket_private_object_publicread():
+
+    bucket_name, key1, key2, newkey = _setup_access(bucket_acl='private', object_acl='public-read')
+    alt_client = get_alt_client()
+    response = alt_client.get_object(Bucket=bucket_name, Key=key1)
+
+    body = _get_body(response)
+
+    # a should be public-read, b gets default (private)
+    assert body == 'foocontent'
+
+    check_access_denied(alt_client.put_object, Bucket=bucket_name, Key=key1, Body='foooverwrite')
+    alt_client2 = get_alt_client()
+    check_access_denied(alt_client2.get_object, Bucket=bucket_name, Key=key2)
+    check_access_denied(alt_client2.put_object, Bucket=bucket_name, Key=key2, Body='baroverwrite')
+
+    alt_client3 = get_alt_client()
+    check_access_denied(alt_client3.list_objects, Bucket=bucket_name)
+    check_access_denied(alt_client3.put_object, Bucket=bucket_name, Key=newkey, Body='newcontent')
+
+@pytest.mark.list_objects_v2
+def test_access_bucket_private_objectv2_publicread():
+
+    bucket_name, key1, key2, newkey = _setup_access(bucket_acl='private', object_acl='public-read')
+    alt_client = get_alt_client()
+    response = alt_client.get_object(Bucket=bucket_name, Key=key1)
+
+    body = _get_body(response)
+
+    # a should be public-read, b gets default (private)
+    assert body == 'foocontent'
+
+    check_access_denied(alt_client.put_object, Bucket=bucket_name, Key=key1, Body='foooverwrite')
+    alt_client2 = get_alt_client()
+    check_access_denied(alt_client2.get_object, Bucket=bucket_name, Key=key2)
+    check_access_denied(alt_client2.put_object, Bucket=bucket_name, Key=key2, Body='baroverwrite')
+
+    alt_client3 = get_alt_client()
+    check_access_denied(alt_client3.list_objects_v2, Bucket=bucket_name)
+    check_access_denied(alt_client3.put_object, Bucket=bucket_name, Key=newkey, Body='newcontent')
+
+def test_access_bucket_private_object_publicreadwrite():
+    bucket_name, key1, key2, newkey = _setup_access(bucket_acl='private', object_acl='public-read-write')
+    alt_client = get_alt_client()
+    response = alt_client.get_object(Bucket=bucket_name, Key=key1)
+
+    body = _get_body(response)
+
+    # a should be public-read-only ... because it is in a private bucket
+    # b gets default (private)
+    assert body == 'foocontent'
+
+    check_access_denied(alt_client.put_object, Bucket=bucket_name, Key=key1, Body='foooverwrite')
+    alt_client2 = get_alt_client()
+    check_access_denied(alt_client2.get_object, Bucket=bucket_name, Key=key2)
+    check_access_denied(alt_client2.put_object, Bucket=bucket_name, Key=key2, Body='baroverwrite')
+
+    alt_client3 = get_alt_client()
+    check_access_denied(alt_client3.list_objects, Bucket=bucket_name)
+    check_access_denied(alt_client3.put_object, Bucket=bucket_name, Key=newkey, Body='newcontent')
+
+@pytest.mark.list_objects_v2
+def test_access_bucket_private_objectv2_publicreadwrite():
+    bucket_name, key1, key2, newkey = _setup_access(bucket_acl='private', object_acl='public-read-write')
+    alt_client = get_alt_client()
+    response = alt_client.get_object(Bucket=bucket_name, Key=key1)
+
+    body = _get_body(response)
+
+    # a should be public-read-only ... because it is in a private bucket
+    # b gets default (private)
+    assert body == 'foocontent'
+
+    check_access_denied(alt_client.put_object, Bucket=bucket_name, Key=key1, Body='foooverwrite')
+    alt_client2 = get_alt_client()
+    check_access_denied(alt_client2.get_object, Bucket=bucket_name, Key=key2)
+    check_access_denied(alt_client2.put_object, Bucket=bucket_name, Key=key2, Body='baroverwrite')
+
+    alt_client3 = get_alt_client()
+    check_access_denied(alt_client3.list_objects_v2, Bucket=bucket_name)
+    check_access_denied(alt_client3.put_object, Bucket=bucket_name, Key=newkey, Body='newcontent')
+
+def test_access_bucket_publicread_object_private():
+    bucket_name, key1, key2, newkey = _setup_access(bucket_acl='public-read', object_acl='private')
+    alt_client = get_alt_client()
+
+    # a should be private, b gets default (private)
+    check_access_denied(alt_client.get_object, Bucket=bucket_name, Key=key1)
+    check_access_denied(alt_client.put_object, Bucket=bucket_name, Key=key1, Body='barcontent')
+
+    alt_client2 = get_alt_client()
+    check_access_denied(alt_client2.get_object, Bucket=bucket_name, Key=key2)
+    check_access_denied(alt_client2.put_object, Bucket=bucket_name, Key=key2, Body='baroverwrite')
+
+    alt_client3 = get_alt_client()
+
+    objs = get_objects_list(bucket=bucket_name, client=alt_client3)
+
+    assert objs == ['bar', 'foo']
+    check_access_denied(alt_client3.put_object, Bucket=bucket_name, Key=newkey, Body='newcontent')
+
+def test_access_bucket_publicread_object_publicread():
+    bucket_name, key1, key2, newkey = _setup_access(bucket_acl='public-read', object_acl='public-read')
+    alt_client = get_alt_client()
+
+    response = alt_client.get_object(Bucket=bucket_name, Key=key1)
+
+    # a should be public-read, b gets default (private)
+    body = _get_body(response)
+    assert body == 'foocontent'
+
+    check_access_denied(alt_client.put_object, Bucket=bucket_name, Key=key1, Body='foooverwrite')
+
+    alt_client2 = get_alt_client()
+    check_access_denied(alt_client2.get_object, Bucket=bucket_name, Key=key2)
+    check_access_denied(alt_client2.put_object, Bucket=bucket_name, Key=key2, Body='baroverwrite')
+
+    alt_client3 = get_alt_client()
+
+    objs = get_objects_list(bucket=bucket_name, client=alt_client3)
+
+    assert objs == ['bar', 'foo']
+    check_access_denied(alt_client3.put_object, Bucket=bucket_name, Key=newkey, Body='newcontent')
+
+
+def test_access_bucket_publicread_object_publicreadwrite():
+    bucket_name, key1, key2, newkey = _setup_access(bucket_acl='public-read', object_acl='public-read-write')
+    alt_client = get_alt_client()
+
+    response = alt_client.get_object(Bucket=bucket_name, Key=key1)
+
+    body = _get_body(response)
+
+    # a should be public-read-only ... because it is in a r/o bucket
+    # b gets default (private)
+    assert body == 'foocontent'
+
+    check_access_denied(alt_client.put_object, Bucket=bucket_name, Key=key1, Body='foooverwrite')
+
+    alt_client2 = get_alt_client()
+    check_access_denied(alt_client2.get_object, Bucket=bucket_name, Key=key2)
+    check_access_denied(alt_client2.put_object, Bucket=bucket_name, Key=key2, Body='baroverwrite')
+
+    alt_client3 = get_alt_client()
+
+    objs = get_objects_list(bucket=bucket_name, client=alt_client3)
+
+    assert objs == ['bar', 'foo']
+    check_access_denied(alt_client3.put_object, Bucket=bucket_name, Key=newkey, Body='newcontent')
+
+
+def test_access_bucket_publicreadwrite_object_private():
+    bucket_name, key1, key2, newkey = _setup_access(bucket_acl='public-read-write', object_acl='private')
+    alt_client = get_alt_client()
+
+    # a should be private, b gets default (private)
+    check_access_denied(alt_client.get_object, Bucket=bucket_name, Key=key1)
+    alt_client.put_object(Bucket=bucket_name, Key=key1, Body='barcontent')
+
+    check_access_denied(alt_client.get_object, Bucket=bucket_name, Key=key2)
+    alt_client.put_object(Bucket=bucket_name, Key=key2, Body='baroverwrite')
+
+    objs = get_objects_list(bucket=bucket_name, client=alt_client)
+    assert objs == ['bar', 'foo']
+    alt_client.put_object(Bucket=bucket_name, Key=newkey, Body='newcontent')
+
+def test_access_bucket_publicreadwrite_object_publicread():
+    bucket_name, key1, key2, newkey = _setup_access(bucket_acl='public-read-write', object_acl='public-read')
+    alt_client = get_alt_client()
+
+    # a should be public-read, b gets default (private)
+    response = alt_client.get_object(Bucket=bucket_name, Key=key1)
+
+    body = _get_body(response)
+    assert body == 'foocontent'
+    alt_client.put_object(Bucket=bucket_name, Key=key1, Body='barcontent')
+
+    check_access_denied(alt_client.get_object, Bucket=bucket_name, Key=key2)
+    alt_client.put_object(Bucket=bucket_name, Key=key2, Body='baroverwrite')
+
+    objs = get_objects_list(bucket=bucket_name, client=alt_client)
+    assert objs == ['bar', 'foo']
+    alt_client.put_object(Bucket=bucket_name, Key=newkey, Body='newcontent')
+
+def test_access_bucket_publicreadwrite_object_publicreadwrite():
+    bucket_name, key1, key2, newkey = _setup_access(bucket_acl='public-read-write', object_acl='public-read-write')
+    alt_client = get_alt_client()
+    response = alt_client.get_object(Bucket=bucket_name, Key=key1)
+    body = _get_body(response)
+
+    # a should be public-read-write, b gets default (private)
+    assert body == 'foocontent'
+    alt_client.put_object(Bucket=bucket_name, Key=key1, Body='foooverwrite')
+    check_access_denied(alt_client.get_object, Bucket=bucket_name, Key=key2)
+    alt_client.put_object(Bucket=bucket_name, Key=key2, Body='baroverwrite')
+    objs = get_objects_list(bucket=bucket_name, client=alt_client)
+    assert objs == ['bar', 'foo']
+    alt_client.put_object(Bucket=bucket_name, Key=newkey, Body='newcontent')
+
+def test_buckets_create_then_list():
+    client = get_client()
+    bucket_names = []
+    for i in range(5):
+        bucket_name = get_new_bucket_name()
+        bucket_names.append(bucket_name)
+
+    for name in bucket_names:
+        client.create_bucket(Bucket=name)
+
+    response = client.list_buckets()
+    bucket_dicts = response['Buckets']
+
+    buckets_list = get_buckets_list()
+
+    for name in bucket_names:
+        if name not in buckets_list:
+            raise RuntimeError("S3 implementation's GET on Service did not return bucket we created: %r" % name)
+
+def test_buckets_list_ctime():
+    # check that creation times are within a day
+    before = datetime.datetime.now(datetime.timezone.utc) - datetime.timedelta(days=1)
+
+    client = get_client()
+    for i in range(5):
+        client.create_bucket(Bucket=get_new_bucket_name())
+
+    response = client.list_buckets()
+    for bucket in response['Buckets']:
+        ctime = bucket['CreationDate']
+        assert before <= ctime, '%r > %r' % (before, ctime)
+
+@pytest.mark.fails_on_aws
+def test_list_buckets_anonymous():
+    # Use an unauthenticated client to emulate anonymous (unsigned) standard
+    # HTTP access.
+    #
+    # While it may have been possible to use httplib directly, doing it this way
+    # takes care of also allowing us to vary the calling format in testing.
+    unauthenticated_client = get_unauthenticated_client()
+    response = unauthenticated_client.list_buckets()
+    assert len(response['Buckets']) == 0
+
+def test_list_buckets_invalid_auth():
+    bad_auth_client = get_bad_auth_client()
+    e = assert_raises(ClientError, bad_auth_client.list_buckets)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 403
+
+def test_list_buckets_bad_auth():
+    main_access_key = get_main_aws_access_key()
+    bad_auth_client = get_bad_auth_client(aws_access_key_id=main_access_key)
+    e = assert_raises(ClientError, bad_auth_client.list_buckets)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 403
+
+@pytest.fixture
+def override_prefix_a():
+    nuke_prefixed_buckets(prefix='a'+get_prefix())
+    yield
+    nuke_prefixed_buckets(prefix='a'+get_prefix())
+
+# this test goes outside the user-configured prefix because it needs to
+# control the initial character of the bucket name
+def test_bucket_create_naming_good_starts_alpha(override_prefix_a):
+    check_good_bucket_name('foo', _prefix='a'+get_prefix())
+
+@pytest.fixture
+def override_prefix_0():
+    nuke_prefixed_buckets(prefix='0'+get_prefix())
+    yield
+    nuke_prefixed_buckets(prefix='0'+get_prefix())
+
+# this test goes outside the user-configured prefix because it needs to
+# control the initial character of the bucket name
+def test_bucket_create_naming_good_starts_digit(override_prefix_0):
+    check_good_bucket_name('foo', _prefix='0'+get_prefix())
+
+def test_bucket_create_naming_good_contains_period():
+    check_good_bucket_name('aaa.111')
+
+def test_bucket_create_naming_good_contains_hyphen():
+    check_good_bucket_name('aaa-111')
+
+def test_bucket_recreate_not_overriding():
+    key_names = ['mykey1', 'mykey2']
+    bucket_name = _create_objects(keys=key_names)
+
+    objs_list = get_objects_list(bucket_name)
+    assert key_names == objs_list
+
+    client = get_client()
+    client.create_bucket(Bucket=bucket_name)
+
+    objs_list = get_objects_list(bucket_name)
+    assert key_names == objs_list
+
+@pytest.mark.fails_on_dbstore
+def test_bucket_create_special_key_names():
+    key_names = [
+        ' ',
+        '"',
+        '$',
+        '%',
+        '&',
+        '\'',
+        '<',
+        '>',
+        '_',
+        '_ ',
+        '_ _',
+        '__',
+    ]
+
+    bucket_name = _create_objects(keys=key_names)
+
+    objs_list = get_objects_list(bucket_name)
+    assert key_names == objs_list
+
+    client = get_client()
+
+    for name in key_names:
+        assert name in objs_list
+        response = client.get_object(Bucket=bucket_name, Key=name)
+        body = _get_body(response)
+        assert name == body
+        client.put_object_acl(Bucket=bucket_name, Key=name, ACL='private')
+
+def test_bucket_list_special_prefix():
+    key_names = ['_bla/1', '_bla/2', '_bla/3', '_bla/4', 'abcd']
+    bucket_name = _create_objects(keys=key_names)
+
+    objs_list = get_objects_list(bucket_name)
+
+    assert len(objs_list) == 5
+
+    objs_list = get_objects_list(bucket_name, prefix='_bla/')
+    assert len(objs_list) == 4
+
+@pytest.mark.fails_on_dbstore
+def test_object_copy_zero_size():
+    key = 'foo123bar'
+    bucket_name = _create_objects(keys=[key])
+    fp_a = FakeWriteFile(0, '')
+    client = get_client()
+    client.put_object(Bucket=bucket_name, Key=key, Body=fp_a)
+
+    copy_source = {'Bucket': bucket_name, 'Key': key}
+
+    client.copy(copy_source, bucket_name, 'bar321foo')
+    response = client.get_object(Bucket=bucket_name, Key='bar321foo')
+    assert response['ContentLength'] == 0
+
+@pytest.mark.fails_on_dbstore
+def test_object_copy_16m():
+    bucket_name = get_new_bucket()
+    key1 = 'obj1'
+    client = get_client()
+    client.put_object(Bucket=bucket_name, Key=key1, Body=bytearray(16*1024*1024))
+
+    copy_source = {'Bucket': bucket_name, 'Key': key1}
+    key2 = 'obj2'
+    client.copy_object(Bucket=bucket_name, Key=key2, CopySource=copy_source)
+    response = client.get_object(Bucket=bucket_name, Key=key2)
+    assert response['ContentLength'] == 16*1024*1024
+
+@pytest.mark.fails_on_dbstore
+def test_object_copy_same_bucket():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    client.put_object(Bucket=bucket_name, Key='foo123bar', Body='foo')
+
+    copy_source = {'Bucket': bucket_name, 'Key': 'foo123bar'}
+
+    client.copy(copy_source, bucket_name, 'bar321foo')
+
+    response = client.get_object(Bucket=bucket_name, Key='bar321foo')
+    body = _get_body(response)
+    assert 'foo' == body
+
+@pytest.mark.fails_on_dbstore
+def test_object_copy_verify_contenttype():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    content_type = 'text/bla'
+    client.put_object(Bucket=bucket_name, ContentType=content_type, Key='foo123bar', Body='foo')
+
+    copy_source = {'Bucket': bucket_name, 'Key': 'foo123bar'}
+
+    client.copy(copy_source, bucket_name, 'bar321foo')
+
+    response = client.get_object(Bucket=bucket_name, Key='bar321foo')
+    body = _get_body(response)
+    assert 'foo' == body
+    response_content_type = response['ContentType']
+    assert response_content_type == content_type
+
+def test_object_copy_to_itself():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    client.put_object(Bucket=bucket_name, Key='foo123bar', Body='foo')
+
+    copy_source = {'Bucket': bucket_name, 'Key': 'foo123bar'}
+
+    e = assert_raises(ClientError, client.copy, copy_source, bucket_name, 'foo123bar')
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 400
+    assert error_code == 'InvalidRequest'
+
+@pytest.mark.fails_on_dbstore
+def test_object_copy_to_itself_with_metadata():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    client.put_object(Bucket=bucket_name, Key='foo123bar', Body='foo')
+    copy_source = {'Bucket': bucket_name, 'Key': 'foo123bar'}
+    metadata = {'foo': 'bar'}
+
+    client.copy_object(Bucket=bucket_name, CopySource=copy_source, Key='foo123bar', Metadata=metadata, MetadataDirective='REPLACE')
+    response = client.get_object(Bucket=bucket_name, Key='foo123bar')
+    assert response['Metadata'] == metadata
+
+@pytest.mark.fails_on_dbstore
+def test_object_copy_diff_bucket():
+    bucket_name1 = get_new_bucket()
+    bucket_name2 = get_new_bucket()
+
+    client = get_client()
+    client.put_object(Bucket=bucket_name1, Key='foo123bar', Body='foo')
+
+    copy_source = {'Bucket': bucket_name1, 'Key': 'foo123bar'}
+
+    client.copy(copy_source, bucket_name2, 'bar321foo')
+
+    response = client.get_object(Bucket=bucket_name2, Key='bar321foo')
+    body = _get_body(response)
+    assert 'foo' == body
+
+def test_object_copy_not_owned_bucket():
+    client = get_client()
+    alt_client = get_alt_client()
+    bucket_name1 = get_new_bucket_name()
+    bucket_name2 = get_new_bucket_name()
+    client.create_bucket(Bucket=bucket_name1)
+    alt_client.create_bucket(Bucket=bucket_name2)
+
+    client.put_object(Bucket=bucket_name1, Key='foo123bar', Body='foo')
+
+    copy_source = {'Bucket': bucket_name1, 'Key': 'foo123bar'}
+
+    e = assert_raises(ClientError, alt_client.copy, copy_source, bucket_name2, 'bar321foo')
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 403
+
+def test_object_copy_not_owned_object_bucket():
+    client = get_client()
+    alt_client = get_alt_client()
+    bucket_name = get_new_bucket_name()
+    client.create_bucket(Bucket=bucket_name)
+    client.put_object(Bucket=bucket_name, Key='foo123bar', Body='foo')
+
+    alt_user_id = get_alt_user_id()
+
+    grant = {'Grantee': {'ID': alt_user_id, 'Type': 'CanonicalUser' }, 'Permission': 'FULL_CONTROL'}
+    grants = add_obj_user_grant(bucket_name, 'foo123bar', grant)
+    client.put_object_acl(Bucket=bucket_name, Key='foo123bar', AccessControlPolicy=grants)
+
+    grant = add_bucket_user_grant(bucket_name, grant)
+    client.put_bucket_acl(Bucket=bucket_name, AccessControlPolicy=grant)
+
+    alt_client.get_object(Bucket=bucket_name, Key='foo123bar')
+
+    copy_source = {'Bucket': bucket_name, 'Key': 'foo123bar'}
+    alt_client.copy(copy_source, bucket_name, 'bar321foo')
+
+@pytest.mark.fails_on_dbstore
+def test_object_copy_canned_acl():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    alt_client = get_alt_client()
+    client.put_object(Bucket=bucket_name, Key='foo123bar', Body='foo')
+
+    copy_source = {'Bucket': bucket_name, 'Key': 'foo123bar'}
+    client.copy_object(Bucket=bucket_name, CopySource=copy_source, Key='bar321foo', ACL='public-read')
+    # check ACL is applied by doing GET from another user
+    alt_client.get_object(Bucket=bucket_name, Key='bar321foo')
+
+
+    metadata={'abc': 'def'}
+    copy_source = {'Bucket': bucket_name, 'Key': 'bar321foo'}
+    client.copy_object(ACL='public-read', Bucket=bucket_name, CopySource=copy_source, Key='foo123bar', Metadata=metadata, MetadataDirective='REPLACE')
+
+    # check ACL is applied by doing GET from another user
+    alt_client.get_object(Bucket=bucket_name, Key='foo123bar')
+
+@pytest.mark.fails_on_dbstore
+def test_object_copy_retaining_metadata():
+    for size in [3, 1024 * 1024]:
+        bucket_name = get_new_bucket()
+        client = get_client()
+        content_type = 'audio/ogg'
+
+        metadata = {'key1': 'value1', 'key2': 'value2'}
+        client.put_object(Bucket=bucket_name, Key='foo123bar', Metadata=metadata, ContentType=content_type, Body=bytearray(size))
+
+        copy_source = {'Bucket': bucket_name, 'Key': 'foo123bar'}
+        client.copy_object(Bucket=bucket_name, CopySource=copy_source, Key='bar321foo')
+
+        response = client.get_object(Bucket=bucket_name, Key='bar321foo')
+        assert content_type == response['ContentType']
+        assert metadata == response['Metadata']
+        body = _get_body(response)
+        assert size == response['ContentLength']
+
+@pytest.mark.fails_on_dbstore
+def test_object_copy_replacing_metadata():
+    for size in [3, 1024 * 1024]:
+        bucket_name = get_new_bucket()
+        client = get_client()
+        content_type = 'audio/ogg'
+
+        metadata = {'key1': 'value1', 'key2': 'value2'}
+        client.put_object(Bucket=bucket_name, Key='foo123bar', Metadata=metadata, ContentType=content_type, Body=bytearray(size))
+
+        metadata = {'key3': 'value3', 'key2': 'value2'}
+        content_type = 'audio/mpeg'
+
+        copy_source = {'Bucket': bucket_name, 'Key': 'foo123bar'}
+        client.copy_object(Bucket=bucket_name, CopySource=copy_source, Key='bar321foo', Metadata=metadata, MetadataDirective='REPLACE', ContentType=content_type)
+
+        response = client.get_object(Bucket=bucket_name, Key='bar321foo')
+        assert content_type == response['ContentType']
+        assert metadata == response['Metadata']
+        assert size == response['ContentLength']
+
+def test_object_copy_bucket_not_found():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    copy_source = {'Bucket': bucket_name + "-fake", 'Key': 'foo123bar'}
+    e = assert_raises(ClientError, client.copy, copy_source, bucket_name, 'bar321foo')
+    status = _get_status(e.response)
+    assert status == 404
+
+def test_object_copy_key_not_found():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    copy_source = {'Bucket': bucket_name, 'Key': 'foo123bar'}
+    e = assert_raises(ClientError, client.copy, copy_source, bucket_name, 'bar321foo')
+    status = _get_status(e.response)
+    assert status == 404
+
+@pytest.mark.fails_on_dbstore
+def test_object_copy_versioned_bucket():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
+    size = 1*5
+    data = bytearray(size)
+    data_str = data.decode()
+    key1 = 'foo123bar'
+    client.put_object(Bucket=bucket_name, Key=key1, Body=data)
+
+    response = client.get_object(Bucket=bucket_name, Key=key1)
+    version_id = response['VersionId']
+
+    # copy object in the same bucket
+    copy_source = {'Bucket': bucket_name, 'Key': key1, 'VersionId': version_id}
+    key2 = 'bar321foo'
+    client.copy_object(Bucket=bucket_name, CopySource=copy_source, Key=key2)
+    response = client.get_object(Bucket=bucket_name, Key=key2)
+    body = _get_body(response)
+    assert data_str == body
+    assert size == response['ContentLength']
+
+
+    # second copy
+    version_id2 = response['VersionId']
+    copy_source = {'Bucket': bucket_name, 'Key': key2, 'VersionId': version_id2}
+    key3 = 'bar321foo2'
+    client.copy_object(Bucket=bucket_name, CopySource=copy_source, Key=key3)
+    response = client.get_object(Bucket=bucket_name, Key=key3)
+    body = _get_body(response)
+    assert data_str == body
+    assert size == response['ContentLength']
+
+    # copy to another versioned bucket
+    bucket_name2 = get_new_bucket()
+    check_configure_versioning_retry(bucket_name2, "Enabled", "Enabled")
+    copy_source = {'Bucket': bucket_name, 'Key': key1, 'VersionId': version_id}
+    key4 = 'bar321foo3'
+    client.copy_object(Bucket=bucket_name2, CopySource=copy_source, Key=key4)
+    response = client.get_object(Bucket=bucket_name2, Key=key4)
+    body = _get_body(response)
+    assert data_str == body
+    assert size == response['ContentLength']
+
+    # copy to another non versioned bucket
+    bucket_name3 = get_new_bucket()
+    copy_source = {'Bucket': bucket_name, 'Key': key1, 'VersionId': version_id}
+    key5 = 'bar321foo4'
+    client.copy_object(Bucket=bucket_name3, CopySource=copy_source, Key=key5)
+    response = client.get_object(Bucket=bucket_name3, Key=key5)
+    body = _get_body(response)
+    assert data_str == body
+    assert size == response['ContentLength']
+
+    # copy from a non versioned bucket
+    copy_source = {'Bucket': bucket_name3, 'Key': key5}
+    key6 = 'foo123bar2'
+    client.copy_object(Bucket=bucket_name, CopySource=copy_source, Key=key6)
+    response = client.get_object(Bucket=bucket_name, Key=key6)
+    body = _get_body(response)
+    assert data_str == body
+    assert size == response['ContentLength']
+
+@pytest.mark.fails_on_dbstore
+def test_object_copy_versioned_url_encoding():
+    bucket = get_new_bucket_resource()
+    check_configure_versioning_retry(bucket.name, "Enabled", "Enabled")
+    src_key = 'foo?bar'
+    src = bucket.put_object(Key=src_key)
+    src.load() # HEAD request tests that the key exists
+
+    # copy object in the same bucket
+    dst_key = 'bar&foo'
+    dst = bucket.Object(dst_key)
+    dst.copy_from(CopySource={'Bucket': src.bucket_name, 'Key': src.key, 'VersionId': src.version_id})
+    dst.load() # HEAD request tests that the key exists
+
+def generate_random(size, part_size=5*1024*1024):
+    """
+    Generate the specified number of bytes of data, yielded one part at a time.
+    (each part is a repetition of a randomly generated 1 KB chunk)
+    """
+    chunk = 1024
+    allowed = string.ascii_letters
+    for x in range(0, size, part_size):
+        strpart = ''.join([allowed[random.randint(0, len(allowed) - 1)] for _ in range(chunk)])
+        s = ''
+        left = size - x
+        this_part_size = min(left, part_size)
+        for y in range(this_part_size // chunk):
+            s = s + strpart
+        if this_part_size > len(s):
+            s = s + strpart[0:this_part_size - len(s)]
+        yield s
+        if (x == size):
+            return
+
+def _multipart_upload(bucket_name, key, size, part_size=5*1024*1024, client=None, content_type=None, metadata=None, resend_parts=[]):
+    """
+    generate a multipart upload of random data of the specified size,
+    re-uploading any part whose zero-based index is listed in resend_parts.
+    return a tuple of (upload_id, data, parts)
+    """
+    if client is None:
+        client = get_client()
+
+
+    if content_type is None and metadata is None:
+        response = client.create_multipart_upload(Bucket=bucket_name, Key=key)
+    else:
+        response = client.create_multipart_upload(Bucket=bucket_name, Key=key, Metadata=metadata, ContentType=content_type)
+
+    upload_id = response['UploadId']
+    s = ''
+    parts = []
+    for i, part in enumerate(generate_random(size, part_size)):
+        # part_num is necessary because PartNumber for upload_part and in parts must start at 1 and i starts at 0
+        part_num = i+1
+        s += part
+        response = client.upload_part(UploadId=upload_id, Bucket=bucket_name, Key=key, PartNumber=part_num, Body=part)
+        parts.append({'ETag': response['ETag'].strip('"'), 'PartNumber': part_num})
+        if i in resend_parts:
+            client.upload_part(UploadId=upload_id, Bucket=bucket_name, Key=key, PartNumber=part_num, Body=part)
+
+    return (upload_id, s, parts)
+
+@pytest.mark.fails_on_dbstore
+def test_object_copy_versioning_multipart_upload():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
+
+    key1 = "srcmultipart"
+    key1_metadata = {'foo': 'bar'}
+    content_type = 'text/bla'
+    objlen = 30 * 1024 * 1024
+    (upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key1, size=objlen, content_type=content_type, metadata=key1_metadata)
+    client.complete_multipart_upload(Bucket=bucket_name, Key=key1, UploadId=upload_id, MultipartUpload={'Parts': parts})
+
+    response = client.get_object(Bucket=bucket_name, Key=key1)
+    key1_size = response['ContentLength']
+    version_id = response['VersionId']
+
+    # copy object in the same bucket
+    copy_source = {'Bucket': bucket_name, 'Key': key1, 'VersionId': version_id}
+    key2 = 'dstmultipart'
+    client.copy_object(Bucket=bucket_name, CopySource=copy_source, Key=key2)
+    response = client.get_object(Bucket=bucket_name, Key=key2)
+    version_id2 = response['VersionId']
+    body = _get_body(response)
+    assert data == body
+    assert key1_size == response['ContentLength']
+    assert key1_metadata == response['Metadata']
+    assert content_type == response['ContentType']
+
+    # second copy
+    copy_source = {'Bucket': bucket_name, 'Key': key2, 'VersionId': version_id2}
+    key3 = 'dstmultipart2'
+    client.copy_object(Bucket=bucket_name, CopySource=copy_source, Key=key3)
+    response = client.get_object(Bucket=bucket_name, Key=key3)
+    body = _get_body(response)
+    assert data == body
+    assert key1_size == response['ContentLength']
+    assert key1_metadata == response['Metadata']
+    assert content_type == response['ContentType']
+
+    # copy to another versioned bucket
+    bucket_name2 = get_new_bucket()
+    check_configure_versioning_retry(bucket_name2, "Enabled", "Enabled")
+
+    copy_source = {'Bucket': bucket_name, 'Key': key1, 'VersionId': version_id}
+    key4 = 'dstmultipart3'
+    client.copy_object(Bucket=bucket_name2, CopySource=copy_source, Key=key4)
+    response = client.get_object(Bucket=bucket_name2, Key=key4)
+    body = _get_body(response)
+    assert data == body
+    assert key1_size == response['ContentLength']
+    assert key1_metadata == response['Metadata']
+    assert content_type == response['ContentType']
+
+    # copy to another non versioned bucket
+    bucket_name3 = get_new_bucket()
+    copy_source = {'Bucket': bucket_name, 'Key': key1, 'VersionId': version_id}
+    key5 = 'dstmultipart4'
+    client.copy_object(Bucket=bucket_name3, CopySource=copy_source, Key=key5)
+    response = client.get_object(Bucket=bucket_name3, Key=key5)
+    body = _get_body(response)
+    assert data == body
+    assert key1_size == response['ContentLength']
+    assert key1_metadata == response['Metadata']
+    assert content_type == response['ContentType']
+
+    # copy from a non versioned bucket
+    copy_source = {'Bucket': bucket_name3, 'Key': key5}
+    key6 = 'dstmultipart5'
+    client.copy_object(Bucket=bucket_name3, CopySource=copy_source, Key=key6)
+    response = client.get_object(Bucket=bucket_name3, Key=key6)
+    body = _get_body(response)
+    assert data == body
+    assert key1_size == response['ContentLength']
+    assert key1_metadata == response['Metadata']
+    assert content_type == response['ContentType']
+
+def test_multipart_upload_empty():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    key1 = "mymultipart"
+    objlen = 0
+    (upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key1, size=objlen)
+    e = assert_raises(ClientError, client.complete_multipart_upload, Bucket=bucket_name, Key=key1, UploadId=upload_id)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 400
+    assert error_code == 'MalformedXML'
+
+@pytest.mark.fails_on_dbstore
+def test_multipart_upload_small():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    key1 = "mymultipart"
+    objlen = 1
+    (upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key1, size=objlen)
+    response = client.complete_multipart_upload(Bucket=bucket_name, Key=key1, UploadId=upload_id, MultipartUpload={'Parts': parts})
+    response = client.get_object(Bucket=bucket_name, Key=key1)
+    assert response['ContentLength'] == objlen
+    # check extra client.complete_multipart_upload
+    response = client.complete_multipart_upload(Bucket=bucket_name, Key=key1, UploadId=upload_id, MultipartUpload={'Parts': parts})
+
+def _create_key_with_random_content(keyname, size=7*1024*1024, bucket_name=None, client=None):
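+    """
+    write an object of the given size filled with generated content and
+    return the name of the bucket it was written to
+    """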
+    if bucket_name is None:
+        bucket_name = get_new_bucket()
+
+    if client is None:
+        client = get_client()
+
+    data_str = str(next(generate_random(size, size)))
+    data = bytes(data_str, 'utf-8')
+    client.put_object(Bucket=bucket_name, Key=keyname, Body=data)
+
+    return bucket_name
+
+def _multipart_copy(src_bucket_name, src_key, dest_bucket_name, dest_key, size, client=None, part_size=5*1024*1024, version_id=None):
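+    """
+    copy src_key to dest_key via upload_part_copy in ranges of part_size,
+    optionally from a specific source version_id.
+    return a tuple of (upload_id, parts) for complete_multipart_upload
+    """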
+
+    if client is None:
+        client = get_client()
+
+    response = client.create_multipart_upload(Bucket=dest_bucket_name, Key=dest_key)
+    upload_id = response['UploadId']
+
+    if version_id is None:
+        copy_source = {'Bucket': src_bucket_name, 'Key': src_key}
+    else:
+        copy_source = {'Bucket': src_bucket_name, 'Key': src_key, 'VersionId': version_id}
+
+    parts = []
+
+    i = 0
+    for start_offset in range(0, size, part_size):
+        end_offset = min(start_offset + part_size - 1, size - 1)
+        part_num = i+1
+        copy_source_range = 'bytes={start}-{end}'.format(start=start_offset, end=end_offset)
+        response = client.upload_part_copy(Bucket=dest_bucket_name, Key=dest_key, CopySource=copy_source, PartNumber=part_num, UploadId=upload_id, CopySourceRange=copy_source_range)
+        parts.append({'ETag': response['CopyPartResult']['ETag'], 'PartNumber': part_num})
+        i = i+1
+
+    return (upload_id, parts)
+
+def _check_key_content(src_key, src_bucket_name, dest_key, dest_bucket_name, version_id=None):
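+    """
+    verify that the destination object matches the (possibly larger)
+    source object over the destination's byte range
+    """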
+    client = get_client()
+
+    if version_id is None:
+        response = client.get_object(Bucket=src_bucket_name, Key=src_key)
+    else:
+        response = client.get_object(Bucket=src_bucket_name, Key=src_key, VersionId=version_id)
+    src_size = response['ContentLength']
+
+    response = client.get_object(Bucket=dest_bucket_name, Key=dest_key)
+    dest_size = response['ContentLength']
+    dest_data = _get_body(response)
+    assert src_size >= dest_size
+
+    r = 'bytes={s}-{e}'.format(s=0, e=dest_size-1)
+    if version_id is None:
+        response = client.get_object(Bucket=src_bucket_name, Key=src_key, Range=r)
+    else:
+        response = client.get_object(Bucket=src_bucket_name, Key=src_key, Range=r, VersionId=version_id)
+    src_data = _get_body(response)
+    assert src_data == dest_data
+
+@pytest.mark.fails_on_dbstore
+def test_multipart_copy_small():
+    src_key = 'foo'
+    src_bucket_name = _create_key_with_random_content(src_key)
+
+    dest_bucket_name = get_new_bucket()
+    dest_key = "mymultipart"
+    size = 1
+    client = get_client()
+
+    (upload_id, parts) = _multipart_copy(src_bucket_name, src_key, dest_bucket_name, dest_key, size)
+    client.complete_multipart_upload(Bucket=dest_bucket_name, Key=dest_key, UploadId=upload_id, MultipartUpload={'Parts': parts})
+
+    response = client.get_object(Bucket=dest_bucket_name, Key=dest_key)
+    assert size == response['ContentLength']
+    _check_key_content(src_key, src_bucket_name, dest_key, dest_bucket_name)
+
+def test_multipart_copy_invalid_range():
+    client = get_client()
+    src_key = 'source'
+    src_bucket_name = _create_key_with_random_content(src_key, size=5)
+
+    response = client.create_multipart_upload(Bucket=src_bucket_name, Key='dest')
+    upload_id = response['UploadId']
+
+    copy_source = {'Bucket': src_bucket_name, 'Key': src_key}
+    copy_source_range = 'bytes={start}-{end}'.format(start=0, end=21)
+
+    e = assert_raises(ClientError, client.upload_part_copy, Bucket=src_bucket_name, Key='dest', UploadId=upload_id, CopySource=copy_source, CopySourceRange=copy_source_range, PartNumber=1)
+    status, error_code = _get_status_and_error_code(e.response)
+    valid_status = [400, 416]
+    if status not in valid_status:
+        raise AssertionError("Invalid response " + str(status))
+    assert error_code == 'InvalidRange'
+
+
+# TODO: remove fails_on_rgw when https://tracker.ceph.com/issues/40795 is resolved
+@pytest.mark.fails_on_rgw
+def test_multipart_copy_improper_range():
+    client = get_client()
+    src_key = 'source'
+    src_bucket_name = _create_key_with_random_content(src_key, size=5)
+
+    response = client.create_multipart_upload(
+        Bucket=src_bucket_name, Key='dest')
+    upload_id = response['UploadId']
+
+    copy_source = {'Bucket': src_bucket_name, 'Key': src_key}
+    test_ranges = ['{start}-{end}'.format(start=0, end=2),
+                   'bytes={start}'.format(start=0),
+                   'bytes=hello-world',
+                   'bytes=0-bar',
+                   'bytes=hello-',
+                   'bytes=0-2,3-5']
+
+    for test_range in test_ranges:
+        e = assert_raises(ClientError, client.upload_part_copy,
+                          Bucket=src_bucket_name, Key='dest',
+                          UploadId=upload_id,
+                          CopySource=copy_source,
+                          CopySourceRange=test_range,
+                          PartNumber=1)
+        status, error_code = _get_status_and_error_code(e.response)
+        assert status == 400
+        assert error_code == 'InvalidArgument'
+
+
+def test_multipart_copy_without_range():
+    client = get_client()
+    src_key = 'source'
+    src_bucket_name = _create_key_with_random_content(src_key, size=10)
+    dest_bucket_name = get_new_bucket_name()
+    get_new_bucket(name=dest_bucket_name)
+    dest_key = "mymultipartcopy"
+
+    response = client.create_multipart_upload(Bucket=dest_bucket_name, Key=dest_key)
+    upload_id = response['UploadId']
+    parts = []
+
+    copy_source = {'Bucket': src_bucket_name, 'Key': src_key}
+    part_num = 1
+    # CopySourceRange is deliberately omitted so the whole source object is copied
+
+    response = client.upload_part_copy(Bucket=dest_bucket_name, Key=dest_key, CopySource=copy_source, PartNumber=part_num, UploadId=upload_id)
+
+    parts.append({'ETag': response['CopyPartResult']['ETag'], 'PartNumber': part_num})
+    client.complete_multipart_upload(Bucket=dest_bucket_name, Key=dest_key, UploadId=upload_id, MultipartUpload={'Parts': parts})
+
+    response = client.get_object(Bucket=dest_bucket_name, Key=dest_key)
+    assert response['ContentLength'] == 10
+    _check_key_content(src_key, src_bucket_name, dest_key, dest_bucket_name)
+
+@pytest.mark.fails_on_dbstore
+def test_multipart_copy_special_names():
+    src_bucket_name = get_new_bucket()
+
+    dest_bucket_name = get_new_bucket()
+
+    dest_key = "mymultipart"
+    size = 1
+    client = get_client()
+
+    for src_key in (' ', '_', '__', '?versionId'):
+        _create_key_with_random_content(src_key, bucket_name=src_bucket_name)
+        (upload_id, parts) = _multipart_copy(src_bucket_name, src_key, dest_bucket_name, dest_key, size)
+        response = client.complete_multipart_upload(Bucket=dest_bucket_name, Key=dest_key, UploadId=upload_id, MultipartUpload={'Parts': parts})
+        response = client.get_object(Bucket=dest_bucket_name, Key=dest_key)
+        assert size == response['ContentLength']
+        _check_key_content(src_key, src_bucket_name, dest_key, dest_bucket_name)
+
+def _check_content_using_range(key, bucket_name, data, step):
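+    """
+    read the object back in ranged GETs of the given step size and
+    verify each range against the expected data
+    """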
+    client = get_client()
+    response = client.get_object(Bucket=bucket_name, Key=key)
+    size = response['ContentLength']
+
+    for ofs in range(0, size, step):
+        toread = size - ofs
+        if toread > step:
+            toread = step
+        end = ofs + toread - 1
+        r = 'bytes={s}-{e}'.format(s=ofs, e=end)
+        response = client.get_object(Bucket=bucket_name, Key=key, Range=r)
+        assert response['ContentLength'] == toread
+        body = _get_body(response)
+        assert body == data[ofs:end+1]
+
+@pytest.mark.fails_on_dbstore
+def test_multipart_upload():
+    bucket_name = get_new_bucket()
+    key="mymultipart"
+    content_type='text/bla'
+    objlen = 30 * 1024 * 1024
+    metadata = {'foo': 'bar'}
+    client = get_client()
+
+    (upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key, size=objlen, content_type=content_type, metadata=metadata)
+    client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
+    # check extra client.complete_multipart_upload
+    client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
+
+    response = client.list_objects_v2(Bucket=bucket_name, Prefix=key)
+    assert len(response['Contents']) == 1
+    assert response['Contents'][0]['Size'] == objlen
+
+    response = client.get_object(Bucket=bucket_name, Key=key)
+    assert response['ContentType'] == content_type
+    assert response['Metadata'] == metadata
+    body = _get_body(response)
+    assert len(body) == response['ContentLength']
+    assert body == data
+
+    _check_content_using_range(key, bucket_name, data, 1000000)
+    _check_content_using_range(key, bucket_name, data, 10000000)
+
+def check_versioning(bucket_name, status):
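+    """
+    assert that the bucket's versioning Status matches the expected value
+    (None means versioning has never been configured)
+    """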
+    client = get_client()
+
+    try:
+        response = client.get_bucket_versioning(Bucket=bucket_name)
+        assert response['Status'] == status
+    except KeyError:
+        assert status is None
+
+# Amazon is eventually consistent; retry a few times if the check fails
+def check_configure_versioning_retry(bucket_name, status, expected_string):
+    client = get_client()
+    client.put_bucket_versioning(Bucket=bucket_name, VersioningConfiguration={'Status': status})
+
+    read_status = None
+
+    for i in range(5):
+        try:
+            response = client.get_bucket_versioning(Bucket=bucket_name)
+            read_status = response['Status']
+        except KeyError:
+            read_status = None
+
+        if expected_string == read_status:
+            break
+
+        time.sleep(1)
+
+    assert expected_string == read_status
+
+@pytest.mark.fails_on_dbstore
+def test_multipart_copy_versioned():
+    src_bucket_name = get_new_bucket()
+    dest_bucket_name = get_new_bucket()
+
+    dest_key = "mymultipart"
+    check_versioning(src_bucket_name, None)
+
+    src_key = 'foo'
+    check_configure_versioning_retry(src_bucket_name, "Enabled", "Enabled")
+
+    size = 15 * 1024 * 1024
+    _create_key_with_random_content(src_key, size=size, bucket_name=src_bucket_name)
+    _create_key_with_random_content(src_key, size=size, bucket_name=src_bucket_name)
+    _create_key_with_random_content(src_key, size=size, bucket_name=src_bucket_name)
+
+    version_id = []
+    client = get_client()
+    response = client.list_object_versions(Bucket=src_bucket_name)
+    for ver in response['Versions']:
+        version_id.append(ver['VersionId'])
+
+    for vid in version_id:
+        (upload_id, parts) = _multipart_copy(src_bucket_name, src_key, dest_bucket_name, dest_key, size, version_id=vid)
+        response = client.complete_multipart_upload(Bucket=dest_bucket_name, Key=dest_key, UploadId=upload_id, MultipartUpload={'Parts': parts})
+        response = client.get_object(Bucket=dest_bucket_name, Key=dest_key)
+        assert size == response['ContentLength']
+        _check_key_content(src_key, src_bucket_name, dest_key, dest_bucket_name, version_id=vid)
+
+def _check_upload_multipart_resend(bucket_name, key, objlen, resend_parts):
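+    """
+    complete a multipart upload that resends the given part indexes and
+    verify the resulting object's metadata and contents
+    """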
+    content_type = 'text/bla'
+    metadata = {'foo': 'bar'}
+    client = get_client()
+    (upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key, size=objlen, content_type=content_type, metadata=metadata, resend_parts=resend_parts)
+    client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
+
+    response = client.get_object(Bucket=bucket_name, Key=key)
+    assert response['ContentType'] == content_type
+    assert response['Metadata'] == metadata
+    body = _get_body(response)
+    assert len(body) == response['ContentLength']
+    assert body == data
+
+    _check_content_using_range(key, bucket_name, data, 1000000)
+    _check_content_using_range(key, bucket_name, data, 10000000)
+
+@pytest.mark.fails_on_dbstore
+def test_multipart_upload_resend_part():
+    bucket_name = get_new_bucket()
+    key="mymultipart"
+    objlen = 30 * 1024 * 1024
+
+    _check_upload_multipart_resend(bucket_name, key, objlen, [0])
+    _check_upload_multipart_resend(bucket_name, key, objlen, [1])
+    _check_upload_multipart_resend(bucket_name, key, objlen, [2])
+    _check_upload_multipart_resend(bucket_name, key, objlen, [1,2])
+    _check_upload_multipart_resend(bucket_name, key, objlen, [0,1,2,3,4,5])
+
+def test_multipart_upload_multiple_sizes():
+    bucket_name = get_new_bucket()
+    key="mymultipart"
+    client = get_client()
+
+    objlen = 5*1024*1024
+    (upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key, size=objlen)
+    client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
+
+    objlen = 5*1024*1024+100*1024
+    (upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key, size=objlen)
+    client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
+
+    objlen = 5*1024*1024+600*1024
+    (upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key, size=objlen)
+    client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
+
+    objlen = 10*1024*1024+100*1024
+    (upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key, size=objlen)
+    client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
+
+    objlen = 10*1024*1024+600*1024
+    (upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key, size=objlen)
+    client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
+
+    objlen = 10*1024*1024
+    (upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key, size=objlen)
+    client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
+
+@pytest.mark.fails_on_dbstore
+def test_multipart_copy_multiple_sizes():
+    src_key = 'foo'
+    src_bucket_name = _create_key_with_random_content(src_key, 12*1024*1024)
+
+    dest_bucket_name = get_new_bucket()
+    dest_key="mymultipart"
+    client = get_client()
+
+    size = 5*1024*1024
+    (upload_id, parts) = _multipart_copy(src_bucket_name, src_key, dest_bucket_name, dest_key, size)
+    client.complete_multipart_upload(Bucket=dest_bucket_name, Key=dest_key, UploadId=upload_id, MultipartUpload={'Parts': parts})
+    _check_key_content(src_key, src_bucket_name, dest_key, dest_bucket_name)
+
+    size = 5*1024*1024+100*1024
+    (upload_id, parts) = _multipart_copy(src_bucket_name, src_key, dest_bucket_name, dest_key, size)
+    client.complete_multipart_upload(Bucket=dest_bucket_name, Key=dest_key, UploadId=upload_id, MultipartUpload={'Parts': parts})
+    _check_key_content(src_key, src_bucket_name, dest_key, dest_bucket_name)
+
+    size = 5*1024*1024+600*1024
+    (upload_id, parts) = _multipart_copy(src_bucket_name, src_key, dest_bucket_name, dest_key, size)
+    client.complete_multipart_upload(Bucket=dest_bucket_name, Key=dest_key, UploadId=upload_id, MultipartUpload={'Parts': parts})
+    _check_key_content(src_key, src_bucket_name, dest_key, dest_bucket_name)
+
+    size = 10*1024*1024+100*1024
+    (upload_id, parts) = _multipart_copy(src_bucket_name, src_key, dest_bucket_name, dest_key, size)
+    client.complete_multipart_upload(Bucket=dest_bucket_name, Key=dest_key, UploadId=upload_id, MultipartUpload={'Parts': parts})
+    _check_key_content(src_key, src_bucket_name, dest_key, dest_bucket_name)
+
+    size = 10*1024*1024+600*1024
+    (upload_id, parts) = _multipart_copy(src_bucket_name, src_key, dest_bucket_name, dest_key, size)
+    client.complete_multipart_upload(Bucket=dest_bucket_name, Key=dest_key, UploadId=upload_id, MultipartUpload={'Parts': parts})
+    _check_key_content(src_key, src_bucket_name, dest_key, dest_bucket_name)
+
+    size = 10*1024*1024
+    (upload_id, parts) = _multipart_copy(src_bucket_name, src_key, dest_bucket_name, dest_key, size)
+    client.complete_multipart_upload(Bucket=dest_bucket_name, Key=dest_key, UploadId=upload_id, MultipartUpload={'Parts': parts})
+    _check_key_content(src_key, src_bucket_name, dest_key, dest_bucket_name)
+
+def test_multipart_upload_size_too_small():
+    bucket_name = get_new_bucket()
+    key="mymultipart"
+    client = get_client()
+
+    size = 100*1024
+    (upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key, size=size, part_size=10*1024)
+    e = assert_raises(ClientError, client.complete_multipart_upload, Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 400
+    assert error_code == 'EntityTooSmall'
+
+def gen_rand_string(size, chars=string.ascii_uppercase + string.digits):
+    return ''.join(random.choice(chars) for _ in range(size))
+
+def _do_test_multipart_upload_contents(bucket_name, key, num_parts):
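+    """
+    upload num_parts 5MB parts plus a final 3MB part, then verify the
+    completed object matches the concatenated payload
+    """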
+    payload=gen_rand_string(5)*1024*1024
+    client = get_client()
+
+    response = client.create_multipart_upload(Bucket=bucket_name, Key=key)
+    upload_id = response['UploadId']
+
+    parts = []
+
+    for part_num in range(0, num_parts):
+        part = bytes(payload, 'utf-8')
+        response = client.upload_part(UploadId=upload_id, Bucket=bucket_name, Key=key, PartNumber=part_num+1, Body=part)
+        parts.append({'ETag': response['ETag'].strip('"'), 'PartNumber': part_num+1})
+
+    last_payload = '123'*1024*1024
+    last_part = bytes(last_payload, 'utf-8')
+    response = client.upload_part(UploadId=upload_id, Bucket=bucket_name, Key=key, PartNumber=num_parts+1, Body=last_part)
+    parts.append({'ETag': response['ETag'].strip('"'), 'PartNumber': num_parts+1})
+
+    res = client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
+    assert res['ETag'] != ''
+
+    response = client.get_object(Bucket=bucket_name, Key=key)
+    test_string = _get_body(response)
+
+    all_payload = payload*num_parts + last_payload
+
+    assert test_string == all_payload
+
+    return all_payload
+
+@pytest.mark.fails_on_dbstore
+def test_multipart_upload_contents():
+    bucket_name = get_new_bucket()
+    _do_test_multipart_upload_contents(bucket_name, 'mymultipart', 3)
+
+def test_multipart_upload_overwrite_existing_object():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    key = 'mymultipart'
+    payload='12345'*1024*1024
+    num_parts=2
+    client.put_object(Bucket=bucket_name, Key=key, Body=payload)
+
+
+    response = client.create_multipart_upload(Bucket=bucket_name, Key=key)
+    upload_id = response['UploadId']
+
+    parts = []
+
+    for part_num in range(0, num_parts):
+        response = client.upload_part(UploadId=upload_id, Bucket=bucket_name, Key=key, PartNumber=part_num+1, Body=payload)
+        parts.append({'ETag': response['ETag'].strip('"'), 'PartNumber': part_num+1})
+
+    client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
+
+    response = client.get_object(Bucket=bucket_name, Key=key)
+    test_string = _get_body(response)
+
+    assert test_string == payload*num_parts
+
+def test_abort_multipart_upload():
+    bucket_name = get_new_bucket()
+    key="mymultipart"
+    objlen = 10 * 1024 * 1024
+    client = get_client()
+
+    (upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key, size=objlen)
+    client.abort_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id)
+
+    response = client.list_objects_v2(Bucket=bucket_name, Prefix=key)
+    assert 'Contents' not in response
+
+def test_abort_multipart_upload_not_found():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    key="mymultipart"
+    client.put_object(Bucket=bucket_name, Key=key)
+
+    e = assert_raises(ClientError, client.abort_multipart_upload, Bucket=bucket_name, Key=key, UploadId='56788')
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 404
+    assert error_code == 'NoSuchUpload'
+
+@pytest.mark.fails_on_dbstore
+def test_list_multipart_upload():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    key="mymultipart"
+    mb = 1024 * 1024
+
+    upload_ids = []
+    (upload_id1, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key, size=5*mb)
+    upload_ids.append(upload_id1)
+    (upload_id2, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key, size=6*mb)
+    upload_ids.append(upload_id2)
+
+    key2="mymultipart2"
+    (upload_id3, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key2, size=5*mb)
+    upload_ids.append(upload_id3)
+
+    response = client.list_multipart_uploads(Bucket=bucket_name)
+    uploads = response['Uploads']
+    resp_uploadids = []
+
+    for upload in uploads:
+        resp_uploadids.append(upload['UploadId'])
+
+    for upload_id in upload_ids:
+        assert upload_id in resp_uploadids
+
+    client.abort_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id1)
+    client.abort_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id2)
+    client.abort_multipart_upload(Bucket=bucket_name, Key=key2, UploadId=upload_id3)
+
+@pytest.mark.fails_on_dbstore
+def test_list_multipart_upload_owner():
+    bucket_name = get_new_bucket()
+
+    client1 = get_client()
+    user1 = get_main_user_id()
+    name1 = get_main_display_name()
+
+    client2 = get_alt_client()
+    user2 = get_alt_user_id()
+    name2 = get_alt_display_name()
+
+    # add bucket acl for public read/write access
+    client1.put_bucket_acl(Bucket=bucket_name, ACL='public-read-write')
+
+    key1 = 'multipart1'
+    key2 = 'multipart2'
+    upload1 = client1.create_multipart_upload(Bucket=bucket_name, Key=key1)['UploadId']
+    try:
+        upload2 = client2.create_multipart_upload(Bucket=bucket_name, Key=key2)['UploadId']
+        try:
+            # match fields of an Upload from ListMultipartUploadsResult
+            def match(upload, key, uploadid, userid, username):
+                assert upload['Key'] == key
+                assert upload['UploadId'] == uploadid
+                assert upload['Initiator']['ID'] == userid
+                assert upload['Initiator']['DisplayName'] == username
+                assert upload['Owner']['ID'] == userid
+                assert upload['Owner']['DisplayName'] == username
+
+            # list uploads with client1
+            uploads1 = client1.list_multipart_uploads(Bucket=bucket_name)['Uploads']
+            assert len(uploads1) == 2
+            match(uploads1[0], key1, upload1, user1, name1)
+            match(uploads1[1], key2, upload2, user2, name2)
+
+            # list uploads with client2
+            uploads2 = client2.list_multipart_uploads(Bucket=bucket_name)['Uploads']
+            assert len(uploads2) == 2
+            match(uploads2[0], key1, upload1, user1, name1)
+            match(uploads2[1], key2, upload2, user2, name2)
+        finally:
+            client2.abort_multipart_upload(Bucket=bucket_name, Key=key2, UploadId=upload2)
+    finally:
+        client1.abort_multipart_upload(Bucket=bucket_name, Key=key1, UploadId=upload1)
+
+def test_multipart_upload_missing_part():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    key="mymultipart"
+    size = 1
+
+    response = client.create_multipart_upload(Bucket=bucket_name, Key=key)
+    upload_id = response['UploadId']
+
+    parts = []
+    response = client.upload_part(UploadId=upload_id, Bucket=bucket_name, Key=key, PartNumber=1, Body=bytes('\x00', 'utf-8'))
+    # the part was uploaded as PartNumber 1; claim 9999 here to trigger InvalidPart
+    parts.append({'ETag': response['ETag'].strip('"'), 'PartNumber': 9999})
+
+    e = assert_raises(ClientError, client.complete_multipart_upload, Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 400
+    assert error_code == 'InvalidPart'
+
+def test_multipart_upload_incorrect_etag():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    key="mymultipart"
+    size = 1
+
+    response = client.create_multipart_upload(Bucket=bucket_name, Key=key)
+    upload_id = response['UploadId']
+
+    parts = []
+    response = client.upload_part(UploadId=upload_id, Bucket=bucket_name, Key=key, PartNumber=1, Body=bytes('\x00', 'utf-8'))
+    # the correct ETag is "93b885adfe0da089cdf634904fd59f71"; use a bogus one to trigger InvalidPart
+    parts.append({'ETag': "ffffffffffffffffffffffffffffffff", 'PartNumber': 1})
+
+    e = assert_raises(ClientError, client.complete_multipart_upload, Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 400
+    assert error_code == 'InvalidPart'
+
+@pytest.mark.fails_on_dbstore
+def test_multipart_get_part():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    key = "mymultipart"
+
+    part_size = 5*1024*1024
+    part_sizes = 3 * [part_size] + [1*1024*1024]
+    part_count = len(part_sizes)
+    total_size = sum(part_sizes)
+
+    (upload_id, data, parts) = _multipart_upload(bucket_name, key, total_size, part_size, resend_parts=[2])
+
+    # request part before complete
+    e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key=key, PartNumber=1)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 404
+    assert error_code == 'NoSuchKey'
+
+    client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
+    assert len(parts) == part_count
+
+    for part, size in zip(parts, part_sizes):
+        response = client.head_object(Bucket=bucket_name, Key=key, PartNumber=part['PartNumber'])
+        assert response['PartsCount'] == part_count
+        assert response['ETag'] == '"{}"'.format(part['ETag'])
+
+        response = client.get_object(Bucket=bucket_name, Key=key, PartNumber=part['PartNumber'])
+        assert response['PartsCount'] == part_count
+        assert response['ETag'] == '"{}"'.format(part['ETag'])
+        assert response['ContentLength'] == size
+        # compare contents
+        for chunk in response['Body'].iter_chunks():
+            assert chunk.decode() == data[0:len(chunk)]
+            data = data[len(chunk):]
+
+    # request PartNumber out of range
+    e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key=key, PartNumber=5)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 400
+    assert error_code == 'InvalidPart'
+
+@pytest.mark.fails_on_dbstore
+def test_multipart_single_get_part():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    key = "mymultipart"
+
+    part_size = 5*1024*1024
+    part_sizes = [part_size] # just one part
+    part_count = len(part_sizes)
+    total_size = sum(part_sizes)
+
+    (upload_id, data, parts) = _multipart_upload(bucket_name, key, total_size, part_size)
+
+    # request part before complete
+    e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key=key, PartNumber=1)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 404
+    assert error_code == 'NoSuchKey'
+
+    client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
+    assert len(parts) == part_count
+
+    for part, size in zip(parts, part_sizes):
+        response = client.head_object(Bucket=bucket_name, Key=key, PartNumber=part['PartNumber'])
+        assert response['PartsCount'] == part_count
+        assert response['ETag'] == '"{}"'.format(part['ETag'])
+
+        response = client.get_object(Bucket=bucket_name, Key=key, PartNumber=part['PartNumber'])
+        assert response['PartsCount'] == part_count
+        assert response['ETag'] == '"{}"'.format(part['ETag'])
+        assert response['ContentLength'] == size
+        # compare contents
+        for chunk in response['Body'].iter_chunks():
+            assert chunk.decode() == data[0:len(chunk)]
+            data = data[len(chunk):]
+
+    # request PartNumber out of range
+    e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key=key, PartNumber=5)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 400
+    assert error_code == 'InvalidPart'
+
+@pytest.mark.fails_on_dbstore
+def test_non_multipart_get_part():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    key = "singlepart"
+
+    response = client.put_object(Bucket=bucket_name, Key=key, Body='body')
+    etag = response['ETag']
+
+    # request for PartNumber > 1 results in InvalidPart
+    e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key=key, PartNumber=2)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 400
+    assert error_code == 'InvalidPart'
+
+    # request for PartNumber = 1 gives back the entire object
+    response = client.get_object(Bucket=bucket_name, Key=key, PartNumber=1)
+    assert response['ETag'] == etag
+    assert _get_body(response) == 'body'
+
+
+def _simple_http_req_100_cont(host, port, is_secure, method, resource):
+    """
+    Send the specified request w/expect 100-continue
+    and await confirmation.
+    """
+    req_str = '{method} {resource} HTTP/1.1\r\nHost: {host}\r\nAccept-Encoding: identity\r\nContent-Length: 123\r\nExpect: 100-continue\r\n\r\n'.format(
+            method=method,
+            resource=resource,
+            host=host,
+            )
+
+    req = bytes(req_str, 'utf-8')
+
+    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+    if is_secure:
+        # ssl.wrap_socket() was removed from the stdlib; use an unverified TLS client context instead
+        context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
+        context.check_hostname = False
+        context.verify_mode = ssl.CERT_NONE
+        s = context.wrap_socket(s, server_hostname=host)
+    s.settimeout(5)
+    s.connect((host, port))
+    s.send(req)
+
+    data = b''
+    try:
+        data = s.recv(1024)
+    except socket.error as msg:
+        print('got response: ', msg)
+        print('most likely server doesn\'t support 100-continue')
+
+    s.close()
+    data_str = data.decode()
+    l = data_str.split(' ')
+
+    assert l[0].startswith('HTTP')
+
+    return l[1]
+
+def test_100_continue():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+    client.create_bucket(Bucket=bucket_name)
+    objname='testobj'
+    resource = '/{bucket}/{obj}'.format(bucket=bucket_name, obj=objname)
+
+    host = get_config_host()
+    port = get_config_port()
+    is_secure = get_config_is_secure()
+
+    # NOTE: this test should also be exercised with is_secure=True
+    status = _simple_http_req_100_cont(host, port, is_secure, 'PUT', resource)
+    assert status == '403'
+
+    client.put_bucket_acl(Bucket=bucket_name, ACL='public-read-write')
+
+    status = _simple_http_req_100_cont(host, port, is_secure, 'PUT', resource)
+    assert status == '100'
+
+def test_set_cors():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    allowed_methods = ['GET', 'PUT']
+    allowed_origins = ['*.get', '*.put']
+
+    cors_config ={
+        'CORSRules': [
+            {'AllowedMethods': allowed_methods,
+             'AllowedOrigins': allowed_origins,
+            },
+        ]
+    }
+
+    e = assert_raises(ClientError, client.get_bucket_cors, Bucket=bucket_name)
+    status = _get_status(e.response)
+    assert status == 404
+
+    client.put_bucket_cors(Bucket=bucket_name, CORSConfiguration=cors_config)
+    response = client.get_bucket_cors(Bucket=bucket_name)
+    assert response['CORSRules'][0]['AllowedMethods'] == allowed_methods
+    assert response['CORSRules'][0]['AllowedOrigins'] == allowed_origins
+
+    client.delete_bucket_cors(Bucket=bucket_name)
+    e = assert_raises(ClientError, client.get_bucket_cors, Bucket=bucket_name)
+    status = _get_status(e.response)
+    assert status == 404
+
+def _cors_request_and_check(func, url, headers, expect_status, expect_allow_origin, expect_allow_methods):
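+    """
+    issue the request and check the status code and the CORS
+    allow-origin/allow-methods response headers
+    """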
+    r = func(url, headers=headers, verify=get_config_ssl_verify())
+    assert r.status_code == expect_status
+
+    assert r.headers.get('access-control-allow-origin', None) == expect_allow_origin
+    assert r.headers.get('access-control-allow-methods', None) == expect_allow_methods
+
+def test_cors_origin_response():
+    bucket_name = _setup_bucket_acl(bucket_acl='public-read')
+    client = get_client()
+
+    cors_config ={
+        'CORSRules': [
+            {'AllowedMethods': ['GET'],
+             'AllowedOrigins': ['*suffix'],
+            },
+            {'AllowedMethods': ['GET'],
+             'AllowedOrigins': ['start*end'],
+            },
+            {'AllowedMethods': ['GET'],
+             'AllowedOrigins': ['prefix*'],
+            },
+            {'AllowedMethods': ['PUT'],
+             'AllowedOrigins': ['*.put'],
+            }
+        ]
+    }
+
+    e = assert_raises(ClientError, client.get_bucket_cors, Bucket=bucket_name)
+    status = _get_status(e.response)
+    assert status == 404
+
+    client.put_bucket_cors(Bucket=bucket_name, CORSConfiguration=cors_config)
+
+    time.sleep(3)
+
+    url = _get_post_url(bucket_name)
+
+    _cors_request_and_check(requests.get, url, None, 200, None, None)
+    _cors_request_and_check(requests.get, url, {'Origin': 'foo.suffix'}, 200, 'foo.suffix', 'GET')
+    _cors_request_and_check(requests.get, url, {'Origin': 'foo.bar'}, 200, None, None)
+    _cors_request_and_check(requests.get, url, {'Origin': 'foo.suffix.get'}, 200, None, None)
+    _cors_request_and_check(requests.get, url, {'Origin': 'startend'}, 200, 'startend', 'GET')
+    _cors_request_and_check(requests.get, url, {'Origin': 'start1end'}, 200, 'start1end', 'GET')
+    _cors_request_and_check(requests.get, url, {'Origin': 'start12end'}, 200, 'start12end', 'GET')
+    _cors_request_and_check(requests.get, url, {'Origin': '0start12end'}, 200, None, None)
+    _cors_request_and_check(requests.get, url, {'Origin': 'prefix'}, 200, 'prefix', 'GET')
+    _cors_request_and_check(requests.get, url, {'Origin': 'prefix.suffix'}, 200, 'prefix.suffix', 'GET')
+    _cors_request_and_check(requests.get, url, {'Origin': 'bla.prefix'}, 200, None, None)
+
+    obj_url = '{u}/{o}'.format(u=url, o='bar')
+    _cors_request_and_check(requests.get, obj_url, {'Origin': 'foo.suffix'}, 404, 'foo.suffix', 'GET')
+    _cors_request_and_check(requests.put, obj_url, {'Origin': 'foo.suffix', 'Access-Control-Request-Method': 'GET',
+                                                    'content-length': '0'}, 403, 'foo.suffix', 'GET')
+    _cors_request_and_check(requests.put, obj_url, {'Origin': 'foo.suffix', 'Access-Control-Request-Method': 'PUT',
+                                                    'content-length': '0'}, 403, None, None)
+
+    _cors_request_and_check(requests.put, obj_url, {'Origin': 'foo.suffix', 'Access-Control-Request-Method': 'DELETE',
+                                                    'content-length': '0'}, 403, None, None)
+    _cors_request_and_check(requests.put, obj_url, {'Origin': 'foo.suffix', 'content-length': '0'}, 403, None, None)
+
+    _cors_request_and_check(requests.put, obj_url, {'Origin': 'foo.put', 'content-length': '0'}, 403, 'foo.put', 'PUT')
+
+    _cors_request_and_check(requests.get, obj_url, {'Origin': 'foo.suffix'}, 404, 'foo.suffix', 'GET')
+
+    _cors_request_and_check(requests.options, url, None, 400, None, None)
+    _cors_request_and_check(requests.options, url, {'Origin': 'foo.suffix'}, 400, None, None)
+    _cors_request_and_check(requests.options, url, {'Origin': 'bla'}, 400, None, None)
+    _cors_request_and_check(requests.options, obj_url, {'Origin': 'foo.suffix', 'Access-Control-Request-Method': 'GET',
+                                                    'content-length': '0'}, 200, 'foo.suffix', 'GET')
+    _cors_request_and_check(requests.options, url, {'Origin': 'foo.bar', 'Access-Control-Request-Method': 'GET'}, 403, None, None)
+    _cors_request_and_check(requests.options, url, {'Origin': 'foo.suffix.get', 'Access-Control-Request-Method': 'GET'}, 403, None, None)
+    _cors_request_and_check(requests.options, url, {'Origin': 'startend', 'Access-Control-Request-Method': 'GET'}, 200, 'startend', 'GET')
+    _cors_request_and_check(requests.options, url, {'Origin': 'start1end', 'Access-Control-Request-Method': 'GET'}, 200, 'start1end', 'GET')
+    _cors_request_and_check(requests.options, url, {'Origin': 'start12end', 'Access-Control-Request-Method': 'GET'}, 200, 'start12end', 'GET')
+    _cors_request_and_check(requests.options, url, {'Origin': '0start12end', 'Access-Control-Request-Method': 'GET'}, 403, None, None)
+    _cors_request_and_check(requests.options, url, {'Origin': 'prefix', 'Access-Control-Request-Method': 'GET'}, 200, 'prefix', 'GET')
+    _cors_request_and_check(requests.options, url, {'Origin': 'prefix.suffix', 'Access-Control-Request-Method': 'GET'}, 200, 'prefix.suffix', 'GET')
+    _cors_request_and_check(requests.options, url, {'Origin': 'bla.prefix', 'Access-Control-Request-Method': 'GET'}, 403, None, None)
+    _cors_request_and_check(requests.options, url, {'Origin': 'foo.put', 'Access-Control-Request-Method': 'GET'}, 403, None, None)
+    _cors_request_and_check(requests.options, url, {'Origin': 'foo.put', 'Access-Control-Request-Method': 'PUT'}, 200, 'foo.put', 'PUT')
+
+def test_cors_origin_wildcard():
+    bucket_name = _setup_bucket_acl(bucket_acl='public-read')
+    client = get_client()
+
+    cors_config ={
+        'CORSRules': [
+            {'AllowedMethods': ['GET'],
+             'AllowedOrigins': ['*'],
+            },
+        ]
+    }
+
+    e = assert_raises(ClientError, client.get_bucket_cors, Bucket=bucket_name)
+    status = _get_status(e.response)
+    assert status == 404
+
+    client.put_bucket_cors(Bucket=bucket_name, CORSConfiguration=cors_config)
+
+    time.sleep(3)
+
+    url = _get_post_url(bucket_name)
+
+    _cors_request_and_check(requests.get, url, None, 200, None, None)
+    _cors_request_and_check(requests.get, url, {'Origin': 'example.origin'}, 200, '*', 'GET')
+
+def test_cors_header_option():
+    bucket_name = _setup_bucket_acl(bucket_acl='public-read')
+    client = get_client()
+
+    cors_config ={
+        'CORSRules': [
+            {'AllowedMethods': ['GET'],
+             'AllowedOrigins': ['*'],
+             'ExposeHeaders': ['x-amz-meta-header1'],
+            },
+        ]
+    }
+
+    e = assert_raises(ClientError, client.get_bucket_cors, Bucket=bucket_name)
+    status = _get_status(e.response)
+    assert status == 404
+
+    client.put_bucket_cors(Bucket=bucket_name, CORSConfiguration=cors_config)
+
+    time.sleep(3)
+
+    url = _get_post_url(bucket_name)
+    obj_url = '{u}/{o}'.format(u=url, o='bar')
+
+    _cors_request_and_check(requests.options, obj_url, {'Origin': 'example.origin','Access-Control-Request-Headers':'x-amz-meta-header2','Access-Control-Request-Method':'GET'}, 403, None, None)
+
+def _test_cors_options_presigned_method(client, method, cannedACL=None):
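+    """
+    presign the given method on a public-read object, verify the OPTIONS
+    preflight fails without a CORS config and succeeds once a rule matching
+    the origin and method is installed
+    """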
+    bucket_name = _setup_bucket_object_acl('public-read', 'public-read', client=client)
+    params = {'Bucket': bucket_name, 'Key': 'foo'}
+
+    if cannedACL is not None:
+        params['ACL'] = cannedACL
+
+    if method == 'get_object':
+        httpMethod = 'GET'
+    elif method == 'put_object':
+        httpMethod = 'PUT'
+    else:
+        raise ValueError('invalid method')
+
+    url = client.generate_presigned_url(ClientMethod=method, Params=params, ExpiresIn=100000, HttpMethod=httpMethod)
+
+    res = requests.options(url, verify=get_config_ssl_verify())
+    assert res.status_code == 400
+
+    allowed_methods = [httpMethod]
+    allowed_origins = ['example']
+
+    cors_config ={
+        'CORSRules': [
+            {'AllowedMethods': allowed_methods,
+             'AllowedOrigins': allowed_origins,
+            },
+        ]
+    }
+
+    client.put_bucket_cors(Bucket=bucket_name, CORSConfiguration=cors_config)
+
+    headers = {
+        'Origin': 'example',
+        'Access-Control-Request-Method': httpMethod,
+    }
+    _cors_request_and_check(requests.options, url, headers,
+                            200, 'example', httpMethod)
+
+def test_cors_presigned_get_object():
+    _test_cors_options_presigned_method(
+        client=get_client(),
+        method='get_object',
+    )
+
+def test_cors_presigned_get_object_tenant():
+    _test_cors_options_presigned_method(
+        client=get_tenant_client(),
+        method='get_object',
+    )
+
+def test_cors_presigned_put_object():
+    _test_cors_options_presigned_method(
+        client=get_client(),
+        method='put_object',
+    )
+
+def test_cors_presigned_put_object_with_acl():
+    _test_cors_options_presigned_method(
+        client=get_client(),
+        method='put_object',
+        cannedACL='private',
+    )
+
+def test_cors_presigned_put_object_tenant():
+    _test_cors_options_presigned_method(
+        client=get_tenant_client(),
+        method='put_object',
+    )
+
+def test_cors_presigned_put_object_tenant_with_acl():
+    _test_cors_options_presigned_method(
+        client=get_tenant_client(),
+        method='put_object',
+        cannedACL='private',
+    )
+
+@pytest.mark.tagging
+def test_set_bucket_tagging():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    tags={
+        'TagSet': [
+            {
+                'Key': 'Hello',
+                'Value': 'World'
+            },
+        ]
+    }
+
+    e = assert_raises(ClientError, client.get_bucket_tagging, Bucket=bucket_name)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 404
+    assert error_code == 'NoSuchTagSet'
+
+    client.put_bucket_tagging(Bucket=bucket_name, Tagging=tags)
+
+    response = client.get_bucket_tagging(Bucket=bucket_name)
+    assert len(response['TagSet']) == 1
+    assert response['TagSet'][0]['Key'] == 'Hello'
+    assert response['TagSet'][0]['Value'] == 'World'
+
+    response = client.delete_bucket_tagging(Bucket=bucket_name)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 204
+
+    e = assert_raises(ClientError, client.get_bucket_tagging, Bucket=bucket_name)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 404
+    assert error_code == 'NoSuchTagSet'
+
+
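+# The Fake*File helpers below stand in for real file objects so the tests can
+# interrupt an upload or download mid-transfer and check that readers still
+# see consistent object contents.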
+class FakeFile(object):
+    """
+    file that simulates seek, tell, and current character
+    """
+    def __init__(self, char='A', interrupt=None):
+        self.offset = 0
+        self.char = bytes(char, 'utf-8')
+        self.interrupt = interrupt
+
+    def seek(self, offset, whence=os.SEEK_SET):
+        if whence == os.SEEK_SET:
+            self.offset = offset
+        elif whence == os.SEEK_END:
+            self.offset = self.size + offset
+        elif whence == os.SEEK_CUR:
+            self.offset += offset
+
+    def tell(self):
+        return self.offset
+
+class FakeWriteFile(FakeFile):
+    """
+    file that simulates interruptible reads of constant data
+    """
+    def __init__(self, size, char='A', interrupt=None):
+        FakeFile.__init__(self, char, interrupt)
+        self.size = size
+
+    def read(self, size=-1):
+        if size < 0:
+            size = self.size - self.offset
+        count = min(size, self.size - self.offset)
+        self.offset += count
+
+        # trigger the interrupt just before returning the final chunk
+        if self.interrupt is not None and self.offset == self.size and count > 0:
+            self.interrupt()
+
+        return self.char*count
+
+class FakeReadFile(FakeFile):
+    """
+    file-like download target that accepts writes of constant data and
+    triggers the interrupt callback after the first chunk is written
+    """
+    def __init__(self, size, char='A', interrupt=None):
+        FakeFile.__init__(self, char, interrupt)
+        self.interrupted = False
+        self.size = 0
+        self.expected_size = size
+
+    def write(self, chars):
+        assert chars == self.char*len(chars)
+        self.offset += len(chars)
+        self.size += len(chars)
+
+        # trigger the interrupt once, after the first chunk has been written
+        if not self.interrupted and self.interrupt is not None \
+                and self.offset > 0:
+            self.interrupt()
+            self.interrupted = True
+
+    def close(self):
+        assert self.size == self.expected_size
+
+class FakeFileVerifier(object):
+    """
+    file that verifies expected data has been written
+    """
+    def __init__(self, char=None):
+        self.char = char
+        self.size = 0
+
+    def write(self, data):
+        size = len(data)
+        if self.char is None:
+            self.char = data[0]
+        self.size += size
+        assert data.decode() == self.char*size
+
+def _verify_atomic_key_data(bucket_name, key, size=-1, char=None):
+    """
+    Make sure file is of the expected size and (simulated) content
+    """
+    fp_verify = FakeFileVerifier(char)
+    client = get_client()
+    client.download_fileobj(bucket_name, key, fp_verify)
+    if size >= 0:
+        assert fp_verify.size == size
+
+def _test_atomic_read(file_size):
+    """
+    Create a file of A's, use it to set_contents_from_file.
+    Create a file of B's, use it to re-set_contents_from_file.
+    Re-read the contents, and confirm we get B's
+    """
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+
+    fp_a = FakeWriteFile(file_size, 'A')
+    client.put_object(Bucket=bucket_name, Key='testobj', Body=fp_a)
+
+    fp_b = FakeWriteFile(file_size, 'B')
+    fp_a2 = FakeReadFile(file_size, 'A',
+        lambda: client.put_object(Bucket=bucket_name, Key='testobj', Body=fp_b)
+        )
+
+    read_client = get_client()
+
+    read_client.download_fileobj(bucket_name, 'testobj', fp_a2)
+    fp_a2.close()
+
+    _verify_atomic_key_data(bucket_name, 'testobj', file_size, 'B')
+
+def test_atomic_read_1mb():
+    _test_atomic_read(1024*1024)
+
+def test_atomic_read_4mb():
+    _test_atomic_read(1024*1024*4)
+
+def test_atomic_read_8mb():
+    _test_atomic_read(1024*1024*8)
+
+def _test_atomic_write(file_size):
+    """
+    Create a file of A's, use it to set_contents_from_file.
+    Verify the contents are all A's.
+    Create a file of B's, use it to re-set_contents_from_file.
+    Before re-set continues, verify content's still A's
+    Re-read the contents, and confirm we get B's
+    """
+    bucket_name = get_new_bucket()
+    client = get_client()
+    objname = 'testobj'
+
+
+    # create <file_size> file of A's
+    fp_a = FakeWriteFile(file_size, 'A')
+    client.put_object(Bucket=bucket_name, Key=objname, Body=fp_a)
+
+
+    # verify A's
+    _verify_atomic_key_data(bucket_name, objname, file_size, 'A')
+
+    # create <file_size> file of B's
+    # but try to verify the file before we finish writing all the B's
+    fp_b = FakeWriteFile(file_size, 'B',
+        lambda: _verify_atomic_key_data(bucket_name, objname, file_size, 'A')
+        )
+
+    client.put_object(Bucket=bucket_name, Key=objname, Body=fp_b)
+
+    # verify B's
+    _verify_atomic_key_data(bucket_name, objname, file_size, 'B')
+
+def test_atomic_write_1mb():
+    _test_atomic_write(1024*1024)
+
+def test_atomic_write_4mb():
+    _test_atomic_write(1024*1024*4)
+
+def test_atomic_write_8mb():
+    _test_atomic_write(1024*1024*8)
+
+def _test_atomic_dual_write(file_size):
+    """
+    create an object, two sessions writing different contents
+    confirm that it is all one or the other
+    """
+    bucket_name = get_new_bucket()
+    objname = 'testobj'
+    client = get_client()
+    client.put_object(Bucket=bucket_name, Key=objname)
+
+    # write <file_size> file of B's
+    # but before we're done, try to write all A's
+    fp_a = FakeWriteFile(file_size, 'A')
+
+    def rewind_put_fp_a():
+        fp_a.seek(0)
+        client.put_object(Bucket=bucket_name, Key=objname, Body=fp_a)
+
+    fp_b = FakeWriteFile(file_size, 'B', rewind_put_fp_a)
+    client.put_object(Bucket=bucket_name, Key=objname, Body=fp_b)
+
+    # verify the file
+    _verify_atomic_key_data(bucket_name, objname, file_size, 'B')
+
+def test_atomic_dual_write_1mb():
+    _test_atomic_dual_write(1024*1024)
+
+def test_atomic_dual_write_4mb():
+    _test_atomic_dual_write(1024*1024*4)
+
+def test_atomic_dual_write_8mb():
+    _test_atomic_dual_write(1024*1024*8)
+
+def _test_atomic_conditional_write(file_size):
+    """
+    Create a file of A's, use it to set_contents_from_file.
+    Verify the contents are all A's.
+    Create a file of B's, use it to re-set_contents_from_file.
+    Before re-set continues, verify content's still A's
+    Re-read the contents, and confirm we get B's
+    """
+    bucket_name = get_new_bucket()
+    objname = 'testobj'
+    client = get_client()
+
+    # create <file_size> file of A's
+    fp_a = FakeWriteFile(file_size, 'A')
+    client.put_object(Bucket=bucket_name, Key=objname, Body=fp_a)
+
+    fp_b = FakeWriteFile(file_size, 'B',
+        lambda: _verify_atomic_key_data(bucket_name, objname, file_size, 'A')
+        )
+
+    # add an 'If-Match: *' header to the PUT via a boto3 before-call event hook
+    lf = (lambda **kwargs: kwargs['params']['headers'].update({'If-Match': '*'}))
+    client.meta.events.register('before-call.s3.PutObject', lf)
+    client.put_object(Bucket=bucket_name, Key=objname, Body=fp_b)
+
+    # verify B's
+    _verify_atomic_key_data(bucket_name, objname, file_size, 'B')
+
+@pytest.mark.fails_on_aws
+def test_atomic_conditional_write_1mb():
+    _test_atomic_conditional_write(1024*1024)
+
+def _test_atomic_dual_conditional_write(file_size):
+    """
+    create an object, two sessions writing different contents
+    confirm that it is all one or the other
+    """
+    bucket_name = get_new_bucket()
+    objname = 'testobj'
+    client = get_client()
+
+    fp_a = FakeWriteFile(file_size, 'A')
+    response = client.put_object(Bucket=bucket_name, Key=objname, Body=fp_a)
+    _verify_atomic_key_data(bucket_name, objname, file_size, 'A')
+    etag_fp_a = response['ETag'].replace('"', '')
+
+    # write <file_size> file of C's
+    # but before we're done, try to write all B's
+    fp_b = FakeWriteFile(file_size, 'B')
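+    # condition the outer PUT on fp_a's ETag; the inner PUT (the B's) changes
+    # the object before the outer PUT (the C's) completes, so the outer PUT
+    # should fail with PreconditionFailed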
+    lf = (lambda **kwargs: kwargs['params']['headers'].update({'If-Match': etag_fp_a}))
+    client.meta.events.register('before-call.s3.PutObject', lf)
+    def rewind_put_fp_b():
+        fp_b.seek(0)
+        client.put_object(Bucket=bucket_name, Key=objname, Body=fp_b)
+
+    fp_c = FakeWriteFile(file_size, 'C', rewind_put_fp_b)
+
+    e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key=objname, Body=fp_c)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 412
+    assert error_code == 'PreconditionFailed'
+
+    # verify the file
+    _verify_atomic_key_data(bucket_name, objname, file_size, 'B')
+
+@pytest.mark.fails_on_aws
+# TODO: test not passing with SSL, fix this
+@pytest.mark.fails_on_rgw
+def test_atomic_dual_conditional_write_1mb():
+    _test_atomic_dual_conditional_write(1024*1024)
+
+@pytest.mark.fails_on_aws
+# TODO: test not passing with SSL, fix this
+@pytest.mark.fails_on_rgw
+def test_atomic_write_bucket_gone():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    def remove_bucket():
+        client.delete_bucket(Bucket=bucket_name)
+
+    objname = 'foo'
+    fp_a = FakeWriteFile(1024*1024, 'A', remove_bucket)
+
+    e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key=objname, Body=fp_a)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 404
+    assert error_code == 'NoSuchBucket'
+
+def test_atomic_multipart_upload_write():
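+    # starting and then aborting a multipart upload must not disturb the
+    # existing object written under the same key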
+    bucket_name = get_new_bucket()
+    client = get_client()
+    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
+
+    response = client.create_multipart_upload(Bucket=bucket_name, Key='foo')
+    upload_id = response['UploadId']
+
+    response = client.get_object(Bucket=bucket_name, Key='foo')
+    body = _get_body(response)
+    assert body == 'bar'
+
+    client.abort_multipart_upload(Bucket=bucket_name, Key='foo', UploadId=upload_id)
+
+    response = client.get_object(Bucket=bucket_name, Key='foo')
+    body = _get_body(response)
+    assert body == 'bar'
+
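+# counts how many complete read passes upload_part makes over a part body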
+class Counter:
+    def __init__(self, default_val):
+        self.val = default_val
+
+    def inc(self):
+        self.val = self.val + 1
+
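+# runs 'action' exactly once, on the trigger_count-th call to trigger()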
+class ActionOnCount:
+    def __init__(self, trigger_count, action):
+        self.count = 0
+        self.trigger_count = trigger_count
+        self.action = action
+        self.result = 0
+
+    def trigger(self):
+        self.count = self.count + 1
+
+        if self.count == self.trigger_count:
+            self.result = self.action()
+
+def test_multipart_resend_first_finishes_last():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    key_name = "mymultipart"
+
+    response = client.create_multipart_upload(Bucket=bucket_name, Key=key_name)
+    upload_id = response['UploadId']
+
+    #file_size = 8*1024*1024
+    file_size = 8
+
+    counter = Counter(0)
+    # upload_part may read the body more than once, e.g. once to calculate
+    # the md5 and again to send the data. We want to intervene only on the
+    # final read, but we can't know in advance how many reads there will be,
+    # so do a dry run first and count them.
+
+    fp_dry_run = FakeWriteFile(file_size, 'C',
+        lambda: counter.inc()
+        )
+
+    parts = []
+
+    response = client.upload_part(UploadId=upload_id, Bucket=bucket_name, Key=key_name, PartNumber=1, Body=fp_dry_run)
+
+    parts.append({'ETag': response['ETag'].strip('"'), 'PartNumber': 1})
+    client.complete_multipart_upload(Bucket=bucket_name, Key=key_name, UploadId=upload_id, MultipartUpload={'Parts': parts})
+
+    client.delete_object(Bucket=bucket_name, Key=key_name)
+
+    # clear parts
+    parts[:] = []
+
+    # now the actual test: upload a part of A's, and on its final read re-send
+    # the same part number as B's. The resent (B) upload completes first; the
+    # original (A) upload finishes last and its data should win.
+    fp_b = FakeWriteFile(file_size, 'B')
+    def upload_fp_b():
+        response = client.upload_part(UploadId=upload_id, Bucket=bucket_name, Key=key_name, Body=fp_b, PartNumber=1)
+        parts.append({'ETag': response['ETag'].strip('"'), 'PartNumber': 1})
+
+    action = ActionOnCount(counter.val, lambda: upload_fp_b())
+
+    response = client.create_multipart_upload(Bucket=bucket_name, Key=key_name)
+    upload_id = response['UploadId']
+
+    fp_a = FakeWriteFile(file_size, 'A',
+        lambda: action.trigger()
+        )
+
+    response = client.upload_part(UploadId=upload_id, Bucket=bucket_name, Key=key_name, PartNumber=1, Body=fp_a)
+
+    parts.append({'ETag': response['ETag'].strip('"'), 'PartNumber': 1})
+    client.complete_multipart_upload(Bucket=bucket_name, Key=key_name, UploadId=upload_id, MultipartUpload={'Parts': parts})
+
+    _verify_atomic_key_data(bucket_name, key_name, file_size, 'A')
+
+@pytest.mark.fails_on_dbstore
+def test_ranged_request_response_code():
+    content = 'testcontent'
+
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    client.put_object(Bucket=bucket_name, Key='testobj', Body=content)
+    response = client.get_object(Bucket=bucket_name, Key='testobj', Range='bytes=4-7')
+
+    fetched_content = _get_body(response)
+    assert fetched_content == content[4:8]
+    assert response['ResponseMetadata']['HTTPHeaders']['content-range'] == 'bytes 4-7/11'
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 206
+
+def _generate_random_string(size):
+    return ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(size))
+
+@pytest.mark.fails_on_dbstore
+def test_ranged_big_request_response_code():
+    content = _generate_random_string(8*1024*1024)
+
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    client.put_object(Bucket=bucket_name, Key='testobj', Body=content)
+    response = client.get_object(Bucket=bucket_name, Key='testobj', Range='bytes=3145728-5242880')
+
+    fetched_content = _get_body(response)
+    assert fetched_content == content[3145728:5242881]
+    assert response['ResponseMetadata']['HTTPHeaders']['content-range'] == 'bytes 3145728-5242880/8388608'
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 206
+
+@pytest.mark.fails_on_dbstore
+def test_ranged_request_skip_leading_bytes_response_code():
+    content = 'testcontent'
+
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    client.put_object(Bucket=bucket_name, Key='testobj', Body=content)
+    response = client.get_object(Bucket=bucket_name, Key='testobj', Range='bytes=4-')
+
+    fetched_content = _get_body(response)
+    assert fetched_content == content[4:]
+    assert response['ResponseMetadata']['HTTPHeaders']['content-range'] == 'bytes 4-10/11'
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 206
+
+@pytest.mark.fails_on_dbstore
+def test_ranged_request_return_trailing_bytes_response_code():
+    content = 'testcontent'
+
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    client.put_object(Bucket=bucket_name, Key='testobj', Body=content)
+    response = client.get_object(Bucket=bucket_name, Key='testobj', Range='bytes=-7')
+
+    fetched_content = _get_body(response)
+    assert fetched_content == content[-7:]
+    assert response['ResponseMetadata']['HTTPHeaders']['content-range'] == 'bytes 4-10/11'
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 206
+
+def test_ranged_request_invalid_range():
+    content = 'testcontent'
+
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    client.put_object(Bucket=bucket_name, Key='testobj', Body=content)
+
+    # test invalid range
+    e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key='testobj', Range='bytes=40-50')
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 416
+    assert error_code == 'InvalidRange'
+
+def test_ranged_request_empty_object():
+    content = ''
+
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    client.put_object(Bucket=bucket_name, Key='testobj', Body=content)
+
+    # test invalid range
+    e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key='testobj', Range='bytes=40-50')
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 416
+    assert error_code == 'InvalidRange'
+
+def test_versioning_bucket_create_suspend():
+    bucket_name = get_new_bucket()
+    check_versioning(bucket_name, None)
+
+    check_configure_versioning_retry(bucket_name, "Suspended", "Suspended")
+    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
+    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
+    check_configure_versioning_retry(bucket_name, "Suspended", "Suspended")
+
+def check_obj_content(client, bucket_name, key, version_id, content):
+    response = client.get_object(Bucket=bucket_name, Key=key, VersionId=version_id)
+    if content is not None:
+        body = _get_body(response)
+        assert body == content
+    else:
+        assert response['DeleteMarker'] == True
+
+def check_obj_versions(client, bucket_name, key, version_ids, contents):
+    # check that each listed version points at the expected key and content
+    response = client.list_object_versions(Bucket=bucket_name)
+    versions = response['Versions']
+    # versions in the listing come out newest first, not oldest first like
+    # version_ids & contents, so reverse before comparing
+    versions.reverse()
+
+    for i, version in enumerate(versions):
+        assert version['VersionId'] == version_ids[i]
+        assert version['Key'] == key
+        check_obj_content(client, bucket_name, key, version['VersionId'], contents[i])
+
+def create_multiple_versions(client, bucket_name, key, num_versions, version_ids = None, contents = None, check_versions = True):
+    contents = contents or []
+    version_ids = version_ids or []
+
+    for i in range(num_versions):
+        body = 'content-{i}'.format(i=i)
+        response = client.put_object(Bucket=bucket_name, Key=key, Body=body)
+        version_id = response['VersionId']
+
+        contents.append(body)
+        version_ids.append(version_id)
+
+#    if check_versions:
+#        check_obj_versions(client, bucket_name, key, version_ids, contents)
+
+    return (version_ids, contents)
+
+def remove_obj_version(client, bucket_name, key, version_ids, contents, index):
+    assert len(version_ids) == len(contents)
+    index = index % len(version_ids)
+    rm_version_id = version_ids.pop(index)
+    rm_content = contents.pop(index)
+
+    check_obj_content(client, bucket_name, key, rm_version_id, rm_content)
+
+    client.delete_object(Bucket=bucket_name, Key=key, VersionId=rm_version_id)
+
+    if len(version_ids) != 0:
+        check_obj_versions(client, bucket_name, key, version_ids, contents)
+
+def clean_up_bucket(client, bucket_name, key, version_ids):
+    for version_id in version_ids:
+        client.delete_object(Bucket=bucket_name, Key=key, VersionId=version_id)
+
+    client.delete_bucket(Bucket=bucket_name)
+
+def _do_test_create_remove_versions(client, bucket_name, key, num_versions, remove_start_idx, idx_inc):
+    (version_ids, contents) = create_multiple_versions(client, bucket_name, key, num_versions)
+
+    idx = remove_start_idx
+
+    for j in range(num_versions):
+        remove_obj_version(client, bucket_name, key, version_ids, contents, idx)
+        idx += idx_inc
+
+    response = client.list_object_versions(Bucket=bucket_name)
+    if 'Versions' in response:
+        print(response['Versions'])
+
+
+def test_versioning_obj_create_read_remove():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    client.put_bucket_versioning(Bucket=bucket_name, VersioningConfiguration={'MFADelete': 'Disabled', 'Status': 'Enabled'})
+    key = 'testobj'
+    num_versions = 5
+
+    _do_test_create_remove_versions(client, bucket_name, key, num_versions, -1, 0)
+    _do_test_create_remove_versions(client, bucket_name, key, num_versions, -1, 0)
+    _do_test_create_remove_versions(client, bucket_name, key, num_versions, 0, 0)
+    _do_test_create_remove_versions(client, bucket_name, key, num_versions, 1, 0)
+    _do_test_create_remove_versions(client, bucket_name, key, num_versions, 4, -1)
+    _do_test_create_remove_versions(client, bucket_name, key, num_versions, 3, 3)
+
+def test_versioning_obj_create_read_remove_head():
+    bucket_name = get_new_bucket()
+
+    client = get_client()
+    client.put_bucket_versioning(Bucket=bucket_name, VersioningConfiguration={'MFADelete': 'Disabled', 'Status': 'Enabled'})
+    key = 'testobj'
+    num_versions = 5
+
+    (version_ids, contents) = create_multiple_versions(client, bucket_name, key, num_versions)
+
+    # removes old head object, checks new one
+    removed_version_id = version_ids.pop()
+    contents.pop()
+    num_versions = num_versions-1
+
+    response = client.delete_object(Bucket=bucket_name, Key=key, VersionId=removed_version_id)
+    response = client.get_object(Bucket=bucket_name, Key=key)
+    body = _get_body(response)
+    assert body == contents[-1]
+
+    # add a delete marker
+    response = client.delete_object(Bucket=bucket_name, Key=key)
+    assert response['DeleteMarker'] == True
+
+    delete_marker_version_id = response['VersionId']
+    version_ids.append(delete_marker_version_id)
+
+    response = client.list_object_versions(Bucket=bucket_name)
+    assert len(response['Versions']) == num_versions
+    assert len(response['DeleteMarkers']) == 1
+    assert response['DeleteMarkers'][0]['VersionId'] == delete_marker_version_id
+
+    clean_up_bucket(client, bucket_name, key, version_ids)
+
+def test_versioning_obj_plain_null_version_removal():
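+    # an object written before versioning is enabled gets the 'null' version id;
+    # deleting that version should remove the object and leave no versions behind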
+    bucket_name = get_new_bucket()
+    check_versioning(bucket_name, None)
+
+    client = get_client()
+    key = 'testobjfoo'
+    content = 'fooz'
+    client.put_object(Bucket=bucket_name, Key=key, Body=content)
+
+    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
+    client.delete_object(Bucket=bucket_name, Key=key, VersionId='null')
+
+    e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key=key)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 404
+    assert error_code == 'NoSuchKey'
+
+    response = client.list_object_versions(Bucket=bucket_name)
+    assert not 'Versions' in response
+
+def test_versioning_obj_plain_null_version_overwrite():
+    bucket_name = get_new_bucket()
+    check_versioning(bucket_name, None)
+
+    client = get_client()
+    key = 'testobjfoo'
+    content = 'fooz'
+    client.put_object(Bucket=bucket_name, Key=key, Body=content)
+
+    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
+
+    content2 = 'zzz'
+    response = client.put_object(Bucket=bucket_name, Key=key, Body=content2)
+    response = client.get_object(Bucket=bucket_name, Key=key)
+    body = _get_body(response)
+    assert body == content2
+
+    version_id = response['VersionId']
+    client.delete_object(Bucket=bucket_name, Key=key, VersionId=version_id)
+    response = client.get_object(Bucket=bucket_name, Key=key)
+    body = _get_body(response)
+    assert body == content
+
+    client.delete_object(Bucket=bucket_name, Key=key, VersionId='null')
+
+    e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key=key)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 404
+    assert error_code == 'NoSuchKey'
+
+    response = client.list_object_versions(Bucket=bucket_name)
+    assert not 'Versions' in response
+
+def test_versioning_obj_plain_null_version_overwrite_suspended():
+    bucket_name = get_new_bucket()
+    check_versioning(bucket_name, None)
+
+    client = get_client()
+    key = 'testobjbar'
+    content = 'foooz'
+    client.put_object(Bucket=bucket_name, Key=key, Body=content)
+
+    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
+    check_configure_versioning_retry(bucket_name, "Suspended", "Suspended")
+
+    content2 = 'zzz'
+    response = client.put_object(Bucket=bucket_name, Key=key, Body=content2)
+    response = client.get_object(Bucket=bucket_name, Key=key)
+    body = _get_body(response)
+    assert body == content2
+
+    response = client.list_object_versions(Bucket=bucket_name)
+    # original object with 'null' version id still counts as a version
+    assert len(response['Versions']) == 1
+
+    client.delete_object(Bucket=bucket_name, Key=key, VersionId='null')
+
+    e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key=key)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 404
+    assert error_code == 'NoSuchKey'
+
+    response = client.list_object_versions(Bucket=bucket_name)
+    assert not 'Versions' in response
+
+def delete_suspended_versioning_obj(client, bucket_name, key, version_ids, contents):
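+    # with versioning suspended, a delete without a version id installs a 'null'
+    # delete marker, replacing any existing 'null' entry for the key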
+    client.delete_object(Bucket=bucket_name, Key=key)
+
+    # clear out old null objects in lists since they will get overwritten
+    assert len(version_ids) == len(contents)
+    i = 0
+    for version_id in version_ids:
+        if version_id == 'null':
+            version_ids.pop(i)
+            contents.pop(i)
+        i += 1
+
+    return (version_ids, contents)
+
+def overwrite_suspended_versioning_obj(client, bucket_name, key, version_ids, contents, content):
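+    # with versioning suspended, a plain PUT writes the 'null' version,
+    # replacing any existing 'null' entry for the key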
+    client.put_object(Bucket=bucket_name, Key=key, Body=content)
+
+    # clear out old null objects in lists since they will get overwritten
+    assert len(version_ids) == len(contents)
+    i = 0
+    for version_id in version_ids:
+        if version_id == 'null':
+            version_ids.pop(i)
+            contents.pop(i)
+        i += 1
+
+    # add new content with 'null' version id to the end
+    contents.append(content)
+    version_ids.append('null')
+
+    return (version_ids, contents)
+
+
+def test_versioning_obj_suspend_versions():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
+
+    key = 'testobj'
+    num_versions = 5
+
+    (version_ids, contents) = create_multiple_versions(client, bucket_name, key, num_versions)
+
+    check_configure_versioning_retry(bucket_name, "Suspended", "Suspended")
+
+    delete_suspended_versioning_obj(client, bucket_name, key, version_ids, contents)
+    delete_suspended_versioning_obj(client, bucket_name, key, version_ids, contents)
+
+    overwrite_suspended_versioning_obj(client, bucket_name, key, version_ids, contents, 'null content 1')
+    overwrite_suspended_versioning_obj(client, bucket_name, key, version_ids, contents, 'null content 2')
+    delete_suspended_versioning_obj(client, bucket_name, key, version_ids, contents)
+    overwrite_suspended_versioning_obj(client, bucket_name, key, version_ids, contents, 'null content 3')
+    delete_suspended_versioning_obj(client, bucket_name, key, version_ids, contents)
+
+    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
+    (version_ids, contents) = create_multiple_versions(client, bucket_name, key, 3, version_ids, contents)
+    num_versions += 3
+
+    for idx in range(num_versions):
+        remove_obj_version(client, bucket_name, key, version_ids, contents, idx)
+
+    assert len(version_ids) == 0
+    assert len(version_ids) == len(contents)
+
+@pytest.mark.fails_on_dbstore
+def test_versioning_obj_suspended_copy():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
+
+    key1 = 'testobj1'
+    num_versions = 1
+    (version_ids, contents) = create_multiple_versions(client, bucket_name, key1, num_versions)
+
+    check_configure_versioning_retry(bucket_name, "Suspended", "Suspended")
+
+    content = 'null content'
+    overwrite_suspended_versioning_obj(client, bucket_name, key1, version_ids, contents, content)
+
+    # copy to another object
+    key2 = 'testobj2'
+    copy_source = {'Bucket': bucket_name, 'Key': key1}
+    client.copy_object(Bucket=bucket_name, Key=key2, CopySource=copy_source)
+
+    # delete the source object. keep the 'null' entry in version_ids
+    client.delete_object(Bucket=bucket_name, Key=key1)
+
+    # get the target object
+    response = client.get_object(Bucket=bucket_name, Key=key2)
+    body = _get_body(response)
+    assert body == content
+
+    # cleaning up
+    client.delete_object(Bucket=bucket_name, Key=key2)
+    client.delete_object(Bucket=bucket_name, Key=key2, VersionId='null')
+
+    clean_up_bucket(client, bucket_name, key1, version_ids)
+
+def test_versioning_obj_create_versions_remove_all():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
+
+    key = 'testobj'
+    num_versions = 10
+
+    (version_ids, contents) = create_multiple_versions(client, bucket_name, key, num_versions)
+    for idx in range(num_versions):
+        remove_obj_version(client, bucket_name, key, version_ids, contents, idx)
+
+    assert len(version_ids) == 0
+    assert len(version_ids) == len(contents)
+
+def test_versioning_obj_create_versions_remove_special_names():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
+
+    keys = ['_testobj', '_', ':', ' ']
+    num_versions = 10
+
+    for key in keys:
+        (version_ids, contents) = create_multiple_versions(client, bucket_name, key, num_versions)
+        for idx in range(num_versions):
+            remove_obj_version(client, bucket_name, key, version_ids, contents, idx)
+
+        assert len(version_ids) == 0
+        assert len(version_ids) == len(contents)
+
+@pytest.mark.fails_on_dbstore
+def test_versioning_obj_create_overwrite_multipart():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
+
+    key = 'testobj'
+    num_versions = 3
+    contents = []
+    version_ids = []
+
+    for i in range(num_versions):
+        ret =  _do_test_multipart_upload_contents(bucket_name, key, 3)
+        contents.append(ret)
+
+    response = client.list_object_versions(Bucket=bucket_name)
+    for version in response['Versions']:
+        version_ids.append(version['VersionId'])
+
+    version_ids.reverse()
+    check_obj_versions(client, bucket_name, key, version_ids, contents)
+
+    for idx in range(num_versions):
+        remove_obj_version(client, bucket_name, key, version_ids, contents, idx)
+
+    assert len(version_ids) == 0
+    assert len(version_ids) == len(contents)
+
+def test_versioning_obj_list_marker():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
+
+    key = 'testobj'
+    key2 = 'testobj-1'
+    num_versions = 5
+
+    contents = []
+    version_ids = []
+    contents2 = []
+    version_ids2 = []
+
+    # for key #1
+    for i in range(num_versions):
+        body = 'content-{i}'.format(i=i)
+        response = client.put_object(Bucket=bucket_name, Key=key, Body=body)
+        version_id = response['VersionId']
+
+        contents.append(body)
+        version_ids.append(version_id)
+
+    # for key #2
+    for i in range(num_versions):
+        body = 'content-{i}'.format(i=i)
+        response = client.put_object(Bucket=bucket_name, Key=key2, Body=body)
+        version_id = response['VersionId']
+
+        contents2.append(body)
+        version_ids2.append(version_id)
+
+    response = client.list_object_versions(Bucket=bucket_name)
+    versions = response['Versions']
+    # versions in the listing come out newest first, not oldest first like
+    # version_ids & contents, so reverse before comparing
+    versions.reverse()
+
+    # the 5 versions of key2 (created last) come first in the reversed listing
+    for i in range(5):
+        version = versions[i]
+        assert version['VersionId'] == version_ids2[i]
+        assert version['Key'] == key2
+        check_obj_content(client, bucket_name, key2, version['VersionId'], contents2[i])
+
+    # followed by the 5 versions of key
+    for i in range(5):
+        version = versions[i + 5]
+        assert version['VersionId'] == version_ids[i]
+        assert version['Key'] == key
+        check_obj_content(client, bucket_name, key, version['VersionId'], contents[i])
+
+@pytest.mark.fails_on_dbstore
+def test_versioning_copy_obj_version():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
+
+    key = 'testobj'
+    num_versions = 3
+
+    (version_ids, contents) = create_multiple_versions(client, bucket_name, key, num_versions)
+
+    for i in range(num_versions):
+        new_key_name = 'key_{i}'.format(i=i)
+        copy_source = {'Bucket': bucket_name, 'Key': key, 'VersionId': version_ids[i]}
+        client.copy_object(Bucket=bucket_name, CopySource=copy_source, Key=new_key_name)
+        response = client.get_object(Bucket=bucket_name, Key=new_key_name)
+        body = _get_body(response)
+        assert body == contents[i]
+
+    another_bucket_name = get_new_bucket()
+
+    for i in range(num_versions):
+        new_key_name = 'key_{i}'.format(i=i)
+        copy_source = {'Bucket': bucket_name, 'Key': key, 'VersionId': version_ids[i]}
+        client.copy_object(Bucket=another_bucket_name, CopySource=copy_source, Key=new_key_name)
+        response = client.get_object(Bucket=another_bucket_name, Key=new_key_name)
+        body = _get_body(response)
+        assert body == contents[i]
+
+    new_key_name = 'new_key'
+    copy_source = {'Bucket': bucket_name, 'Key': key}
+    client.copy_object(Bucket=another_bucket_name, CopySource=copy_source, Key=new_key_name)
+
+    response = client.get_object(Bucket=another_bucket_name, Key=new_key_name)
+    body = _get_body(response)
+    assert body == contents[-1]
+
+def test_versioning_multi_object_delete():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
+
+    key = 'key'
+    num_versions = 2
+
+    (version_ids, contents) = create_multiple_versions(client, bucket_name, key, num_versions)
+    assert len(version_ids) == 2
+
+    # delete both versions
+    objects = [{'Key': key, 'VersionId': v} for v in version_ids]
+    client.delete_objects(Bucket=bucket_name, Delete={'Objects': objects})
+
+    response = client.list_object_versions(Bucket=bucket_name)
+    assert not 'Versions' in response
+
+    # now remove again, should all succeed due to idempotency
+    client.delete_objects(Bucket=bucket_name, Delete={'Objects': objects})
+
+    response = client.list_object_versions(Bucket=bucket_name)
+    assert not 'Versions' in response
+
+def test_versioning_multi_object_delete_with_marker():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
+
+    key = 'key'
+    num_versions = 2
+
+    (version_ids, contents) = create_multiple_versions(client, bucket_name, key, num_versions)
+    assert len(version_ids) == num_versions
+    objects = [{'Key': key, 'VersionId': v} for v in version_ids]
+
+    # create a delete marker
+    response = client.delete_object(Bucket=bucket_name, Key=key)
+    assert response['DeleteMarker']
+    objects += [{'Key': key, 'VersionId': response['VersionId']}]
+
+    # delete all versions
+    client.delete_objects(Bucket=bucket_name, Delete={'Objects': objects})
+
+    response = client.list_object_versions(Bucket=bucket_name)
+    assert not 'Versions' in response
+    assert not 'DeleteMarkers' in response
+
+    # now remove again, should all succeed due to idempotency
+    client.delete_objects(Bucket=bucket_name, Delete={'Objects': objects})
+
+    response = client.list_object_versions(Bucket=bucket_name)
+    assert not 'Versions' in response
+    assert not 'DeleteMarkers' in response
+
+@pytest.mark.fails_on_dbstore
+def test_versioning_multi_object_delete_with_marker_create():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
+
+    key = 'key'
+
+    # use delete_objects() to create a delete marker
+    response = client.delete_objects(Bucket=bucket_name, Delete={'Objects': [{'Key': key}]})
+    assert len(response['Deleted']) == 1
+    assert response['Deleted'][0]['DeleteMarker']
+    delete_marker_version_id = response['Deleted'][0]['DeleteMarkerVersionId']
+
+    response = client.list_object_versions(Bucket=bucket_name)
+    delete_markers = response['DeleteMarkers']
+
+    assert len(delete_markers) == 1
+    assert delete_marker_version_id == delete_markers[0]['VersionId']
+    assert key == delete_markers[0]['Key']
+
+def test_versioned_object_acl():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
+
+    key = 'xyz'
+    num_versions = 3
+
+    (version_ids, contents) = create_multiple_versions(client, bucket_name, key, num_versions)
+
+    version_id = version_ids[1]
+
+    response = client.get_object_acl(Bucket=bucket_name, Key=key, VersionId=version_id)
+
+    display_name = get_main_display_name()
+    user_id = get_main_user_id()
+
+    assert response['Owner']['DisplayName'] == display_name
+    assert response['Owner']['ID'] == user_id
+
+    grants = response['Grants']
+    default_policy = [
+            dict(
+                Permission='FULL_CONTROL',
+                ID=user_id,
+                DisplayName=display_name,
+                URI=None,
+                EmailAddress=None,
+                Type='CanonicalUser',
+                ),
+            ]
+
+    check_grants(grants, default_policy)
+
+    client.put_object_acl(ACL='public-read',Bucket=bucket_name, Key=key, VersionId=version_id)
+
+    response = client.get_object_acl(Bucket=bucket_name, Key=key, VersionId=version_id)
+    grants = response['Grants']
+    check_grants(
+        grants,
+        [
+            dict(
+                Permission='READ',
+                ID=None,
+                DisplayName=None,
+                URI='http://acs.amazonaws.com/groups/global/AllUsers',
+                EmailAddress=None,
+                Type='Group',
+                ),
+            dict(
+                Permission='FULL_CONTROL',
+                ID=user_id,
+                DisplayName=display_name,
+                URI=None,
+                EmailAddress=None,
+                Type='CanonicalUser',
+                ),
+            ],
+        )
+
+    client.put_object(Bucket=bucket_name, Key=key)
+
+    response = client.get_object_acl(Bucket=bucket_name, Key=key)
+    grants = response['Grants']
+    check_grants(grants, default_policy)
+
+@pytest.mark.fails_on_dbstore
+def test_versioned_object_acl_no_version_specified():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
+
+    key = 'xyz'
+    num_versions = 3
+
+    (version_ids, contents) = create_multiple_versions(client, bucket_name, key, num_versions)
+
+    response = client.get_object(Bucket=bucket_name, Key=key)
+    version_id = response['VersionId']
+
+    response = client.get_object_acl(Bucket=bucket_name, Key=key, VersionId=version_id)
+
+    display_name = get_main_display_name()
+    user_id = get_main_user_id()
+
+    assert response['Owner']['DisplayName'] == display_name
+    assert response['Owner']['ID'] == user_id
+
+    grants = response['Grants']
+    default_policy = [
+            dict(
+                Permission='FULL_CONTROL',
+                ID=user_id,
+                DisplayName=display_name,
+                URI=None,
+                EmailAddress=None,
+                Type='CanonicalUser',
+                ),
+            ]
+
+    check_grants(grants, default_policy)
+
+    client.put_object_acl(ACL='public-read',Bucket=bucket_name, Key=key)
+
+    response = client.get_object_acl(Bucket=bucket_name, Key=key, VersionId=version_id)
+    grants = response['Grants']
+    check_grants(
+        grants,
+        [
+            dict(
+                Permission='READ',
+                ID=None,
+                DisplayName=None,
+                URI='http://acs.amazonaws.com/groups/global/AllUsers',
+                EmailAddress=None,
+                Type='Group',
+                ),
+            dict(
+                Permission='FULL_CONTROL',
+                ID=user_id,
+                DisplayName=display_name,
+                URI=None,
+                EmailAddress=None,
+                Type='CanonicalUser',
+                ),
+            ],
+        )
+
+def _do_create_object(client, bucket_name, key, i):
+    body = 'data {i}'.format(i=i)
+    client.put_object(Bucket=bucket_name, Key=key, Body=body)
+
+def _do_remove_ver(client, bucket_name, key, version_id):
+    client.delete_object(Bucket=bucket_name, Key=key, VersionId=version_id)
+
+def _do_create_versioned_obj_concurrent(client, bucket_name, key, num):
+    t = []
+    for i in range(num):
+        thr = threading.Thread(target = _do_create_object, args=(client, bucket_name, key, i))
+        thr.start()
+        t.append(thr)
+    return t
+
+def _do_clear_versioned_bucket_concurrent(client, bucket_name):
+    t = []
+    response = client.list_object_versions(Bucket=bucket_name)
+    for version in response.get('Versions', []):
+        thr = threading.Thread(target = _do_remove_ver, args=(client, bucket_name, version['Key'], version['VersionId']))
+        thr.start()
+        t.append(thr)
+    return t
+
+# TODO: remove fails_on_rgw when https://tracker.ceph.com/issues/39142 is resolved
+@pytest.mark.fails_on_rgw
+def test_versioned_concurrent_object_create_concurrent_remove():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
+
+    key = 'myobj'
+    num_versions = 5
+
+    for i in range(5):
+        t = _do_create_versioned_obj_concurrent(client, bucket_name, key, num_versions)
+        _do_wait_completion(t)
+
+        response = client.list_object_versions(Bucket=bucket_name)
+        versions = response['Versions']
+
+        assert len(versions) == num_versions
+
+        t = _do_clear_versioned_bucket_concurrent(client, bucket_name)
+        _do_wait_completion(t)
+
+        response = client.list_object_versions(Bucket=bucket_name)
+        assert not 'Versions' in response
+
+def test_versioned_concurrent_object_create_and_remove():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
+
+    key = 'myobj'
+    num_versions = 3
+
+    all_threads = []
+
+    for i in range(3):
+
+        t = _do_create_versioned_obj_concurrent(client, bucket_name, key, num_versions)
+        all_threads.append(t)
+
+        t = _do_clear_versioned_bucket_concurrent(client, bucket_name)
+        all_threads.append(t)
+
+    for t in all_threads:
+        _do_wait_completion(t)
+
+    t = _do_clear_versioned_bucket_concurrent(client, bucket_name)
+    _do_wait_completion(t)
+
+    response = client.list_object_versions(Bucket=bucket_name)
+    assert not 'Versions' in response
+
+@pytest.mark.lifecycle
+def test_lifecycle_set():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    rules=[{'ID': 'rule1', 'Expiration': {'Days': 1}, 'Prefix': 'test1/', 'Status':'Enabled'},
+           {'ID': 'rule2', 'Expiration': {'Days': 2}, 'Prefix': 'test2/', 'Status':'Disabled'}]
+    lifecycle = {'Rules': rules}
+    response = client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+@pytest.mark.lifecycle
+def test_lifecycle_get():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    rules=[{'ID': 'test1/', 'Expiration': {'Days': 31}, 'Prefix': 'test1/', 'Status':'Enabled'},
+           {'ID': 'test2/', 'Expiration': {'Days': 120}, 'Prefix': 'test2/', 'Status':'Enabled'}]
+    lifecycle = {'Rules': rules}
+    client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
+    response = client.get_bucket_lifecycle_configuration(Bucket=bucket_name)
+    assert response['Rules'] == rules
+
+@pytest.mark.lifecycle
+def test_lifecycle_get_no_id():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    rules=[{'Expiration': {'Days': 31}, 'Prefix': 'test1/', 'Status':'Enabled'},
+           {'Expiration': {'Days': 120}, 'Prefix': 'test2/', 'Status':'Enabled'}]
+    lifecycle = {'Rules': rules}
+    client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
+    response = client.get_bucket_lifecycle_configuration(Bucket=bucket_name)
+    current_lc = response['Rules']
+
+    Rule = namedtuple('Rule',['prefix','status','days'])
+    rules = {'rule1' : Rule('test1/','Enabled',31),
+             'rule2' : Rule('test2/','Enabled',120)}
+
+    for lc_rule in current_lc:
+        if lc_rule['Prefix'] == rules['rule1'].prefix:
+            assert lc_rule['Expiration']['Days'] == rules['rule1'].days
+            assert lc_rule['Status'] == rules['rule1'].status
+            assert 'ID' in lc_rule
+        elif lc_rule['Prefix'] == rules['rule2'].prefix:
+            assert lc_rule['Expiration']['Days'] == rules['rule2'].days
+            assert lc_rule['Status'] == rules['rule2'].status
+            assert 'ID' in lc_rule
+        else:
+            # a rule we did not supply was returned
+            assert False, 'unexpected lifecycle rule prefix: {p}'.format(p=lc_rule['Prefix'])
+
+# The test harness for lifecycle is configured to treat days as 10 second intervals.
+@pytest.mark.lifecycle
+@pytest.mark.lifecycle_expiration
+@pytest.mark.fails_on_aws
+@pytest.mark.fails_on_dbstore
+def test_lifecycle_expiration():
+    bucket_name = _create_objects(keys=['expire1/foo', 'expire1/bar', 'keep2/foo',
+                                        'keep2/bar', 'expire3/foo', 'expire3/bar'])
+    client = get_client()
+    rules=[{'ID': 'rule1', 'Expiration': {'Days': 1}, 'Prefix': 'expire1/', 'Status':'Enabled'},
+           {'ID': 'rule2', 'Expiration': {'Days': 5}, 'Prefix': 'expire3/', 'Status':'Enabled'}]
+    lifecycle = {'Rules': rules}
+    client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
+    response = client.list_objects(Bucket=bucket_name)
+    init_objects = response['Contents']
+
+    lc_interval = get_lc_debug_interval()
+
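+    # the harness maps each lifecycle 'day' to one debug interval, so rule1
+    # (1 day) should have fired by the first listing below and rule2 (5 days)
+    # only by the last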
+    time.sleep(3*lc_interval)
+    response = client.list_objects(Bucket=bucket_name)
+    expire1_objects = response['Contents']
+
+    time.sleep(lc_interval)
+    response = client.list_objects(Bucket=bucket_name)
+    keep2_objects = response['Contents']
+
+    time.sleep(3*lc_interval)
+    response = client.list_objects(Bucket=bucket_name)
+    expire3_objects = response['Contents']
+
+    assert len(init_objects) == 6
+    assert len(expire1_objects) == 4
+    assert len(keep2_objects) == 4
+    assert len(expire3_objects) == 2
+
+@pytest.mark.lifecycle
+@pytest.mark.lifecycle_expiration
+@pytest.mark.fails_on_aws
+@pytest.mark.list_objects_v2
+@pytest.mark.fails_on_dbstore
+def test_lifecyclev2_expiration():
+    bucket_name = _create_objects(keys=['expire1/foo', 'expire1/bar', 'keep2/foo',
+                                        'keep2/bar', 'expire3/foo', 'expire3/bar'])
+    client = get_client()
+    rules=[{'ID': 'rule1', 'Expiration': {'Days': 1}, 'Prefix': 'expire1/', 'Status':'Enabled'},
+           {'ID': 'rule2', 'Expiration': {'Days': 5}, 'Prefix': 'expire3/', 'Status':'Enabled'}]
+    lifecycle = {'Rules': rules}
+    client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
+    response = client.list_objects_v2(Bucket=bucket_name)
+    init_objects = response['Contents']
+
+    lc_interval = get_lc_debug_interval()
+
+    time.sleep(3*lc_interval)
+    response = client.list_objects_v2(Bucket=bucket_name)
+    expire1_objects = response['Contents']
+
+    time.sleep(lc_interval)
+    response = client.list_objects_v2(Bucket=bucket_name)
+    keep2_objects = response['Contents']
+
+    time.sleep(3*lc_interval)
+    response = client.list_objects_v2(Bucket=bucket_name)
+    expire3_objects = response['Contents']
+
+    assert len(init_objects) == 6
+    assert len(expire1_objects) == 4
+    assert len(keep2_objects) == 4
+    assert len(expire3_objects) == 2
+
+@pytest.mark.lifecycle
+@pytest.mark.lifecycle_expiration
+@pytest.mark.fails_on_aws
+def test_lifecycle_expiration_versioning_enabled():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
+    create_multiple_versions(client, bucket_name, "test1/a", 1)
+    client.delete_object(Bucket=bucket_name, Key="test1/a")
+
+    rules=[{'ID': 'rule1', 'Expiration': {'Days': 1}, 'Prefix': 'test1/', 'Status':'Enabled'}]
+    lifecycle = {'Rules': rules}
+    client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
+
+    lc_interval = get_lc_debug_interval()
+
+    time.sleep(3*lc_interval)
+
+    response  = client.list_object_versions(Bucket=bucket_name)
+    versions = response['Versions']
+    delete_markers = response['DeleteMarkers']
+    assert len(versions) == 1
+    assert len(delete_markers) == 1
+
+@pytest.mark.lifecycle
+@pytest.mark.lifecycle_expiration
+@pytest.mark.fails_on_aws
+def test_lifecycle_expiration_tags1():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    tom_key = 'days1/tom'
+    tom_tagset = {'TagSet':
+                  [{'Key': 'tom', 'Value': 'sawyer'}]}
+
+    client.put_object(Bucket=bucket_name, Key=tom_key, Body='tom_body')
+
+    response = client.put_object_tagging(Bucket=bucket_name, Key=tom_key,
+                                         Tagging=tom_tagset)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    lifecycle_config = {
+        'Rules': [
+            {
+                'Expiration': {
+                    'Days': 1,
+                },
+                'ID': 'rule_tag1',
+                'Filter': {
+                    'Prefix': 'days1/',
+                    'Tag': {
+                        'Key': 'tom',
+                        'Value': 'sawyer'
+                    },
+                },
+                'Status': 'Enabled',
+            },
+        ]
+    }
+
+    response = client.put_bucket_lifecycle_configuration(
+        Bucket=bucket_name, LifecycleConfiguration=lifecycle_config)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    lc_interval = get_lc_debug_interval()
+
+    time.sleep(3*lc_interval)
+
+    # the tagged object matched rule_tag1 and should have expired
+    response = client.list_objects(Bucket=bucket_name)
+    try:
+        expire_objects = response['Contents']
+    except KeyError:
+        expire_objects = []
+
+    assert len(expire_objects) == 0
+
+# factor out common setup code
+def setup_lifecycle_tags2(client, bucket_name):
+    tom_key = 'days1/tom'
+    tom_tagset = {'TagSet':
+                  [{'Key': 'tom', 'Value': 'sawyer'}]}
+
+    client.put_object(Bucket=bucket_name, Key=tom_key, Body='tom_body')
+
+    response = client.put_object_tagging(Bucket=bucket_name, Key=tom_key,
+                                         Tagging=tom_tagset)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    huck_key = 'days1/huck'
+    huck_tagset = {
+        'TagSet':
+        [{'Key': 'tom', 'Value': 'sawyer'},
+         {'Key': 'huck', 'Value': 'finn'}]}
+
+    client.put_object(Bucket=bucket_name, Key=huck_key, Body='huck_body')
+
+    response = client.put_object_tagging(Bucket=bucket_name, Key=huck_key,
+                                         Tagging=huck_tagset)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    lifecycle_config = {
+        'Rules': [
+            {
+                'Expiration': {
+                    'Days': 1,
+                },
+                'ID': 'rule_tag1',
+                'Filter': {
+                    'Prefix': 'days1/',
+                    'Tag': {
+                        'Key': 'tom',
+                        'Value': 'sawyer'
+                    },
+                    'And': {
+                        'Prefix': 'days1',
+                        'Tags': [
+                            {
+                                'Key': 'huck',
+                                'Value': 'finn'
+                            },
+                        ]
+                    }
+                },
+                'Status': 'Enabled',
+            },
+        ]
+    }
+
+    response = client.put_bucket_lifecycle_configuration(
+        Bucket=bucket_name, LifecycleConfiguration=lifecycle_config)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+    return response
+
+@pytest.mark.lifecycle
+@pytest.mark.lifecycle_expiration
+@pytest.mark.fails_on_aws
+@pytest.mark.fails_on_dbstore
+def test_lifecycle_expiration_tags2():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    response = setup_lifecycle_tags2(client, bucket_name)
+
+    lc_interval = get_lc_debug_interval()
+
+    time.sleep(3*lc_interval)
+    response = client.list_objects(Bucket=bucket_name)
+    expire1_objects = response['Contents']
+
+    assert len(expire1_objects) == 1
+
+@pytest.mark.lifecycle
+@pytest.mark.lifecycle_expiration
+@pytest.mark.fails_on_aws
+@pytest.mark.fails_on_dbstore
+def test_lifecycle_expiration_versioned_tags2():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    # mix in versioning
+    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
+
+    response = setup_lifecycle_tags2(client, bucket_name)
+
+    lc_interval = get_lc_debug_interval()
+
+    time.sleep(3*lc_interval)
+    response = client.list_objects(Bucket=bucket_name)
+    expire1_objects = response['Contents']
+
+    assert len(expire1_objects) == 1
+
+# setup for the scenario from rhbz#1877737 (reported by Vidushi Mishra)
+def setup_lifecycle_noncur_tags(client, bucket_name, days):
+
+    # first create and tag the objects (10 versions of 1)
+    key = "myobject_"
+    tagset = {'TagSet':
+              [{'Key': 'vidushi', 'Value': 'mishra'}]}
+
+    for ix in range(10):
+        body = "%s v%d" % (key, ix)
+        response = client.put_object(Bucket=bucket_name, Key=key, Body=body)
+        assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+        response = client.put_object_tagging(Bucket=bucket_name, Key=key,
+                                             Tagging=tagset)
+        assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    lifecycle_config = {
+        'Rules': [
+            {
+                'NoncurrentVersionExpiration': {
+                    'NoncurrentDays': days,
+                },
+                'ID': 'rule_tag1',
+                'Filter': {
+                    'Prefix': '',
+                    'Tag': {
+                        'Key': 'vidushi',
+                        'Value': 'mishra'
+                    },
+                },
+                'Status': 'Enabled',
+            },
+        ]
+    }
+
+    response = client.put_bucket_lifecycle_configuration(
+        Bucket=bucket_name, LifecycleConfiguration=lifecycle_config)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+    return response
+
+def verify_lifecycle_expiration_noncur_tags(client, bucket_name, secs):
+    time.sleep(secs)
+    try:
+        response = client.list_object_versions(Bucket=bucket_name)
+        objs_list = response['Versions']
+    except KeyError:
+        objs_list = []
+    return len(objs_list)
+
+@pytest.mark.lifecycle
+@pytest.mark.lifecycle_expiration
+@pytest.mark.fails_on_aws
+@pytest.mark.fails_on_dbstore
+def test_lifecycle_expiration_noncur_tags1():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
+
+    # create 10 object versions (9 noncurrent) and a tag-filter
+    # noncurrent version expiration at 4 "days"
+    response = setup_lifecycle_noncur_tags(client, bucket_name, 4)
+
+    lc_interval = get_lc_debug_interval()
+
+    num_objs = verify_lifecycle_expiration_noncur_tags(
+        client, bucket_name, 2*lc_interval)
+
+    # at T+20, 10 objects should exist
+    assert num_objs == 10
+
+    num_objs = verify_lifecycle_expiration_noncur_tags(
+        client, bucket_name, 5*lc_interval)
+
+    # at T+60, only the current object version should exist
+    assert num_objs == 1
+
+def wait_interval_list_object_versions(client, bucket_name, secs):
+    time.sleep(secs)
+    try:
+        response = client.list_object_versions(Bucket=bucket_name)
+        objs_list = response['Versions']
+    except KeyError:
+        objs_list = []
+    return len(objs_list)
+
+@pytest.mark.lifecycle
+@pytest.mark.lifecycle_expiration
+@pytest.mark.fails_on_aws
+@pytest.mark.fails_on_dbstore
+def test_lifecycle_expiration_newer_noncurrent():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
+
+    # create 10 object versions (9 noncurrent)
+    key = "myobject_"
+
+    for ix in range(10):
+        body = "%s v%d" % (key, ix)
+        response = client.put_object(Bucket=bucket_name, Key=key, Body=body)
+        assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    # add a lifecycle rule which sets newer-noncurrent-versions to 5
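+    # NewerNoncurrentVersions=5 retains the five most recent noncurrent versions;
+    # older noncurrent versions expire once NoncurrentDays have elapsed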
+    days = 1
+    lifecycle_config = {
+        'Rules': [
+            {
+                'NoncurrentVersionExpiration': {
+                    'NoncurrentDays': days,
+                    'NewerNoncurrentVersions': 5,
+                },
+                'ID': 'newer_noncurrent1',
+                'Filter': {
+                    'Prefix': '',
+                },
+                'Status': 'Enabled',
+            },
+        ]
+    }
+
+    response = client.put_bucket_lifecycle_configuration(
+        Bucket=bucket_name, LifecycleConfiguration=lifecycle_config)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    lc_interval = get_lc_debug_interval()
+
+    num_objs = wait_interval_list_object_versions(
+        client, bucket_name, 2*lc_interval)
+
+    # at T+20, 6 objects should exist (1 current and 5 retained noncurrent)
+    assert num_objs == 6
+
+def get_byte_buffer(nbytes):
+    return BytesIO(b"b" * nbytes)
+
+@pytest.mark.lifecycle
+@pytest.mark.lifecycle_expiration
+@pytest.mark.fails_on_aws
+@pytest.mark.fails_on_dbstore
+def test_lifecycle_expiration_size_gt():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
+
+    # create one object lt and one object gt 2000 bytes
+    key = "myobject_small"
+    body = get_byte_buffer(1000)
+    response = client.put_object(Bucket=bucket_name, Key=key, Body=body)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    key = "myobject_big"
+    body = get_byte_buffer(3000)
+    response = client.put_object(Bucket=bucket_name, Key=key, Body=body)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    # add a lifecycle rule which expires objects greater than 2000 bytes
+    days = 1
+    lifecycle_config = {
+        'Rules': [
+            {
+                'Expiration': {
+                    'Days': days
+                },
+                'ID': 'object_gt1',
+                'Filter': {
+                    'Prefix': '',
+                    'ObjectSizeGreaterThan': 2000
+                },
+                'Status': 'Enabled',
+            },
+        ]
+    }
+
+    response = client.put_bucket_lifecycle_configuration(
+        Bucket=bucket_name, LifecycleConfiguration=lifecycle_config)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    lc_interval = get_lc_debug_interval()
+    time.sleep(10*lc_interval)
+
+    # we should find only the small object present
+    response = client.list_objects(Bucket=bucket_name)
+    objects = response['Contents']
+
+    assert len(objects) == 1
+    assert objects[0]['Key'] == "myobject_small"
+
+@pytest.mark.lifecycle
+@pytest.mark.lifecycle_expiration
+@pytest.mark.fails_on_aws
+@pytest.mark.fails_on_dbstore
+def test_lifecycle_expiration_size_lt():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
+
+    # create one object lt and one object gt 2000 bytes
+    key = "myobject_small"
+    body = get_byte_buffer(1000)
+    response = client.put_object(Bucket=bucket_name, Key=key, Body=body)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    key = "myobject_big"
+    body = get_byte_buffer(3000)
+    response = client.put_object(Bucket=bucket_name, Key=key, Body=body)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    # add a lifecycle rule which expires objects less than 2000 bytes
+    days = 1
+    lifecycle_config = {
+        'Rules': [
+            {
+                'Expiration': {
+                    'Days': days
+                },
+                'ID': 'object_lt1',
+                'Filter': {
+                    'Prefix': '',
+                    'ObjectSizeLessThan': 2000
+                },
+                'Status': 'Enabled',
+            },
+        ]
+    }
+
+    response = client.put_bucket_lifecycle_configuration(
+        Bucket=bucket_name, LifecycleConfiguration=lifecycle_config)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    lc_interval = get_lc_debug_interval()
+    time.sleep(2*lc_interval)
+
+    # we should find only the large object present
+    response = client.list_objects(Bucket=bucket_name)
+    objects = response['Contents']
+
+    assert len(objects) == 1
+    assert objects[0]['Key'] == "myobject_big"
+
+@pytest.mark.lifecycle
+def test_lifecycle_id_too_long():
+    bucket_name = get_new_bucket()
+    client = get_client()
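+    # lifecycle rule IDs are limited to 255 characters, so a 256-character ID is rejected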
+    rules=[{'ID': 256*'a', 'Expiration': {'Days': 2}, 'Prefix': 'test1/', 'Status':'Enabled'}]
+    lifecycle = {'Rules': rules}
+
+    e = assert_raises(ClientError, client.put_bucket_lifecycle_configuration, Bucket=bucket_name, LifecycleConfiguration=lifecycle)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 400
+    assert error_code == 'InvalidArgument'
+
+@pytest.mark.lifecycle
+def test_lifecycle_same_id():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    rules=[{'ID': 'rule1', 'Expiration': {'Days': 1}, 'Prefix': 'test1/', 'Status':'Enabled'},
+           {'ID': 'rule1', 'Expiration': {'Days': 2}, 'Prefix': 'test2/', 'Status':'Enabled'}]
+    lifecycle = {'Rules': rules}
+
+    e = assert_raises(ClientError, client.put_bucket_lifecycle_configuration, Bucket=bucket_name, LifecycleConfiguration=lifecycle)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 400
+    assert error_code == 'InvalidArgument'
+
+@pytest.mark.lifecycle
+def test_lifecycle_invalid_status():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    rules=[{'ID': 'rule1', 'Expiration': {'Days': 2}, 'Prefix': 'test1/', 'Status':'enabled'}]
+    lifecycle = {'Rules': rules}
+
+    e = assert_raises(ClientError, client.put_bucket_lifecycle_configuration, Bucket=bucket_name, LifecycleConfiguration=lifecycle)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 400
+    assert error_code == 'MalformedXML'
+
+    rules=[{'ID': 'rule1', 'Expiration': {'Days': 2}, 'Prefix': 'test1/', 'Status':'disabled'}]
+    lifecycle = {'Rules': rules}
+
+    e = assert_raises(ClientError, client.put_bucket_lifecycle, Bucket=bucket_name, LifecycleConfiguration=lifecycle)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 400
+    assert error_code == 'MalformedXML'
+
+    rules=[{'ID': 'rule1', 'Expiration': {'Days': 2}, 'Prefix': 'test1/', 'Status':'invalid'}]
+    lifecycle = {'Rules': rules}
+
+    e = assert_raises(ClientError, client.put_bucket_lifecycle_configuration, Bucket=bucket_name, LifecycleConfiguration=lifecycle)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 400
+    assert error_code == 'MalformedXML'
+
+@pytest.mark.lifecycle
+def test_lifecycle_set_date():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    rules=[{'ID': 'rule1', 'Expiration': {'Date': '2017-09-27'}, 'Prefix': 'test1/', 'Status':'Enabled'}]
+    lifecycle = {'Rules': rules}
+
+    response = client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+@pytest.mark.lifecycle
+def test_lifecycle_set_invalid_date():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    rules=[{'ID': 'rule1', 'Expiration': {'Date': '20200101'}, 'Prefix': 'test1/', 'Status':'Enabled'}]
+    lifecycle = {'Rules': rules}
+
+    e = assert_raises(ClientError, client.put_bucket_lifecycle_configuration, Bucket=bucket_name, LifecycleConfiguration=lifecycle)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 400
+
+@pytest.mark.lifecycle
+@pytest.mark.lifecycle_expiration
+@pytest.mark.fails_on_aws
+@pytest.mark.fails_on_dbstore
+def test_lifecycle_expiration_date():
+    bucket_name = _create_objects(keys=['past/foo', 'future/bar'])
+    client = get_client()
+    rules=[{'ID': 'rule1', 'Expiration': {'Date': '2015-01-01'}, 'Prefix': 'past/', 'Status':'Enabled'},
+           {'ID': 'rule2', 'Expiration': {'Date': '2030-01-01'}, 'Prefix': 'future/', 'Status':'Enabled'}]
+    lifecycle = {'Rules': rules}
+    client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
+    response = client.list_objects(Bucket=bucket_name)
+    init_objects = response['Contents']
+
+    lc_interval = get_lc_debug_interval()
+
+    # Wait for first expiration (plus fudge to handle the timer window)
+    time.sleep(3*lc_interval)
+    response = client.list_objects(Bucket=bucket_name)
+    expire_objects = response['Contents']
+
+    assert len(init_objects) == 2
+    assert len(expire_objects) == 1
+
+@pytest.mark.lifecycle
+@pytest.mark.lifecycle_expiration
+def test_lifecycle_expiration_days0():
+    bucket_name = _create_objects(keys=['days0/foo', 'days0/bar'])
+    client = get_client()
+
+    rules=[{'Expiration': {'Days': 0}, 'ID': 'rule1', 'Prefix': 'days0/', 'Status':'Enabled'}]
+    lifecycle = {'Rules': rules}
+
+    # days: 0 is legal in a transition rule, but not legal in an
+    # expiration rule
+    response_code = ""
+    try:
+        response = client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
+    except botocore.exceptions.ClientError as e:
+        response_code = e.response['Error']['Code']
+
+    assert response_code == 'InvalidArgument'
+
+
+def setup_lifecycle_expiration(client, bucket_name, rule_id, delta_days,
+                                    rule_prefix):
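+    # install a prefix-scoped expiration rule, then PUT one object under that
+    # prefix and return the PUT response so callers can check x-amz-expiration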
+    rules=[{'ID': rule_id,
+            'Expiration': {'Days': delta_days}, 'Prefix': rule_prefix,
+            'Status':'Enabled'}]
+    lifecycle = {'Rules': rules}
+    response = client.put_bucket_lifecycle_configuration(
+        Bucket=bucket_name, LifecycleConfiguration=lifecycle)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    key = rule_prefix + 'foo'
+    body = 'bar'
+    response = client.put_object(Bucket=bucket_name, Key=key, Body=body)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+    return response
+
+def check_lifecycle_expiration_header(response, start_time, rule_id,
+                                      delta_days):
+    expr_exists = ('x-amz-expiration' in response['ResponseMetadata']['HTTPHeaders'])
+    if (not expr_exists):
+        return False
+    expr_hdr = response['ResponseMetadata']['HTTPHeaders']['x-amz-expiration']
+
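+    # e.g. x-amz-expiration: expiry-date="Thu, 01 Jan 2030 00:00:00 GMT", rule-id="rule1"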
+    m = re.search(r'expiry-date="(.+)", rule-id="(.+)"', expr_hdr)
+
+    expiration = dateutil.parser.parse(m.group(1))
+    days_to_expire = ((expiration.replace(tzinfo=None) - start_time).days == delta_days)
+    rule_eq_id = (m.group(2) == rule_id)
+
+    return  days_to_expire and rule_eq_id
+
+@pytest.mark.lifecycle
+@pytest.mark.lifecycle_expiration
+def test_lifecycle_expiration_header_put():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    now = datetime.datetime.utcnow()
+    response = setup_lifecycle_expiration(
+        client, bucket_name, 'rule1', 1, 'days1/')
+    assert check_lifecycle_expiration_header(response, now, 'rule1', 1)
+
+@pytest.mark.lifecycle
+@pytest.mark.lifecycle_expiration
+@pytest.mark.fails_on_dbstore
+def test_lifecycle_expiration_header_head():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    now = datetime.datetime.utcnow()
+    response = setup_lifecycle_expiration(
+        client, bucket_name, 'rule1', 1, 'days1/')
+
+    key = 'days1/' + 'foo'
+
+    # stat the object, check header
+    response = client.head_object(Bucket=bucket_name, Key=key)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+    assert check_lifecycle_expiration_header(response, now, 'rule1', 1)
+
+@pytest.mark.lifecycle
+@pytest.mark.lifecycle_expiration
+@pytest.mark.fails_on_dbstore
+def test_lifecycle_expiration_header_tags_head():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    lifecycle={
+        "Rules": [
+        {
+            "Filter": {
+                "Tag": {"Key": "key1", "Value": "tag1"}
+            },
+            "Status": "Enabled",
+            "Expiration": {
+                "Days": 1
+            },
+            "ID": "rule1"
+            },
+        ]
+    }
+    response = client.put_bucket_lifecycle_configuration(
+        Bucket=bucket_name, LifecycleConfiguration=lifecycle)
+    key1 = "obj_key1"
+    body1 = "obj_key1_body"
+    tags1={'TagSet': [{'Key': 'key1', 'Value': 'tag1'},
+          {'Key': 'key5','Value': 'tag5'}]}
+    response = client.put_object(Bucket=bucket_name, Key=key1, Body=body1)
+    response = client.put_object_tagging(Bucket=bucket_name, Key=key1,Tagging=tags1)
+
+    # stat the object, check header
+    response = client.head_object(Bucket=bucket_name, Key=key1)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+    assert check_lifecycle_expiration_header(response, datetime.datetime.now(None), 'rule1', 1)
+
+    # verify that the header is not returned when the rule's tag filter no longer matches
+    lifecycle={
+        "Rules": [
+        {
+            "Filter": {
+                "Tag": {"Key": "key2", "Value": "tag1"}
+            },
+            "Status": "Enabled",
+            "Expiration": {
+                "Days": 1
+            },
+            "ID": "rule1"
+            },
+        ]
+    }
+    response = client.put_bucket_lifecycle_configuration(
+        Bucket=bucket_name, LifecycleConfiguration=lifecycle)
+    # stat the object, check header
+    response = client.head_object(Bucket=bucket_name, Key=key1)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+    assert check_lifecycle_expiration_header(response, datetime.datetime.now(None), 'rule1', 1) == False
+
+@pytest.mark.lifecycle
+@pytest.mark.lifecycle_expiration
+@pytest.mark.fails_on_dbstore
+def test_lifecycle_expiration_header_and_tags_head():
+    now = datetime.datetime.utcnow()
+    bucket_name = get_new_bucket()
+    client = get_client()
+    lifecycle={
+        "Rules": [
+        {
+            "Filter": {
+                "And": {
+                    "Tags": [
+                        {
+                            "Key": "key1",
+                            "Value": "tag1"
+                        },
+                        {
+                            "Key": "key5",
+                            "Value": "tag6"
+                        }
+                    ]
+                }
+            },
+            "Status": "Enabled",
+            "Expiration": {
+                "Days": 1
+            },
+            "ID": "rule1"
+            },
+        ]
+    }
+    response = client.put_bucket_lifecycle_configuration(
+        Bucket=bucket_name, LifecycleConfiguration=lifecycle)
+    key1 = "obj_key1"
+    body1 = "obj_key1_body"
+    tags1={'TagSet': [{'Key': 'key1', 'Value': 'tag1'},
+          {'Key': 'key5','Value': 'tag5'}]}
+    response = client.put_object(Bucket=bucket_name, Key=key1, Body=body1)
+    response = client.put_object_tagging(Bucket=bucket_name, Key=key1,Tagging=tags1)
+
+    # stat the object, check header
+    response = client.head_object(Bucket=bucket_name, Key=key1)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+    assert check_lifecycle_expiration_header(response, now, 'rule1', 1) == False
+
+@pytest.mark.lifecycle
+def test_lifecycle_set_noncurrent():
+    bucket_name = _create_objects(keys=['past/foo', 'future/bar'])
+    client = get_client()
+    rules=[{'ID': 'rule1', 'NoncurrentVersionExpiration': {'NoncurrentDays': 2}, 'Prefix': 'past/', 'Status':'Enabled'},
+           {'ID': 'rule2', 'NoncurrentVersionExpiration': {'NoncurrentDays': 3}, 'Prefix': 'future/', 'Status':'Enabled'}]
+    lifecycle = {'Rules': rules}
+    response = client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+@pytest.mark.lifecycle
+@pytest.mark.lifecycle_expiration
+@pytest.mark.fails_on_aws
+@pytest.mark.fails_on_dbstore
+def test_lifecycle_noncur_expiration():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
+    create_multiple_versions(client, bucket_name, "test1/a", 3)
+    # not checking the object contents on the second run, because the function doesn't support multiple checks
+    create_multiple_versions(client, bucket_name, "test2/abc", 3, check_versions=False)
+
+    response  = client.list_object_versions(Bucket=bucket_name)
+    init_versions = response['Versions']
+
+    rules=[{'ID': 'rule1', 'NoncurrentVersionExpiration': {'NoncurrentDays': 2}, 'Prefix': 'test1/', 'Status':'Enabled'}]
+    lifecycle = {'Rules': rules}
+    client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
+
+    lc_interval = get_lc_debug_interval()
+
+    # Wait for first expiration (plus fudge to handle the timer window)
+    time.sleep(5*lc_interval)
+
+    response  = client.list_object_versions(Bucket=bucket_name)
+    expire_versions = response['Versions']
+    assert len(init_versions) == 6
+    assert len(expire_versions) == 4
+
+@pytest.mark.lifecycle
+def test_lifecycle_set_deletemarker():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    rules=[{'ID': 'rule1', 'Expiration': {'ExpiredObjectDeleteMarker': True}, 'Prefix': 'test1/', 'Status':'Enabled'}]
+    lifecycle = {'Rules': rules}
+    response = client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+@pytest.mark.lifecycle
+def test_lifecycle_set_filter():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    rules=[{'ID': 'rule1', 'Expiration': {'ExpiredObjectDeleteMarker': True}, 'Filter': {'Prefix': 'foo'}, 'Status':'Enabled'}]
+    lifecycle = {'Rules': rules}
+    response = client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+@pytest.mark.lifecycle
+def test_lifecycle_set_empty_filter():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    rules=[{'ID': 'rule1', 'Expiration': {'ExpiredObjectDeleteMarker': True}, 'Filter': {}, 'Status':'Enabled'}]
+    lifecycle = {'Rules': rules}
+    response = client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+@pytest.mark.lifecycle
+@pytest.mark.lifecycle_expiration
+@pytest.mark.fails_on_aws
+@pytest.mark.fails_on_dbstore
+def test_lifecycle_deletemarker_expiration():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
+    create_multiple_versions(client, bucket_name, "test1/a", 1)
+    create_multiple_versions(client, bucket_name, "test2/abc", 1, check_versions=False)
+    client.delete_object(Bucket=bucket_name, Key="test1/a")
+    client.delete_object(Bucket=bucket_name, Key="test2/abc")
+
+    response  = client.list_object_versions(Bucket=bucket_name)
+    init_versions = response['Versions']
+    deleted_versions = response['DeleteMarkers']
+    total_init_versions = init_versions + deleted_versions
+
+    rules=[{'ID': 'rule1', 'NoncurrentVersionExpiration': {'NoncurrentDays': 1}, 'Expiration': {'ExpiredObjectDeleteMarker': True}, 'Prefix': 'test1/', 'Status':'Enabled'}]
+    lifecycle = {'Rules': rules}
+    client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
+
+    lc_interval = get_lc_debug_interval()
+
+    # Wait for first expiration (plus fudge to handle the timer window)
+    time.sleep(7*lc_interval)
+
+    response  = client.list_object_versions(Bucket=bucket_name)
+    init_versions = response['Versions']
+    deleted_versions = response['DeleteMarkers']
+    total_expire_versions = init_versions + deleted_versions
+
+    assert len(total_init_versions) == 4
+    assert len(total_expire_versions) == 2
+
+@pytest.mark.lifecycle
+def test_lifecycle_set_multipart():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    rules = [
+        {'ID': 'rule1', 'Prefix': 'test1/', 'Status': 'Enabled',
+         'AbortIncompleteMultipartUpload': {'DaysAfterInitiation': 2}},
+        {'ID': 'rule2', 'Prefix': 'test2/', 'Status': 'Disabled',
+         'AbortIncompleteMultipartUpload': {'DaysAfterInitiation': 3}}
+    ]
+    lifecycle = {'Rules': rules}
+    response = client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+@pytest.mark.lifecycle
+@pytest.mark.lifecycle_expiration
+@pytest.mark.fails_on_aws
+@pytest.mark.fails_on_dbstore
+def test_lifecycle_multipart_expiration():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    key_names = ['test1/a', 'test2/']
+    upload_ids = []
+
+    for key in key_names:
+        response = client.create_multipart_upload(Bucket=bucket_name, Key=key)
+        upload_ids.append(response['UploadId'])
+
+    response = client.list_multipart_uploads(Bucket=bucket_name)
+    init_uploads = response['Uploads']
+
+    rules = [
+        {'ID': 'rule1', 'Prefix': 'test1/', 'Status': 'Enabled',
+         'AbortIncompleteMultipartUpload': {'DaysAfterInitiation': 2}},
+    ]
+    lifecycle = {'Rules': rules}
+    response = client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
+
+    lc_interval = get_lc_debug_interval()
+
+    # Wait for first expiration (plus fudge to handle the timer window)
+    time.sleep(5*lc_interval)
+
+    response = client.list_multipart_uploads(Bucket=bucket_name)
+    expired_uploads = response['Uploads']
+    assert len(init_uploads) == 2
+    assert len(expired_uploads) == 1
+
+@pytest.mark.lifecycle
+def test_lifecycle_transition_set_invalid_date():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    rules=[{'ID': 'rule1', 'Expiration': {'Date': '2023-09-27'},'Transitions': [{'Date': '20220927','StorageClass': 'GLACIER'}],'Prefix': 'test1/', 'Status':'Enabled'}]
+    lifecycle = {'Rules': rules}
+    e = assert_raises(ClientError, client.put_bucket_lifecycle_configuration, Bucket=bucket_name, LifecycleConfiguration=lifecycle)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 400
+
+def _test_encryption_sse_customer_write(file_size):
+    """
+    Write an object consisting of A's of the given size using SSE-C headers,
+    then read it back with the same headers and confirm the contents match.
+    """
+    bucket_name = get_new_bucket()
+    client = get_client()
+    key = 'testobj'
+    data = 'A'*file_size
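+    # SSE-C headers: a base64-encoded 256-bit AES key plus the base64-encoded MD5
+    # of the raw key; the same headers must accompany both the PUT and the GET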
+    sse_client_headers = {
+        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
+        'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
+        'x-amz-server-side-encryption-customer-key-md5': 'DWygnHRtgiJ77HCm+1rvHw=='
+    }
+
+    lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_client_headers))
+    client.meta.events.register('before-call.s3.PutObject', lf)
+    client.put_object(Bucket=bucket_name, Key=key, Body=data)
+
+    lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_client_headers))
+    client.meta.events.register('before-call.s3.GetObject', lf)
+    response = client.get_object(Bucket=bucket_name, Key=key)
+    body = _get_body(response)
+    assert body == data
+
+# The test harness for lifecycle is configured to treat days as 10 second intervals.
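+# (get_lc_debug_interval() returns that configured interval; the timing comments below assume 10 seconds)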
+@pytest.mark.lifecycle
+@pytest.mark.lifecycle_transition
+@pytest.mark.fails_on_aws
+def test_lifecycle_transition():
+    sc = configured_storage_classes()
+    if len(sc) < 3:
+        pytest.skip('requires 3 or more storage classes')
+
+    bucket_name = _create_objects(keys=['expire1/foo', 'expire1/bar', 'keep2/foo',
+                                        'keep2/bar', 'expire3/foo', 'expire3/bar'])
+    client = get_client()
+    rules=[{'ID': 'rule1', 'Transitions': [{'Days': 1, 'StorageClass': sc[1]}], 'Prefix': 'expire1/', 'Status': 'Enabled'},
+           {'ID': 'rule2', 'Transitions': [{'Days': 6, 'StorageClass': sc[2]}], 'Prefix': 'expire3/', 'Status': 'Enabled'}]
+    lifecycle = {'Rules': rules}
+    client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
+
+    # Get list of all keys
+    response = client.list_objects(Bucket=bucket_name)
+    init_keys = _get_keys(response)
+    assert len(init_keys) == 6
+
+    lc_interval = get_lc_debug_interval()
+
+    # Wait for first expiration (plus fudge to handle the timer window)
+    time.sleep(4*lc_interval)
+    expire1_keys = list_bucket_storage_class(client, bucket_name)
+    assert len(expire1_keys['STANDARD']) == 4
+    assert len(expire1_keys[sc[1]]) == 2
+    assert len(expire1_keys[sc[2]]) == 0
+
+    # Wait for next expiration cycle
+    time.sleep(lc_interval)
+    keep2_keys = list_bucket_storage_class(client, bucket_name)
+    assert len(keep2_keys['STANDARD']) == 4
+    assert len(keep2_keys[sc[1]]) == 2
+    assert len(keep2_keys[sc[2]]) == 0
+
+    # Wait for final expiration cycle
+    time.sleep(5*lc_interval)
+    expire3_keys = list_bucket_storage_class(client, bucket_name)
+    assert len(expire3_keys['STANDARD']) == 2
+    assert len(expire3_keys[sc[1]]) == 2
+    assert len(expire3_keys[sc[2]]) == 2
+
+# The test harness for lifecycle is configured to treat days as 10 second intervals.
+@pytest.mark.lifecycle
+@pytest.mark.lifecycle_transition
+@pytest.mark.fails_on_aws
+def test_lifecycle_transition_single_rule_multi_trans():
+    sc = configured_storage_classes()
+    if len(sc) < 3:
+        pytest.skip('requires 3 or more storage classes')
+
+    bucket_name = _create_objects(keys=['expire1/foo', 'expire1/bar', 'keep2/foo',
+                                        'keep2/bar', 'expire3/foo', 'expire3/bar'])
+    client = get_client()
+    rules=[{'ID': 'rule1', 'Transitions': [{'Days': 1, 'StorageClass': sc[1]}, {'Days': 7, 'StorageClass': sc[2]}], 'Prefix': 'expire1/', 'Status': 'Enabled'}]
+    lifecycle = {'Rules': rules}
+    client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
+
+    # Get list of all keys
+    response = client.list_objects(Bucket=bucket_name)
+    init_keys = _get_keys(response)
+    assert len(init_keys) == 6
+
+    lc_interval = get_lc_debug_interval()
+
+    # Wait for first expiration (plus fudge to handle the timer window)
+    time.sleep(5*lc_interval)
+    expire1_keys = list_bucket_storage_class(client, bucket_name)
+    assert len(expire1_keys['STANDARD']) == 4
+    assert len(expire1_keys[sc[1]]) == 2
+    assert len(expire1_keys[sc[2]]) == 0
+
+    # Wait for next expiration cycle
+    time.sleep(lc_interval)
+    keep2_keys = list_bucket_storage_class(client, bucket_name)
+    assert len(keep2_keys['STANDARD']) == 4
+    assert len(keep2_keys[sc[1]]) == 2
+    assert len(keep2_keys[sc[2]]) == 0
+
+    # Wait for final expiration cycle
+    time.sleep(6*lc_interval)
+    expire3_keys = list_bucket_storage_class(client, bucket_name)
+    assert len(expire3_keys['STANDARD']) == 4
+    assert len(expire3_keys[sc[1]]) == 0
+    assert len(expire3_keys[sc[2]]) == 2
+
+@pytest.mark.lifecycle
+@pytest.mark.lifecycle_transition
+def test_lifecycle_set_noncurrent_transition():
+    sc = configured_storage_classes()
+    if len(sc) < 3:
+        pytest.skip('requires 3 or more storage classes')
+
+    bucket = get_new_bucket()
+    client = get_client()
+    rules = [
+        {
+            'ID': 'rule1',
+            'Prefix': 'test1/',
+            'Status': 'Enabled',
+            'NoncurrentVersionTransitions': [
+                {
+                    'NoncurrentDays': 2,
+                    'StorageClass': sc[1]
+                },
+                {
+                    'NoncurrentDays': 4,
+                    'StorageClass': sc[2]
+                }
+            ],
+            'NoncurrentVersionExpiration': {
+                'NoncurrentDays': 6
+            }
+        },
+        {'ID': 'rule2', 'Prefix': 'test2/', 'Status': 'Disabled', 'NoncurrentVersionExpiration': {'NoncurrentDays': 3}}
+    ]
+    lifecycle = {'Rules': rules}
+    response = client.put_bucket_lifecycle_configuration(Bucket=bucket, LifecycleConfiguration=lifecycle)
+
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+
+@pytest.mark.lifecycle
+@pytest.mark.lifecycle_expiration
+@pytest.mark.lifecycle_transition
+@pytest.mark.fails_on_aws
+def test_lifecycle_noncur_transition():
+    sc = configured_storage_classes()
+    if len(sc) < 3:
+        pytest.skip('requires 3 or more storage classes')
+
+    bucket = get_new_bucket()
+    client = get_client()
+    check_configure_versioning_retry(bucket, "Enabled", "Enabled")
+
+    rules = [
+        {
+            'ID': 'rule1',
+            'Prefix': 'test1/',
+            'Status': 'Enabled',
+            'NoncurrentVersionTransitions': [
+                {
+                    'NoncurrentDays': 1,
+                    'StorageClass': sc[1]
+                },
+                {
+                    'NoncurrentDays': 5,
+                    'StorageClass': sc[2]
+                }
+            ],
+            'NoncurrentVersionExpiration': {
+                'NoncurrentDays': 9
+            }
+        }
+    ]
+    lifecycle = {'Rules': rules}
+    response = client.put_bucket_lifecycle_configuration(Bucket=bucket, LifecycleConfiguration=lifecycle)
+
+    create_multiple_versions(client, bucket, "test1/a", 3)
+    create_multiple_versions(client, bucket, "test1/b", 3)
+
+    init_keys = list_bucket_storage_class(client, bucket)
+    assert len(init_keys['STANDARD']) == 6
+
+    lc_interval = get_lc_debug_interval()
+
+    time.sleep(4*lc_interval)
+    expire1_keys = list_bucket_storage_class(client, bucket)
+    assert len(expire1_keys['STANDARD']) == 2
+    assert len(expire1_keys[sc[1]]) == 4
+    assert len(expire1_keys[sc[2]]) == 0
+
+    time.sleep(4*lc_interval)
+    expire1_keys = list_bucket_storage_class(client, bucket)
+    assert len(expire1_keys['STANDARD']) == 2
+    assert len(expire1_keys[sc[1]]) == 0
+    assert len(expire1_keys[sc[2]]) == 4
+
+    time.sleep(6*lc_interval)
+    expire1_keys = list_bucket_storage_class(client, bucket)
+    assert len(expire1_keys['STANDARD']) == 2
+    assert len(expire1_keys[sc[1]]) == 0
+    assert len(expire1_keys[sc[2]]) == 0
+
+@pytest.mark.lifecycle
+@pytest.mark.lifecycle_expiration
+@pytest.mark.lifecycle_transition
+def test_lifecycle_plain_null_version_current_transition():
+    sc = configured_storage_classes()
+    if len(sc) < 2:
+        pytest.skip('requires 2 or more storage classes')
+
+    target_sc = sc[1]
+    assert target_sc != 'STANDARD'
+
+    bucket = get_new_bucket()
+    check_versioning(bucket, None)
+
+    # create a plain object before enabling versioning;
+    # this will be transitioned as a current version
+    client = get_client()
+    key = 'testobjfoo'
+    content = 'fooz'
+    client.put_object(Bucket=bucket, Key=key, Body=content)
+
+    check_configure_versioning_retry(bucket, "Enabled", "Enabled")
+
+    client.put_bucket_lifecycle_configuration(Bucket=bucket, LifecycleConfiguration={
+            'Rules': [
+                {
+                    'ID': 'rule1',
+                    'Prefix': 'testobj',
+                    'Status': 'Enabled',
+                    'Transitions': [
+                        {
+                            'Days': 1,
+                            'StorageClass': target_sc
+                        },
+                    ]
+                }
+            ]
+        })
+
+    lc_interval = get_lc_debug_interval()
+    time.sleep(4*lc_interval)
+
+    keys = list_bucket_storage_class(client, bucket)
+    assert len(keys['STANDARD']) == 0
+    assert len(keys[target_sc]) == 1
+
+def verify_object(client, bucket, key, content=None, sc=None):
+    response = client.get_object(Bucket=bucket, Key=key)
+
+    if (sc == None):
+        sc = 'STANDARD'
+
+    if ('StorageClass' in response):
+        assert response['StorageClass'] == sc
+    else: #storage class should be STANDARD
+        assert 'STANDARD' == sc
+
+    if (content != None):
+        body = _get_body(response)
+        assert body == content
+
+# The test harness for lifecycle is configured to treat days as 10 second intervals.
+@pytest.mark.lifecycle
+@pytest.mark.lifecycle_transition
+@pytest.mark.cloud_transition
+@pytest.mark.fails_on_aws
+@pytest.mark.fails_on_dbstore
+def test_lifecycle_cloud_transition():
+    cloud_sc = get_cloud_storage_class()
+    if cloud_sc == None:
+        pytest.skip('no cloud_storage_class configured')
+
+    retain_head_object = get_cloud_retain_head_object()
+    target_path = get_cloud_target_path()
+    target_sc = get_cloud_target_storage_class()
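+    # these values come from the optional [s3 cloud] section of the test configuration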
+
+    keys=['expire1/foo', 'expire1/bar', 'keep2/foo', 'keep2/bar']
+    bucket_name = _create_objects(keys=keys)
+    client = get_client()
+    rules=[{'ID': 'rule1', 'Transitions': [{'Days': 1, 'StorageClass': cloud_sc}], 'Prefix': 'expire1/', 'Status': 'Enabled'}]
+    lifecycle = {'Rules': rules}
+    client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
+
+    # Get list of all keys
+    response = client.list_objects(Bucket=bucket_name)
+    init_keys = _get_keys(response)
+    assert len(init_keys) == 4
+
+    lc_interval = get_lc_debug_interval()
+
+    # Wait for first expiration (plus fudge to handle the timer window)
+    time.sleep(10*lc_interval)
+    expire1_keys = list_bucket_storage_class(client, bucket_name)
+    assert len(expire1_keys['STANDARD']) == 2
+
+    if (retain_head_object != None and retain_head_object == "true"):
+        assert len(expire1_keys[cloud_sc]) == 2
+    else:
+        assert len(expire1_keys[cloud_sc]) == 0
+
+    time.sleep(2*lc_interval)
+    # Check if objects copied to target path
+    if target_path == None:
+        target_path = "rgwx-default-" + cloud_sc.lower() + "-cloud-bucket"
+    prefix = bucket_name + "/"
+
+    cloud_client = get_cloud_client()
+
+    time.sleep(12*lc_interval)
+    expire1_key1_str = prefix + keys[0]
+    verify_object(cloud_client, target_path, expire1_key1_str, keys[0], target_sc)
+
+    expire1_key2_str = prefix + keys[1]
+    verify_object(cloud_client, target_path, expire1_key2_str, keys[1], target_sc)
+
+    # Now verify the object on source rgw
+    src_key = keys[0]
+    if (retain_head_object != None and retain_head_object == "true"):
+        # verify HEAD response
+        response = client.head_object(Bucket=bucket_name, Key=keys[0])
+        assert 0 == response['ContentLength']
+        assert cloud_sc == response['StorageClass']
+
+        # GET should return InvalidObjectState error
+        e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key=src_key)
+        status, error_code = _get_status_and_error_code(e.response)
+        assert status == 403
+        assert error_code == 'InvalidObjectState'
+
+        # COPY of object should return InvalidObjectState error
+        copy_source = {'Bucket': bucket_name, 'Key': src_key}
+        e = assert_raises(ClientError, client.copy, CopySource=copy_source, Bucket=bucket_name, Key='copy_obj')
+        status, error_code = _get_status_and_error_code(e.response)
+        assert status == 403
+        assert error_code == 'InvalidObjectState'
+
+        # DELETE should succeed
+        response = client.delete_object(Bucket=bucket_name, Key=src_key)
+        e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key=src_key)
+        status, error_code = _get_status_and_error_code(e.response)
+        assert status == 404
+        assert error_code == 'NoSuchKey'
+
+# Similar to 'test_lifecycle_transition' but for cloud transition
+@pytest.mark.lifecycle
+@pytest.mark.lifecycle_transition
+@pytest.mark.cloud_transition
+@pytest.mark.fails_on_aws
+@pytest.mark.fails_on_dbstore
+def test_lifecycle_cloud_multiple_transition():
+    cloud_sc = get_cloud_storage_class()
+    if cloud_sc == None:
+        pytest.skip('[s3 cloud] section missing cloud_storage_class')
+
+    retain_head_object = get_cloud_retain_head_object()
+    target_path = get_cloud_target_path()
+    target_sc = get_cloud_target_storage_class()
+
+    sc1 = get_cloud_regular_storage_class()
+
+    if (sc1 == None):
+        pytest.skip('[s3 cloud] section missing storage_class')
+
+    sc = ['STANDARD', sc1, cloud_sc]
+
+    keys=['expire1/foo', 'expire1/bar', 'keep2/foo', 'keep2/bar']
+    bucket_name = _create_objects(keys=keys)
+    client = get_client()
+    rules=[{'ID': 'rule1', 'Transitions': [{'Days': 1, 'StorageClass': sc1}], 'Prefix': 'expire1/', 'Status': 'Enabled'},
+           {'ID': 'rule2', 'Transitions': [{'Days': 5, 'StorageClass': cloud_sc}], 'Prefix': 'expire1/', 'Status': 'Enabled'},
+           {'ID': 'rule3', 'Expiration': {'Days': 9}, 'Prefix': 'expire1/', 'Status': 'Enabled'}]
+    lifecycle = {'Rules': rules}
+    client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
+
+    # Get list of all keys
+    response = client.list_objects(Bucket=bucket_name)
+    init_keys = _get_keys(response)
+    assert len(init_keys) == 4
+
+    lc_interval = get_lc_debug_interval()
+
+    # Wait for first expiration (plus fudge to handle the timer window)
+    time.sleep(4*lc_interval)
+    expire1_keys = list_bucket_storage_class(client, bucket_name)
+    assert len(expire1_keys['STANDARD']) == 2
+    assert len(expire1_keys[sc[1]]) == 2
+    assert len(expire1_keys[sc[2]]) == 0
+
+    # Wait for next expiration cycle
+    time.sleep(7*lc_interval)
+    expire1_keys = list_bucket_storage_class(client, bucket_name)
+    assert len(expire1_keys['STANDARD']) == 2
+    assert len(expire1_keys[sc[1]]) == 0
+
+    if (retain_head_object != None and retain_head_object == "true"):
+        assert len(expire1_keys[sc[2]]) == 2
+    else:
+        assert len(expire1_keys[sc[2]]) == 0
+
+    # Wait for final expiration cycle
+    time.sleep(12*lc_interval)
+    expire3_keys = list_bucket_storage_class(client, bucket_name)
+    assert len(expire3_keys['STANDARD']) == 2
+    assert len(expire3_keys[sc[1]]) == 0
+    assert len(expire3_keys[sc[2]]) == 0
+
+# Noncurrent objects for cloud transition
+@pytest.mark.lifecycle
+@pytest.mark.lifecycle_expiration
+@pytest.mark.lifecycle_transition
+@pytest.mark.cloud_transition
+@pytest.mark.fails_on_aws
+@pytest.mark.fails_on_dbstore
+def test_lifecycle_noncur_cloud_transition():
+    cloud_sc = get_cloud_storage_class()
+    if cloud_sc == None:
+        pytest.skip('[s3 cloud] section missing cloud_storage_class')
+
+    retain_head_object = get_cloud_retain_head_object()
+    target_path = get_cloud_target_path()
+    target_sc = get_cloud_target_storage_class()
+
+    sc1 = get_cloud_regular_storage_class()
+    if (sc1 == None):
+        pytest.skip('[s3 cloud] section missing storage_class')
+
+    sc = ['STANDARD', sc1, cloud_sc]
+
+    bucket = get_new_bucket()
+    client = get_client()
+    check_configure_versioning_retry(bucket, "Enabled", "Enabled")
+
+    rules = [
+        {
+            'ID': 'rule1',
+            'Prefix': 'test1/',
+            'Status': 'Enabled',
+            'NoncurrentVersionTransitions': [
+                {
+                    'NoncurrentDays': 1,
+                    'StorageClass': sc[1]
+                },
+                {
+                    'NoncurrentDays': 5,
+                    'StorageClass': sc[2]
+                }
+            ],
+        }
+    ]
+    lifecycle = {'Rules': rules}
+    response = client.put_bucket_lifecycle_configuration(Bucket=bucket, LifecycleConfiguration=lifecycle)
+
+    keys = ['test1/a', 'test1/b']
+
+    for k in keys:
+        create_multiple_versions(client, bucket, k, 3)
+
+    init_keys = list_bucket_storage_class(client, bucket)
+    assert len(init_keys['STANDARD']) == 6
+
+    response  = client.list_object_versions(Bucket=bucket)
+
+    lc_interval = get_lc_debug_interval()
+
+    time.sleep(4*lc_interval)
+    expire1_keys = list_bucket_storage_class(client, bucket)
+    assert len(expire1_keys['STANDARD']) == 2
+    assert len(expire1_keys[sc[1]]) == 4
+    assert len(expire1_keys[sc[2]]) == 0
+
+    time.sleep(10*lc_interval)
+    expire1_keys = list_bucket_storage_class(client, bucket)
+    assert len(expire1_keys['STANDARD']) == 2
+    assert len(expire1_keys[sc[1]]) == 0
+
+    if (retain_head_object == None or retain_head_object == "false"):
+        assert len(expire1_keys[sc[2]]) == 0
+    else:
+        assert len(expire1_keys[sc[2]]) == 4
+
+    #check if versioned object exists on cloud endpoint
+    if target_path == None:
+        target_path = "rgwx-default-" + cloud_sc.lower() + "-cloud-bucket"
+    prefix = bucket + "/"
+
+    cloud_client = get_cloud_client()
+
+    time.sleep(lc_interval)
+    result = list_bucket_versions(client, bucket)
+
+    for src_key in keys:
+        for k in result[src_key]:
+            expire1_key1_str = prefix + src_key + "-" + k['VersionId']
+            verify_object(cloud_client, target_path, expire1_key1_str, None, target_sc)
+
+# The test harness for lifecycle is configured to treat days as 10 second intervals.
+@pytest.mark.lifecycle
+@pytest.mark.lifecycle_transition
+@pytest.mark.cloud_transition
+@pytest.mark.fails_on_aws
+@pytest.mark.fails_on_dbstore
+def test_lifecycle_cloud_transition_large_obj():
+    cloud_sc = get_cloud_storage_class()
+    if cloud_sc == None:
+        pytest.skip('[s3 cloud] section missing cloud_storage_class')
+
+    retain_head_object = get_cloud_retain_head_object()
+    target_path = get_cloud_target_path()
+    target_sc = get_cloud_target_storage_class()
+
+    bucket = get_new_bucket()
+    client = get_client()
+    rules=[{'ID': 'rule1', 'Transitions': [{'Days': 1, 'StorageClass': cloud_sc}], 'Prefix': 'expire1/', 'Status': 'Enabled'}]
+
+    keys = ['keep/multi', 'expire1/multi']
+    size = 9*1024*1024
+    data = 'A'*size
+
+    for k in keys:
+        client.put_object(Bucket=bucket, Body=data, Key=k)
+        verify_object(client, bucket, k, data)
+
+    lifecycle = {'Rules': rules}
+    response = client.put_bucket_lifecycle_configuration(Bucket=bucket, LifecycleConfiguration=lifecycle)
+
+    lc_interval = get_lc_debug_interval()
+
+    # Wait for first expiration (plus fudge to handle the timer window)
+    time.sleep(8*lc_interval)
+    expire1_keys = list_bucket_storage_class(client, bucket)
+    assert len(expire1_keys['STANDARD']) == 1
+
+
+    if (retain_head_object != None and retain_head_object == "true"):
+        assert len(expire1_keys[cloud_sc]) == 1
+    else:
+        assert len(expire1_keys[cloud_sc]) == 0
+
+    # Check if objects copied to target path
+    if target_path == None:
+        target_path = "rgwx-default-" + cloud_sc.lower() + "-cloud-bucket"
+    prefix = bucket + "/"
+
+    # multipart upload takes time
+    time.sleep(12*lc_interval)
+    cloud_client = get_cloud_client()
+
+    expire1_key1_str = prefix + keys[1]
+    verify_object(cloud_client, target_path, expire1_key1_str, data, target_sc)
+
+@pytest.mark.encryption
+@pytest.mark.fails_on_dbstore
+def test_encrypted_transfer_1b():
+    _test_encryption_sse_customer_write(1)
+
+
+@pytest.mark.encryption
+@pytest.mark.fails_on_dbstore
+def test_encrypted_transfer_1kb():
+    _test_encryption_sse_customer_write(1024)
+
+
+@pytest.mark.encryption
+@pytest.mark.fails_on_dbstore
+def test_encrypted_transfer_1MB():
+    _test_encryption_sse_customer_write(1024*1024)
+
+
+@pytest.mark.encryption
+@pytest.mark.fails_on_dbstore
+def test_encrypted_transfer_13b():
+    _test_encryption_sse_customer_write(13)
+
+
+@pytest.mark.encryption
+def test_encryption_sse_c_method_head():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    data = 'A'*1000
+    key = 'testobj'
+    sse_client_headers = {
+        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
+        'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
+        'x-amz-server-side-encryption-customer-key-md5': 'DWygnHRtgiJ77HCm+1rvHw=='
+    }
+
+    lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_client_headers))
+    client.meta.events.register('before-call.s3.PutObject', lf)
+    client.put_object(Bucket=bucket_name, Key=key, Body=data)
+
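+    # HEAD without the SSE-C headers should be rejected with 400; with them it succeeds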
+    e = assert_raises(ClientError, client.head_object, Bucket=bucket_name, Key=key)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 400
+
+    lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_client_headers))
+    client.meta.events.register('before-call.s3.HeadObject', lf)
+    response = client.head_object(Bucket=bucket_name, Key=key)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+@pytest.mark.encryption
+def test_encryption_sse_c_present():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    data = 'A'*1000
+    key = 'testobj'
+    sse_client_headers = {
+        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
+        'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
+        'x-amz-server-side-encryption-customer-key-md5': 'DWygnHRtgiJ77HCm+1rvHw=='
+    }
+
+    lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_client_headers))
+    client.meta.events.register('before-call.s3.PutObject', lf)
+    client.put_object(Bucket=bucket_name, Key=key, Body=data)
+
+    e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key=key)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 400
+
+@pytest.mark.encryption
+def test_encryption_sse_c_other_key():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    data = 'A'*100
+    key = 'testobj'
+    sse_client_headers_A = {
+        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
+        'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
+        'x-amz-server-side-encryption-customer-key-md5': 'DWygnHRtgiJ77HCm+1rvHw=='
+    }
+    sse_client_headers_B = {
+        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
+        'x-amz-server-side-encryption-customer-key': '6b+WOZ1T3cqZMxgThRcXAQBrS5mXKdDUphvpxptl9/4=',
+        'x-amz-server-side-encryption-customer-key-md5': 'arxBvwY2V4SiOne6yppVPQ=='
+    }
+
+    lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_client_headers_A))
+    client.meta.events.register('before-call.s3.PutObject', lf)
+    client.put_object(Bucket=bucket_name, Key=key, Body=data)
+
+    lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_client_headers_B))
+    client.meta.events.register('before-call.s3.GetObject', lf)
+    e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key=key)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 400
+
+@pytest.mark.encryption
+def test_encryption_sse_c_invalid_md5():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    data = 'A'*100
+    key = 'testobj'
+    sse_client_headers = {
+        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
+        'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
+        'x-amz-server-side-encryption-customer-key-md5': 'AAAAAAAAAAAAAAAAAAAAAA=='
+    }
+
+    lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_client_headers))
+    client.meta.events.register('before-call.s3.PutObject', lf)
+    e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key=key, Body=data)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 400
+
+@pytest.mark.encryption
+def test_encryption_sse_c_no_md5():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    data = 'A'*100
+    key = 'testobj'
+    sse_client_headers = {
+        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
+        'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
+    }
+
+    lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_client_headers))
+    client.meta.events.register('before-call.s3.PutObject', lf)
+    e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key=key, Body=data)
+
+@pytest.mark.encryption
+def test_encryption_sse_c_no_key():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    data = 'A'*100
+    key = 'testobj'
+    sse_client_headers = {
+        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
+    }
+
+    lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_client_headers))
+    client.meta.events.register('before-call.s3.PutObject', lf)
+    e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key=key, Body=data)
+
+@pytest.mark.encryption
+def test_encryption_key_no_sse_c():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    data = 'A'*100
+    key = 'testobj'
+    sse_client_headers = {
+        'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
+        'x-amz-server-side-encryption-customer-key-md5': 'DWygnHRtgiJ77HCm+1rvHw=='
+    }
+
+    lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_client_headers))
+    client.meta.events.register('before-call.s3.PutObject', lf)
+    e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key=key, Body=data)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 400
+
+def _multipart_upload_enc(client, bucket_name, key, size, part_size, init_headers, part_headers, metadata, resend_parts):
+    """
+    generate a multi-part upload for a random file of specified size,
+    optionally resending some parts, and return the upload id, the
+    concatenated body, and the list of parts
+    """
+    if client == None:
+        client = get_client()
+
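+    # SSE-C headers are injected via botocore's before-call events so the raw
+    # x-amz-server-side-encryption-customer-* headers reach the request unmodified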
+    lf = (lambda **kwargs: kwargs['params']['headers'].update(init_headers))
+    client.meta.events.register('before-call.s3.CreateMultipartUpload', lf)
+    if metadata == None:
+        response = client.create_multipart_upload(Bucket=bucket_name, Key=key)
+    else:
+        response = client.create_multipart_upload(Bucket=bucket_name, Key=key, Metadata=metadata)
+
+    upload_id = response['UploadId']
+    s = ''
+    parts = []
+    for i, part in enumerate(generate_random(size, part_size)):
+        # part_num is necessary because PartNumber for upload_part and in parts must start at 1 and i starts at 0
+        part_num = i+1
+        s += part
+        lf = (lambda **kwargs: kwargs['params']['headers'].update(part_headers))
+        client.meta.events.register('before-call.s3.UploadPart', lf)
+        response = client.upload_part(UploadId=upload_id, Bucket=bucket_name, Key=key, PartNumber=part_num, Body=part)
+        parts.append({'ETag': response['ETag'].strip('"'), 'PartNumber': part_num})
+        if i in resend_parts:
+            lf = (lambda **kwargs: kwargs['params']['headers'].update(part_headers))
+            client.meta.events.register('before-call.s3.UploadPart', lf)
+            client.upload_part(UploadId=upload_id, Bucket=bucket_name, Key=key, PartNumber=part_num, Body=part)
+
+    return (upload_id, s, parts)
+
+def _check_content_using_range_enc(client, bucket_name, key, data, size, step, enc_headers=None):
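+    # ranged GETs on an SSE-C object still require the full set of SSE-C headers on each request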
+    for ofs in range(0, size, step):
+        toread = size - ofs
+        if toread > step:
+            toread = step
+        end = ofs + toread - 1
+        lf = (lambda **kwargs: kwargs['params']['headers'].update(enc_headers))
+        client.meta.events.register('before-call.s3.GetObject', lf)
+        r = 'bytes={s}-{e}'.format(s=ofs, e=end)
+        response = client.get_object(Bucket=bucket_name, Key=key, Range=r)
+        read_range = response['ContentLength']
+        body = _get_body(response)
+        assert read_range == toread
+        assert body == data[ofs:end+1]
+
+@pytest.mark.encryption
+@pytest.mark.fails_on_dbstore
+def test_encryption_sse_c_multipart_upload():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    key = "multipart_enc"
+    content_type = 'text/plain'
+    objlen = 30 * 1024 * 1024
+    partlen = 5*1024*1024
+    metadata = {'foo': 'bar'}
+    enc_headers = {
+        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
+        'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
+        'x-amz-server-side-encryption-customer-key-md5': 'DWygnHRtgiJ77HCm+1rvHw==',
+        'Content-Type': content_type
+    }
+    resend_parts = []
+
+    (upload_id, data, parts) = _multipart_upload_enc(client, bucket_name, key, objlen,
+            part_size=partlen, init_headers=enc_headers, part_headers=enc_headers, metadata=metadata, resend_parts=resend_parts)
+
+    lf = (lambda **kwargs: kwargs['params']['headers'].update(enc_headers))
+    client.meta.events.register('before-call.s3.CompleteMultipartUpload', lf)
+    client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
+
+    response = client.list_objects_v2(Bucket=bucket_name, Prefix=key)
+    assert len(response['Contents']) == 1
+    assert response['Contents'][0]['Size'] == objlen
+
+    lf = (lambda **kwargs: kwargs['params']['headers'].update(enc_headers))
+    client.meta.events.register('before-call.s3.GetObject', lf)
+    response = client.get_object(Bucket=bucket_name, Key=key)
+
+    assert response['Metadata'] == metadata
+    assert response['ResponseMetadata']['HTTPHeaders']['content-type'] == content_type
+
+    body = _get_body(response)
+    assert body == data
+    size = response['ContentLength']
+    assert len(body) == size
+
+    _check_content_using_range_enc(client, bucket_name, key, data, size, 1000000, enc_headers=enc_headers)
+    _check_content_using_range_enc(client, bucket_name, key, data, size, 10000000, enc_headers=enc_headers)
+    for i in range(-1,2):
+        _check_content_using_range_enc(client, bucket_name, key, data, size, partlen + i, enc_headers=enc_headers)
+
+@pytest.mark.encryption
+@pytest.mark.fails_on_dbstore
+def test_encryption_sse_c_unaligned_multipart_upload():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    key = "multipart_enc"
+    content_type = 'text/plain'
+    objlen = 30 * 1024 * 1024
+    partlen = 1 + 5 * 1024 * 1024 # not a multiple of the 4k encryption block size
+    metadata = {'foo': 'bar'}
+    enc_headers = {
+        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
+        'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
+        'x-amz-server-side-encryption-customer-key-md5': 'DWygnHRtgiJ77HCm+1rvHw==',
+        'Content-Type': content_type
+    }
+    resend_parts = []
+
+    (upload_id, data, parts) = _multipart_upload_enc(client, bucket_name, key, objlen,
+            part_size=partlen, init_headers=enc_headers, part_headers=enc_headers, metadata=metadata, resend_parts=resend_parts)
+
+    lf = (lambda **kwargs: kwargs['params']['headers'].update(enc_headers))
+    client.meta.events.register('before-call.s3.CompleteMultipartUpload', lf)
+    client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
+
+    response = client.list_objects_v2(Bucket=bucket_name, Prefix=key)
+    assert len(response['Contents']) == 1
+    assert response['Contents'][0]['Size'] == objlen
+
+    lf = (lambda **kwargs: kwargs['params']['headers'].update(enc_headers))
+    client.meta.events.register('before-call.s3.GetObject', lf)
+    response = client.get_object(Bucket=bucket_name, Key=key)
+
+    assert response['Metadata'] == metadata
+    assert response['ResponseMetadata']['HTTPHeaders']['content-type'] == content_type
+
+    body = _get_body(response)
+    assert body == data
+    size = response['ContentLength']
+    assert len(body) == size
+
+    _check_content_using_range_enc(client, bucket_name, key, data, size, 1000000, enc_headers=enc_headers)
+    _check_content_using_range_enc(client, bucket_name, key, data, size, 10000000, enc_headers=enc_headers)
+    for i in range(-1,2):
+        _check_content_using_range_enc(client, bucket_name, key, data, size, partlen + i, enc_headers=enc_headers)
+
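+# The next two tests upload parts with SSE-C headers that do not match the
+# initiate request (a different key, then a bad key MD5); both are expected to
+# be rejected with a 400 error.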
+@pytest.mark.encryption
+# TODO: remove this fails_on_rgw when I fix it
+@pytest.mark.fails_on_rgw
+def test_encryption_sse_c_multipart_invalid_chunks_1():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    key = "multipart_enc"
+    content_type = 'text/plain'
+    objlen = 30 * 1024 * 1024
+    metadata = {'foo': 'bar'}
+    init_headers = {
+        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
+        'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
+        'x-amz-server-side-encryption-customer-key-md5': 'DWygnHRtgiJ77HCm+1rvHw==',
+        'Content-Type': content_type
+    }
+    part_headers = {
+        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
+        'x-amz-server-side-encryption-customer-key': '6b+WOZ1T3cqZMxgThRcXAQBrS5mXKdDUphvpxptl9/4=',
+        'x-amz-server-side-encryption-customer-key-md5': 'arxBvwY2V4SiOne6yppVPQ=='
+    }
+    resend_parts = []
+
+    e = assert_raises(ClientError, _multipart_upload_enc, client=client,  bucket_name=bucket_name,
+            key=key, size=objlen, part_size=5*1024*1024, init_headers=init_headers, part_headers=part_headers, metadata=metadata, resend_parts=resend_parts)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 400
+
+@pytest.mark.encryption
+# TODO: remove this fails_on_rgw when I fix it
+@pytest.mark.fails_on_rgw
+def test_encryption_sse_c_multipart_invalid_chunks_2():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    key = "multipart_enc"
+    content_type = 'text/plain'
+    objlen = 30 * 1024 * 1024
+    metadata = {'foo': 'bar'}
+    init_headers = {
+        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
+        'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
+        'x-amz-server-side-encryption-customer-key-md5': 'DWygnHRtgiJ77HCm+1rvHw==',
+        'Content-Type': content_type
+    }
+    part_headers = {
+        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
+        'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
+        'x-amz-server-side-encryption-customer-key-md5': 'AAAAAAAAAAAAAAAAAAAAAA=='
+    }
+    resend_parts = []
+
+    e = assert_raises(ClientError, _multipart_upload_enc, client=client,  bucket_name=bucket_name,
+            key=key, size=objlen, part_size=5*1024*1024, init_headers=init_headers, part_headers=part_headers, metadata=metadata, resend_parts=resend_parts)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 400
+
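+# Downloading an SSE-C object with the wrong customer key should fail with 400;
+# reading it back with the original key works.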
+@pytest.mark.encryption
+def test_encryption_sse_c_multipart_bad_download():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    key = "multipart_enc"
+    content_type = 'text/plain'
+    objlen = 30 * 1024 * 1024
+    metadata = {'foo': 'bar'}
+    put_headers = {
+        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
+        'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
+        'x-amz-server-side-encryption-customer-key-md5': 'DWygnHRtgiJ77HCm+1rvHw==',
+        'Content-Type': content_type
+    }
+    get_headers = {
+        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
+        'x-amz-server-side-encryption-customer-key': '6b+WOZ1T3cqZMxgThRcXAQBrS5mXKdDUphvpxptl9/4=',
+        'x-amz-server-side-encryption-customer-key-md5': 'arxBvwY2V4SiOne6yppVPQ=='
+    }
+    resend_parts = []
+
+    (upload_id, data, parts) = _multipart_upload_enc(client, bucket_name, key, objlen,
+            part_size=5*1024*1024, init_headers=put_headers, part_headers=put_headers, metadata=metadata, resend_parts=resend_parts)
+
+    lf = (lambda **kwargs: kwargs['params']['headers'].update(put_headers))
+    client.meta.events.register('before-call.s3.CompleteMultipartUpload', lf)
+    client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
+
+    response = client.list_objects_v2(Bucket=bucket_name, Prefix=key)
+    assert len(response['Contents']) == 1
+    assert response['Contents'][0]['Size'] == objlen
+
+    lf = (lambda **kwargs: kwargs['params']['headers'].update(put_headers))
+    client.meta.events.register('before-call.s3.GetObject', lf)
+    response = client.get_object(Bucket=bucket_name, Key=key)
+
+    assert response['Metadata'] == metadata
+    assert response['ResponseMetadata']['HTTPHeaders']['content-type'] == content_type
+
+    lf = (lambda **kwargs: kwargs['params']['headers'].update(get_headers))
+    client.meta.events.register('before-call.s3.GetObject', lf)
+    e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key=key)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 400
+
+
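+# Browser-style POST upload: the signed policy permits the SSE-C form fields, and
+# the object is then read back with the same customer key.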
+@pytest.mark.encryption
+@pytest.mark.fails_on_dbstore
+def test_encryption_sse_c_post_object_authenticated_request():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    url = _get_post_url(bucket_name)
+    utc = pytz.utc
+    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
+
+    policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
+    "conditions": [\
+    {"bucket": bucket_name},\
+    ["starts-with", "$key", "foo"],\
+    {"acl": "private"},\
+    ["starts-with", "$Content-Type", "text/plain"],\
+    ["starts-with", "$x-amz-server-side-encryption-customer-algorithm", ""], \
+    ["starts-with", "$x-amz-server-side-encryption-customer-key", ""], \
+    ["starts-with", "$x-amz-server-side-encryption-customer-key-md5", ""], \
+    ["content-length-range", 0, 1024]\
+    ]\
+    }
+
+
+    json_policy_document = json.JSONEncoder().encode(policy_document)
+    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
+    policy = base64.b64encode(bytes_json_policy_document)
+    aws_secret_access_key = get_main_aws_secret_key()
+    aws_access_key_id = get_main_aws_access_key()
+
+    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
+
+    payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
+    ("acl" , "private"),("signature" , signature),("policy" , policy),\
+    ("Content-Type" , "text/plain"),
+    ('x-amz-server-side-encryption-customer-algorithm', 'AES256'), \
+    ('x-amz-server-side-encryption-customer-key', 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs='), \
+    ('x-amz-server-side-encryption-customer-key-md5', 'DWygnHRtgiJ77HCm+1rvHw=='), \
+    ('file', ('bar'))])
+
+    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
+    assert r.status_code == 204
+
+    get_headers = {
+        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
+        'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
+        'x-amz-server-side-encryption-customer-key-md5': 'DWygnHRtgiJ77HCm+1rvHw=='
+    }
+    lf = (lambda **kwargs: kwargs['params']['headers'].update(get_headers))
+    client.meta.events.register('before-call.s3.GetObject', lf)
+    response = client.get_object(Bucket=bucket_name, Key='foo.txt')
+    body = _get_body(response)
+    assert body == 'bar'
+
+@pytest.mark.encryption
+@pytest.mark.fails_on_dbstore
+def _test_sse_kms_customer_write(file_size, key_id = 'testkey-1'):
+    """
+    Tests Create a file of A's, use it to set_contents_from_file.
+    Create a file of B's, use it to re-set_contents_from_file.
+    Re-read the contents, and confirm we get B's
+    """
+    bucket_name = get_new_bucket()
+    client = get_client()
+    sse_kms_client_headers = {
+        'x-amz-server-side-encryption': 'aws:kms',
+        'x-amz-server-side-encryption-aws-kms-key-id': key_id
+    }
+    data = 'A'*file_size
+
+    lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_kms_client_headers))
+    client.meta.events.register('before-call.s3.PutObject', lf)
+    client.put_object(Bucket=bucket_name, Key='testobj', Body=data)
+
+    response = client.get_object(Bucket=bucket_name, Key='testobj')
+    body = _get_body(response)
+    assert body == data
+
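+# With SSE-KMS the encryption headers are only meaningful on write requests: a
+# plain HEAD reports the algorithm and key id, while repeating the headers on
+# HEAD is rejected with 400.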
+@pytest.mark.encryption
+@pytest.mark.fails_on_dbstore
+def test_sse_kms_method_head():
+    kms_keyid = get_main_kms_keyid()
+    bucket_name = get_new_bucket()
+    client = get_client()
+    sse_kms_client_headers = {
+        'x-amz-server-side-encryption': 'aws:kms',
+        'x-amz-server-side-encryption-aws-kms-key-id': kms_keyid
+    }
+    data = 'A'*1000
+    key = 'testobj'
+
+    lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_kms_client_headers))
+    client.meta.events.register('before-call.s3.PutObject', lf)
+    client.put_object(Bucket=bucket_name, Key=key, Body=data)
+
+    response = client.head_object(Bucket=bucket_name, Key=key)
+    assert response['ResponseMetadata']['HTTPHeaders']['x-amz-server-side-encryption'] == 'aws:kms'
+    assert response['ResponseMetadata']['HTTPHeaders']['x-amz-server-side-encryption-aws-kms-key-id'] == kms_keyid
+
+    lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_kms_client_headers))
+    client.meta.events.register('before-call.s3.HeadObject', lf)
+    e = assert_raises(ClientError, client.head_object, Bucket=bucket_name, Key=key)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 400
+
+@pytest.mark.encryption
+@pytest.mark.fails_on_dbstore
+def test_sse_kms_present():
+    kms_keyid = get_main_kms_keyid()
+    bucket_name = get_new_bucket()
+    client = get_client()
+    sse_kms_client_headers = {
+        'x-amz-server-side-encryption': 'aws:kms',
+        'x-amz-server-side-encryption-aws-kms-key-id': kms_keyid
+    }
+    data = 'A'*100
+    key = 'testobj'
+
+    lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_kms_client_headers))
+    client.meta.events.register('before-call.s3.PutObject', lf)
+    client.put_object(Bucket=bucket_name, Key=key, Body=data)
+
+    response = client.get_object(Bucket=bucket_name, Key=key)
+    body = _get_body(response)
+    assert body == data
+
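+# aws:kms without an explicit key id; the test only checks that the request
+# raises a ClientError, without asserting a particular status code.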
+@pytest.mark.encryption
+def test_sse_kms_no_key():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    sse_kms_client_headers = {
+        'x-amz-server-side-encryption': 'aws:kms',
+    }
+    data = 'A'*100
+    key = 'testobj'
+
+    lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_kms_client_headers))
+    client.meta.events.register('before-call.s3.PutObject', lf)
+
+    e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key=key, Body=data)
+
+
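+# Supplying a KMS key id without declaring x-amz-server-side-encryption should
+# be rejected with 400.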
+@pytest.mark.encryption
+def test_sse_kms_not_declared():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    sse_kms_client_headers = {
+        'x-amz-server-side-encryption-aws-kms-key-id': 'testkey-2'
+    }
+    data = 'A'*100
+    key = 'testobj'
+
+    lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_kms_client_headers))
+    client.meta.events.register('before-call.s3.PutObject', lf)
+
+    e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key=key, Body=data)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 400
+
+@pytest.mark.encryption
+@pytest.mark.fails_on_dbstore
+def test_sse_kms_multipart_upload():
+    kms_keyid = get_main_kms_keyid()
+    bucket_name = get_new_bucket()
+    client = get_client()
+    key = "multipart_enc"
+    content_type = 'text/plain'
+    objlen = 30 * 1024 * 1024
+    metadata = {'foo': 'bar'}
+    enc_headers = {
+        'x-amz-server-side-encryption': 'aws:kms',
+        'x-amz-server-side-encryption-aws-kms-key-id': kms_keyid,
+        'Content-Type': content_type
+    }
+    resend_parts = []
+
+    (upload_id, data, parts) = _multipart_upload_enc(client, bucket_name, key, objlen,
+            part_size=5*1024*1024, init_headers=enc_headers, part_headers=enc_headers, metadata=metadata, resend_parts=resend_parts)
+
+    lf = (lambda **kwargs: kwargs['params']['headers'].update(enc_headers))
+    client.meta.events.register('before-call.s3.CompleteMultipartUpload', lf)
+    client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
+
+    response = client.list_objects_v2(Bucket=bucket_name, Prefix=key)
+    assert len(response['Contents']) == 1
+    assert response['Contents'][0]['Size'] == objlen
+
+    response = client.get_object(Bucket=bucket_name, Key=key)
+
+    assert response['Metadata'] == metadata
+    assert response['ResponseMetadata']['HTTPHeaders']['content-type'] == content_type
+
+    body = _get_body(response)
+    assert body == data
+    size = response['ContentLength']
+    assert len(body) == size
+
+    _check_content_using_range(key, bucket_name, data, 1000000)
+    _check_content_using_range(key, bucket_name, data, 10000000)
+
+
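+# Unlike the SSE-C "invalid chunks" cases above, these two tests pass per-part
+# KMS key ids that differ from the initiate request and only check that the
+# multipart upload itself completes without raising.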
+@pytest.mark.encryption
+@pytest.mark.fails_on_dbstore
+def test_sse_kms_multipart_invalid_chunks_1():
+    kms_keyid = get_main_kms_keyid()
+    kms_keyid2 = get_secondary_kms_keyid()
+    bucket_name = get_new_bucket()
+    client = get_client()
+    key = "multipart_enc"
+    content_type = 'text/bla'
+    objlen = 30 * 1024 * 1024
+    metadata = {'foo': 'bar'}
+    init_headers = {
+        'x-amz-server-side-encryption': 'aws:kms',
+        'x-amz-server-side-encryption-aws-kms-key-id': kms_keyid,
+        'Content-Type': content_type
+    }
+    part_headers = {
+        'x-amz-server-side-encryption': 'aws:kms',
+        'x-amz-server-side-encryption-aws-kms-key-id': kms_keyid2
+    }
+    resend_parts = []
+
+    _multipart_upload_enc(client, bucket_name, key, objlen, part_size=5*1024*1024,
+            init_headers=init_headers, part_headers=part_headers, metadata=metadata,
+            resend_parts=resend_parts)
+
+
+@pytest.mark.encryption
+@pytest.mark.fails_on_dbstore
+def test_sse_kms_multipart_invalid_chunks_2():
+    kms_keyid = get_main_kms_keyid()
+    bucket_name = get_new_bucket()
+    client = get_client()
+    key = "multipart_enc"
+    content_type = 'text/plain'
+    objlen = 30 * 1024 * 1024
+    metadata = {'foo': 'bar'}
+    init_headers = {
+        'x-amz-server-side-encryption': 'aws:kms',
+        'x-amz-server-side-encryption-aws-kms-key-id': kms_keyid,
+        'Content-Type': content_type
+    }
+    part_headers = {
+        'x-amz-server-side-encryption': 'aws:kms',
+        'x-amz-server-side-encryption-aws-kms-key-id': 'testkey-not-present'
+    }
+    resend_parts = []
+
+    _multipart_upload_enc(client, bucket_name, key, objlen, part_size=5*1024*1024,
+            init_headers=init_headers, part_headers=part_headers, metadata=metadata,
+            resend_parts=resend_parts)
+
+
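+# Same POST-upload flow as the SSE-C case, but with aws:kms form fields; the
+# object can then be read back without any extra headers.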
+@pytest.mark.encryption
+@pytest.mark.fails_on_dbstore
+def test_sse_kms_post_object_authenticated_request():
+    kms_keyid = get_main_kms_keyid()
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    url = _get_post_url(bucket_name)
+    utc = pytz.utc
+    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
+
+    policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
+    "conditions": [\
+    {"bucket": bucket_name},\
+    ["starts-with", "$key", "foo"],\
+    {"acl": "private"},\
+    ["starts-with", "$Content-Type", "text/plain"],\
+    ["starts-with", "$x-amz-server-side-encryption", ""], \
+    ["starts-with", "$x-amz-server-side-encryption-aws-kms-key-id", ""], \
+    ["content-length-range", 0, 1024]\
+    ]\
+    }
+
+
+    json_policy_document = json.JSONEncoder().encode(policy_document)
+    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
+    policy = base64.b64encode(bytes_json_policy_document)
+    aws_secret_access_key = get_main_aws_secret_key()
+    aws_access_key_id = get_main_aws_access_key()
+
+    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
+
+    payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
+    ("acl" , "private"),("signature" , signature),("policy" , policy),\
+    ("Content-Type" , "text/plain"),
+    ('x-amz-server-side-encryption', 'aws:kms'), \
+    ('x-amz-server-side-encryption-aws-kms-key-id', kms_keyid), \
+    ('file', ('bar'))])
+
+    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
+    assert r.status_code == 204
+
+    response = client.get_object(Bucket=bucket_name, Key='foo.txt')
+    body = _get_body(response)
+    assert body == 'bar'
+
+@pytest.mark.encryption
+@pytest.mark.fails_on_dbstore
+def test_sse_kms_transfer_1b():
+    kms_keyid = get_main_kms_keyid()
+    if kms_keyid is None:
+        pytest.skip('[s3 main] section missing kms_keyid')
+    _test_sse_kms_customer_write(1, key_id = kms_keyid)
+
+
+@pytest.mark.encryption
+@pytest.mark.fails_on_dbstore
+def test_sse_kms_transfer_1kb():
+    kms_keyid = get_main_kms_keyid()
+    if kms_keyid is None:
+        pytest.skip('[s3 main] section missing kms_keyid')
+    _test_sse_kms_customer_write(1024, key_id = kms_keyid)
+
+
+@pytest.mark.encryption
+@pytest.mark.fails_on_dbstore
+def test_sse_kms_transfer_1MB():
+    kms_keyid = get_main_kms_keyid()
+    if kms_keyid is None:
+        pytest.skip('[s3 main] section missing kms_keyid')
+    _test_sse_kms_customer_write(1024*1024, key_id = kms_keyid)
+
+
+@pytest.mark.encryption
+@pytest.mark.fails_on_dbstore
+def test_sse_kms_transfer_13b():
+    kms_keyid = get_main_kms_keyid()
+    if kms_keyid is None:
+        pytest.skip('[s3 main] section missing kms_keyid')
+    _test_sse_kms_customer_write(13, key_id = kms_keyid)
+
+
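+# Presenting SSE-KMS headers on a GET request should be rejected with 400.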
+@pytest.mark.encryption
+def test_sse_kms_read_declare():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    sse_kms_client_headers = {
+        'x-amz-server-side-encryption': 'aws:kms',
+        'x-amz-server-side-encryption-aws-kms-key-id': 'testkey-1'
+    }
+    data = 'A'*100
+    key = 'testobj'
+
+    client.put_object(Bucket=bucket_name, Key=key, Body=data)
+    lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_kms_client_headers))
+    client.meta.events.register('before-call.s3.GetObject', lf)
+
+    e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key=key)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 400
+
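+# Basic bucket policy: allow any principal to run s3:ListBucket on the bucket,
+# then verify that a second (alt) account can list it.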
+@pytest.mark.bucket_policy
+def test_bucket_policy():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    key = 'asdf'
+    client.put_object(Bucket=bucket_name, Key=key, Body='asdf')
+
+    resource1 = "arn:aws:s3:::" + bucket_name
+    resource2 = "arn:aws:s3:::" + bucket_name + "/*"
+    policy_document = json.dumps(
+    {
+        "Version": "2012-10-17",
+        "Statement": [{
+        "Effect": "Allow",
+        "Principal": {"AWS": "*"},
+        "Action": "s3:ListBucket",
+        "Resource": [
+            "{}".format(resource1),
+            "{}".format(resource2)
+          ]
+        }]
+     })
+
+    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
+
+    alt_client = get_alt_client()
+    response = alt_client.list_objects(Bucket=bucket_name)
+    assert len(response['Contents']) == 1
+
+@pytest.mark.bucket_policy
+@pytest.mark.list_objects_v2
+def test_bucketv2_policy():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    key = 'asdf'
+    client.put_object(Bucket=bucket_name, Key=key, Body='asdf')
+
+    resource1 = "arn:aws:s3:::" + bucket_name
+    resource2 = "arn:aws:s3:::" + bucket_name + "/*"
+    policy_document = json.dumps(
+    {
+        "Version": "2012-10-17",
+        "Statement": [{
+        "Effect": "Allow",
+        "Principal": {"AWS": "*"},
+        "Action": "s3:ListBucket",
+        "Resource": [
+            "{}".format(resource1),
+            "{}".format(resource2)
+          ]
+        }]
+     })
+
+    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
+
+    alt_client = get_alt_client()
+    response = alt_client.list_objects_v2(Bucket=bucket_name)
+    assert len(response['Contents']) == 1
+
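+# An explicit Deny in the bucket policy overrides the access granted by the
+# authenticated-read bucket ACL, so the alt client's listing is refused.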
+@pytest.mark.bucket_policy
+def test_bucket_policy_acl():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    key = 'asdf'
+    client.put_object(Bucket=bucket_name, Key=key, Body='asdf')
+
+    resource1 = "arn:aws:s3:::" + bucket_name
+    resource2 = "arn:aws:s3:::" + bucket_name + "/*"
+    policy_document =  json.dumps(
+    {
+        "Version": "2012-10-17",
+        "Statement": [{
+        "Effect": "Deny",
+        "Principal": {"AWS": "*"},
+        "Action": "s3:ListBucket",
+        "Resource": [
+            "{}".format(resource1),
+            "{}".format(resource2)
+          ]
+        }]
+     })
+
+    client.put_bucket_acl(Bucket=bucket_name, ACL='authenticated-read')
+    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
+
+    alt_client = get_alt_client()
+    e = assert_raises(ClientError, alt_client.list_objects, Bucket=bucket_name)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 403
+    assert error_code == 'AccessDenied'
+
+    client.delete_bucket_policy(Bucket=bucket_name)
+    client.put_bucket_acl(Bucket=bucket_name, ACL='public-read')
+
+@pytest.mark.bucket_policy
+@pytest.mark.list_objects_v2
+def test_bucketv2_policy_acl():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    key = 'asdf'
+    client.put_object(Bucket=bucket_name, Key=key, Body='asdf')
+
+    resource1 = "arn:aws:s3:::" + bucket_name
+    resource2 = "arn:aws:s3:::" + bucket_name + "/*"
+    policy_document =  json.dumps(
+    {
+        "Version": "2012-10-17",
+        "Statement": [{
+        "Effect": "Deny",
+        "Principal": {"AWS": "*"},
+        "Action": "s3:ListBucket",
+        "Resource": [
+            "{}".format(resource1),
+            "{}".format(resource2)
+          ]
+        }]
+     })
+
+    client.put_bucket_acl(Bucket=bucket_name, ACL='authenticated-read')
+    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
+
+    alt_client = get_alt_client()
+    e = assert_raises(ClientError, alt_client.list_objects_v2, Bucket=bucket_name)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 403
+    assert error_code == 'AccessDenied'
+
+    client.delete_bucket_policy(Bucket=bucket_name)
+    client.put_bucket_acl(Bucket=bucket_name, ACL='public-read')
+
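+# The before-call hook below rewrites the request path to ':<bucket>', which is
+# presumably how a client under another tenant addresses a bucket owned by the
+# default (empty) tenant.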
+@pytest.mark.bucket_policy
+# TODO: remove this fails_on_rgw when I fix it
+@pytest.mark.fails_on_rgw
+def test_bucket_policy_different_tenant():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    key = 'asdf'
+    client.put_object(Bucket=bucket_name, Key=key, Body='asdf')
+
+    resource1 = "arn:aws:s3::*:" + bucket_name
+    resource2 = "arn:aws:s3::*:" + bucket_name + "/*"
+    policy_document = json.dumps(
+    {
+        "Version": "2012-10-17",
+        "Statement": [{
+        "Effect": "Allow",
+        "Principal": {"AWS": "*"},
+        "Action": "s3:ListBucket",
+        "Resource": [
+            "{}".format(resource1),
+            "{}".format(resource2)
+          ]
+        }]
+     })
+
+    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
+
+    # TODO: figure out how to change the bucketname
+    def change_bucket_name(**kwargs):
+        kwargs['params']['url'] = "http://localhost:8000/:{bucket_name}?encoding-type=url".format(bucket_name=bucket_name)
+        kwargs['params']['url_path'] = "/:{bucket_name}".format(bucket_name=bucket_name)
+        kwargs['params']['context']['signing']['bucket'] = ":{bucket_name}".format(bucket_name=bucket_name)
+        print(kwargs['request_signer'])
+        print(kwargs)
+
+    #bucket_name = ":" + bucket_name
+    tenant_client = get_tenant_client()
+    tenant_client.meta.events.register('before-call.s3.ListObjects', change_bucket_name)
+    response = tenant_client.list_objects(Bucket=bucket_name)
+    #alt_client = get_alt_client()
+    #response = alt_client.list_objects(Bucket=bucket_name)
+
+    assert len(response['Contents']) == 1
+
+@pytest.mark.bucket_policy
+# TODO: remove this fails_on_rgw when I fix it
+@pytest.mark.fails_on_rgw
+@pytest.mark.list_objects_v2
+def test_bucketv2_policy_different_tenant():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    key = 'asdf'
+    client.put_object(Bucket=bucket_name, Key=key, Body='asdf')
+
+    resource1 = "arn:aws:s3::*:" + bucket_name
+    resource2 = "arn:aws:s3::*:" + bucket_name + "/*"
+    policy_document = json.dumps(
+    {
+        "Version": "2012-10-17",
+        "Statement": [{
+        "Effect": "Allow",
+        "Principal": {"AWS": "*"},
+        "Action": "s3:ListBucket",
+        "Resource": [
+            "{}".format(resource1),
+            "{}".format(resource2)
+          ]
+        }]
+     })
+
+    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
+
+    # TODO: figure out how to change the bucketname
+    def change_bucket_name(**kwargs):
+        kwargs['params']['url'] = "http://localhost:8000/:{bucket_name}?encoding-type=url".format(bucket_name=bucket_name)
+        kwargs['params']['url_path'] = "/:{bucket_name}".format(bucket_name=bucket_name)
+        kwargs['params']['context']['signing']['bucket'] = ":{bucket_name}".format(bucket_name=bucket_name)
+        print(kwargs['request_signer'])
+        print(kwargs)
+
+    #bucket_name = ":" + bucket_name
+    tenant_client = get_tenant_client()
+    tenant_client.meta.events.register('before-call.s3.ListObjects', change_bucket_name)
+    response = tenant_client.list_objects_v2(Bucket=bucket_name)
+    #alt_client = get_alt_client()
+    #response = alt_client.list_objects_v2(Bucket=bucket_name)
+
+    assert len(response['Contents']) == 1
+
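+# A policy fetched from one bucket (with wildcard resources) can be applied
+# verbatim to a second bucket; both buckets then allow the alt client to list.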
+@pytest.mark.bucket_policy
+def test_bucket_policy_another_bucket():
+    bucket_name = get_new_bucket()
+    bucket_name2 = get_new_bucket()
+    client = get_client()
+    key = 'asdf'
+    key2 = 'abcd'
+    client.put_object(Bucket=bucket_name, Key=key, Body='asdf')
+    client.put_object(Bucket=bucket_name2, Key=key2, Body='abcd')
+    policy_document = json.dumps(
+    {
+        "Version": "2012-10-17",
+        "Statement": [{
+        "Effect": "Allow",
+        "Principal": {"AWS": "*"},
+        "Action": "s3:ListBucket",
+        "Resource": [
+            "arn:aws:s3:::*",
+            "arn:aws:s3:::*/*"
+          ]
+        }]
+     })
+
+    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
+    response = client.get_bucket_policy(Bucket=bucket_name)
+    response_policy = response['Policy']
+
+    client.put_bucket_policy(Bucket=bucket_name2, Policy=response_policy)
+
+    alt_client = get_alt_client()
+    response = alt_client.list_objects(Bucket=bucket_name)
+    assert len(response['Contents']) == 1
+
+    alt_client = get_alt_client()
+    response = alt_client.list_objects(Bucket=bucket_name2)
+    assert len(response['Contents']) == 1
+
+@pytest.mark.bucket_policy
+@pytest.mark.list_objects_v2
+def test_bucketv2_policy_another_bucket():
+    bucket_name = get_new_bucket()
+    bucket_name2 = get_new_bucket()
+    client = get_client()
+    key = 'asdf'
+    key2 = 'abcd'
+    client.put_object(Bucket=bucket_name, Key=key, Body='asdf')
+    client.put_object(Bucket=bucket_name2, Key=key2, Body='abcd')
+    policy_document = json.dumps(
+    {
+        "Version": "2012-10-17",
+        "Statement": [{
+        "Effect": "Allow",
+        "Principal": {"AWS": "*"},
+        "Action": "s3:ListBucket",
+        "Resource": [
+            "arn:aws:s3:::*",
+            "arn:aws:s3:::*/*"
+          ]
+        }]
+     })
+
+    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
+    response = client.get_bucket_policy(Bucket=bucket_name)
+    response_policy = response['Policy']
+
+    client.put_bucket_policy(Bucket=bucket_name2, Policy=response_policy)
+
+    alt_client = get_alt_client()
+    response = alt_client.list_objects_v2(Bucket=bucket_name)
+    assert len(response['Contents']) == 1
+
+    alt_client = get_alt_client()
+    response = alt_client.list_objects_v2(Bucket=bucket_name2)
+    assert len(response['Contents']) == 1
+
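+# StringLikeIfExists: requests whose Referer matches the pattern are allowed, a
+# request without the header would also pass under IfExists semantics (that case
+# is commented out below), and a non-matching Referer is denied.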
+@pytest.mark.bucket_policy
+# TODO: remove this fails_on_rgw when I fix it
+@pytest.mark.fails_on_rgw
+def test_bucket_policy_set_condition_operator_end_with_IfExists():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    key = 'foo'
+    client.put_object(Bucket=bucket_name, Key=key)
+    policy = '''{
+      "Version":"2012-10-17",
+      "Statement": [{
+        "Sid": "Allow Public Access to All Objects",
+        "Effect": "Allow",
+        "Principal": "*",
+        "Action": "s3:GetObject",
+        "Condition": {
+                    "StringLikeIfExists": {
+                        "aws:Referer": "http://www.example.com/*"
+                    }
+                },
+        "Resource": "arn:aws:s3:::%s/*"
+      }
+     ]
+    }''' % bucket_name
+    # boto3.set_stream_logger(name='botocore')
+    client.put_bucket_policy(Bucket=bucket_name, Policy=policy)
+
+    request_headers={'referer': 'http://www.example.com/'}
+
+    lf = (lambda **kwargs: kwargs['params']['headers'].update(request_headers))
+    client.meta.events.register('before-call.s3.GetObject', lf)
+
+    response = client.get_object(Bucket=bucket_name, Key=key)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    request_headers={'referer': 'http://www.example.com/index.html'}
+
+    lf = (lambda **kwargs: kwargs['params']['headers'].update(request_headers))
+    client.meta.events.register('before-call.s3.GetObject', lf)
+
+    response = client.get_object(Bucket=bucket_name, Key=key)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    # the 'referer' headers need to be removed for this one
+    #response = client.get_object(Bucket=bucket_name, Key=key)
+    #assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    request_headers={'referer': 'http://example.com'}
+
+    lf = (lambda **kwargs: kwargs['params']['headers'].update(request_headers))
+    client.meta.events.register('before-call.s3.GetObject', lf)
+
+    # TODO: Compare Requests sent in Boto3, Wireshark, RGW Log for both boto and boto3
+    e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key=key)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 403
+
+    response =  client.get_bucket_policy(Bucket=bucket_name)
+    print(response)
+
+def _create_simple_tagset(count):
+    tagset = []
+    for i in range(count):
+        tagset.append({'Key': str(i), 'Value': str(i)})
+
+    return {'TagSet': tagset}
+
+def _make_random_string(size):
+    return ''.join(random.choice(string.ascii_letters) for _ in range(size))
+
+
+@pytest.mark.tagging
+@pytest.mark.fails_on_dbstore
+def test_get_obj_tagging():
+    key = 'testputtags'
+    bucket_name = _create_key_with_random_content(key)
+    client = get_client()
+
+    input_tagset = _create_simple_tagset(2)
+    response = client.put_object_tagging(Bucket=bucket_name, Key=key, Tagging=input_tagset)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    response = client.get_object_tagging(Bucket=bucket_name, Key=key)
+    assert response['TagSet'] == input_tagset['TagSet']
+
+
+@pytest.mark.tagging
+def test_get_obj_head_tagging():
+    key = 'testputtags'
+    bucket_name = _create_key_with_random_content(key)
+    client = get_client()
+    count = 2
+
+    input_tagset = _create_simple_tagset(count)
+    response = client.put_object_tagging(Bucket=bucket_name, Key=key, Tagging=input_tagset)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    response = client.head_object(Bucket=bucket_name, Key=key)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+    assert response['ResponseMetadata']['HTTPHeaders']['x-amz-tagging-count'] == str(count)
+
+@pytest.mark.tagging
+@pytest.mark.fails_on_dbstore
+def test_put_max_tags():
+    key = 'testputmaxtags'
+    bucket_name = _create_key_with_random_content(key)
+    client = get_client()
+
+    input_tagset = _create_simple_tagset(10)
+    response = client.put_object_tagging(Bucket=bucket_name, Key=key, Tagging=input_tagset)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    response = client.get_object_tagging(Bucket=bucket_name, Key=key)
+    assert response['TagSet'] == input_tagset['TagSet']
+
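+# S3 allows at most 10 tags per object; an 11th tag should be rejected with
+# InvalidTag.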
+@pytest.mark.tagging
+def test_put_excess_tags():
+    key = 'testputmaxtags'
+    bucket_name = _create_key_with_random_content(key)
+    client = get_client()
+
+    input_tagset = _create_simple_tagset(11)
+    e = assert_raises(ClientError, client.put_object_tagging, Bucket=bucket_name, Key=key, Tagging=input_tagset)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 400
+    assert error_code == 'InvalidTag'
+
+    response = client.get_object_tagging(Bucket=bucket_name, Key=key)
+    assert len(response['TagSet']) == 0
+
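+# Tag keys are limited to 128 characters and values to 256. This test uses the
+# maximum sizes; the next two exceed them by one character and expect InvalidTag.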
+@pytest.mark.tagging
+def test_put_max_kvsize_tags():
+    key = 'testputmaxkeysize'
+    bucket_name = _create_key_with_random_content(key)
+    client = get_client()
+
+    tagset = []
+    for i in range(10):
+        k = _make_random_string(128)
+        v = _make_random_string(256)
+        tagset.append({'Key': k, 'Value': v})
+
+    input_tagset = {'TagSet': tagset}
+
+    response = client.put_object_tagging(Bucket=bucket_name, Key=key, Tagging=input_tagset)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    response = client.get_object_tagging(Bucket=bucket_name, Key=key)
+    for kv_pair in response['TagSet']:
+        assert kv_pair in input_tagset['TagSet']
+
+@pytest.mark.tagging
+def test_put_excess_key_tags():
+    key = 'testputexcesskeytags'
+    bucket_name = _create_key_with_random_content(key)
+    client = get_client()
+
+    tagset = []
+    for i in range(10):
+        k = _make_random_string(129)
+        v = _make_random_string(256)
+        tagset.append({'Key': k, 'Value': v})
+
+    input_tagset = {'TagSet': tagset}
+
+    e = assert_raises(ClientError, client.put_object_tagging, Bucket=bucket_name, Key=key, Tagging=input_tagset)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 400
+    assert error_code == 'InvalidTag'
+
+    response = client.get_object_tagging(Bucket=bucket_name, Key=key)
+    assert len(response['TagSet']) == 0
+
+@pytest.mark.tagging
+def test_put_excess_val_tags():
+    key = 'testputexcesskeytags'
+    bucket_name = _create_key_with_random_content(key)
+    client = get_client()
+
+    tagset = []
+    for i in range(10):
+        k = _make_random_string(128)
+        v = _make_random_string(257)
+        tagset.append({'Key': k, 'Value': v})
+
+    input_tagset = {'TagSet': tagset}
+
+    e = assert_raises(ClientError, client.put_object_tagging, Bucket=bucket_name, Key=key, Tagging=input_tagset)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 400
+    assert error_code == 'InvalidTag'
+
+    response = client.get_object_tagging(Bucket=bucket_name, Key=key)
+    assert len(response['TagSet']) == 0
+
+@pytest.mark.tagging
+@pytest.mark.fails_on_dbstore
+def test_put_modify_tags():
+    key = 'testputmodifytags'
+    bucket_name = _create_key_with_random_content(key)
+    client = get_client()
+
+    tagset = []
+    tagset.append({'Key': 'key', 'Value': 'val'})
+    tagset.append({'Key': 'key2', 'Value': 'val2'})
+
+    input_tagset = {'TagSet': tagset}
+
+    response = client.put_object_tagging(Bucket=bucket_name, Key=key, Tagging=input_tagset)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    response = client.get_object_tagging(Bucket=bucket_name, Key=key)
+    assert response['TagSet'] == input_tagset['TagSet']
+
+    tagset2 = []
+    tagset2.append({'Key': 'key3', 'Value': 'val3'})
+
+    input_tagset2 = {'TagSet': tagset2}
+
+    response = client.put_object_tagging(Bucket=bucket_name, Key=key, Tagging=input_tagset2)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    response = client.get_object_tagging(Bucket=bucket_name, Key=key)
+    assert response['TagSet'] == input_tagset2['TagSet']
+
+@pytest.mark.tagging
+@pytest.mark.fails_on_dbstore
+def test_put_delete_tags():
+    key = 'testputmodifytags'
+    bucket_name = _create_key_with_random_content(key)
+    client = get_client()
+
+    input_tagset = _create_simple_tagset(2)
+    response = client.put_object_tagging(Bucket=bucket_name, Key=key, Tagging=input_tagset)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    response = client.get_object_tagging(Bucket=bucket_name, Key=key)
+    assert response['TagSet'] == input_tagset['TagSet']
+
+    response = client.delete_object_tagging(Bucket=bucket_name, Key=key)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 204
+
+    response = client.get_object_tagging(Bucket=bucket_name, Key=key)
+    assert len(response['TagSet']) == 0
+
+@pytest.mark.tagging
+@pytest.mark.fails_on_dbstore
+def test_post_object_tags_anonymous_request():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+    url = _get_post_url(bucket_name)
+    client.create_bucket(ACL='public-read-write', Bucket=bucket_name)
+
+    key_name = "foo.txt"
+    input_tagset = _create_simple_tagset(2)
+    # xml_input_tagset is the same as input_tagset, expressed as XML.
+    # There is no simple way to convert input_tagset to XML like there was in the boto2 tests
+    xml_input_tagset = "<Tagging><TagSet><Tag><Key>0</Key><Value>0</Value></Tag><Tag><Key>1</Key><Value>1</Value></Tag></TagSet></Tagging>"
+
+
+    payload = OrderedDict([
+        ("key" , key_name),
+        ("acl" , "public-read"),
+        ("Content-Type" , "text/plain"),
+        ("tagging", xml_input_tagset),
+        ('file', ('bar')),
+    ])
+
+    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
+    assert r.status_code == 204
+    response = client.get_object(Bucket=bucket_name, Key=key_name)
+    body = _get_body(response)
+    assert body == 'bar'
+
+    response = client.get_object_tagging(Bucket=bucket_name, Key=key_name)
+    assert response['TagSet'] == input_tagset['TagSet']
+
+@pytest.mark.tagging
+def test_post_object_tags_authenticated_request():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    url = _get_post_url(bucket_name)
+    utc = pytz.utc
+    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
+
+    policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
+    "conditions": [
+    {"bucket": bucket_name},
+        ["starts-with", "$key", "foo"],
+        {"acl": "private"},
+        ["starts-with", "$Content-Type", "text/plain"],
+        ["content-length-range", 0, 1024],
+        ["starts-with", "$tagging", ""]
+    ]}
+
+    # xml_input_tagset is the same as `input_tagset = _create_simple_tagset(2)`, expressed as XML
+    # There is no simple way to convert input_tagset to XML like there was in the boto2 tests
+    xml_input_tagset = "<Tagging><TagSet><Tag><Key>0</Key><Value>0</Value></Tag><Tag><Key>1</Key><Value>1</Value></Tag></TagSet></Tagging>"
+
+    json_policy_document = json.JSONEncoder().encode(policy_document)
+    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
+    policy = base64.b64encode(bytes_json_policy_document)
+    aws_secret_access_key = get_main_aws_secret_key()
+    aws_access_key_id = get_main_aws_access_key()
+
+    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
+
+    payload = OrderedDict([
+        ("key" , "foo.txt"),
+        ("AWSAccessKeyId" , aws_access_key_id),\
+        ("acl" , "private"),("signature" , signature),("policy" , policy),\
+        ("tagging", xml_input_tagset),
+        ("Content-Type" , "text/plain"),
+        ('file', ('bar'))])
+
+    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
+    assert r.status_code == 204
+    response = client.get_object(Bucket=bucket_name, Key='foo.txt')
+    body = _get_body(response)
+    assert body == 'bar'
+
+
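+# Tags can also be set at PutObject time via the x-amz-tagging header, which
+# takes query-string encoding ('foo=bar&bar').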
+@pytest.mark.tagging
+@pytest.mark.fails_on_dbstore
+def test_put_obj_with_tags():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    key = 'testtagobj1'
+    data = 'A'*100
+
+    tagset = []
+    tagset.append({'Key': 'bar', 'Value': ''})
+    tagset.append({'Key': 'foo', 'Value': 'bar'})
+
+    put_obj_tag_headers = {
+        'x-amz-tagging' : 'foo=bar&bar'
+    }
+
+    lf = (lambda **kwargs: kwargs['params']['headers'].update(put_obj_tag_headers))
+    client.meta.events.register('before-call.s3.PutObject', lf)
+
+    client.put_object(Bucket=bucket_name, Key=key, Body=data)
+    response = client.get_object(Bucket=bucket_name, Key=key)
+    body = _get_body(response)
+    assert body == data
+
+    response = client.get_object_tagging(Bucket=bucket_name, Key=key)
+    response_tagset = response['TagSet']
+    assert response_tagset == tagset
+
+def _make_arn_resource(path="*"):
+    return "arn:aws:s3:::{}".format(path)
+
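+# The next three tests grant a single tagging action (get/put/delete) to everyone
+# via bucket policy and verify that the alt account can use exactly that action.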
+@pytest.mark.tagging
+@pytest.mark.bucket_policy
+@pytest.mark.fails_on_dbstore
+def test_get_tags_acl_public():
+    key = 'testputtagsacl'
+    bucket_name = _create_key_with_random_content(key)
+    client = get_client()
+
+    resource = _make_arn_resource("{}/{}".format(bucket_name, key))
+    policy_document = make_json_policy("s3:GetObjectTagging",
+                                       resource)
+
+    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
+
+    input_tagset = _create_simple_tagset(10)
+    response = client.put_object_tagging(Bucket=bucket_name, Key=key, Tagging=input_tagset)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    alt_client = get_alt_client()
+
+    response = alt_client.get_object_tagging(Bucket=bucket_name, Key=key)
+    assert response['TagSet'] == input_tagset['TagSet']
+
+@pytest.mark.tagging
+@pytest.mark.bucket_policy
+@pytest.mark.fails_on_dbstore
+def test_put_tags_acl_public():
+    key = 'testputtagsacl'
+    bucket_name = _create_key_with_random_content(key)
+    client = get_client()
+
+    resource = _make_arn_resource("{}/{}".format(bucket_name, key))
+    policy_document = make_json_policy("s3:PutObjectTagging",
+                                       resource)
+
+    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
+
+    input_tagset = _create_simple_tagset(10)
+    alt_client = get_alt_client()
+    response = alt_client.put_object_tagging(Bucket=bucket_name, Key=key, Tagging=input_tagset)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    response = client.get_object_tagging(Bucket=bucket_name, Key=key)
+    assert response['TagSet'] == input_tagset['TagSet']
+
+@pytest.mark.tagging
+@pytest.mark.bucket_policy
+def test_delete_tags_obj_public():
+    key = 'testputtagsacl'
+    bucket_name = _create_key_with_random_content(key)
+    client = get_client()
+
+    resource = _make_arn_resource("{}/{}".format(bucket_name, key))
+    policy_document = make_json_policy("s3:DeleteObjectTagging",
+                                       resource)
+
+    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
+
+    input_tagset = _create_simple_tagset(10)
+    response = client.put_object_tagging(Bucket=bucket_name, Key=key, Tagging=input_tagset)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    alt_client = get_alt_client()
+
+    response = alt_client.delete_object_tagging(Bucket=bucket_name, Key=key)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 204
+
+    response = client.get_object_tagging(Bucket=bucket_name, Key=key)
+    assert len(response['TagSet']) == 0
+
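+# PutObject on a versioning-enabled bucket should return a VersionId; buckets
+# with default or suspended versioning should not.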
+def test_versioning_bucket_atomic_upload_return_version_id():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    key = 'bar'
+
+    # for a versioning-enabled bucket, a non-empty version-id should be returned
+    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
+    response = client.put_object(Bucket=bucket_name, Key=key)
+    version_id = response['VersionId']
+
+    response  = client.list_object_versions(Bucket=bucket_name)
+    versions = response['Versions']
+    for version in versions:
+        assert version['VersionId'] == version_id
+
+
+    # for a versioning-default bucket, no version-id should be returned.
+    bucket_name = get_new_bucket()
+    key = 'baz'
+    response = client.put_object(Bucket=bucket_name, Key=key)
+    assert not 'VersionId' in response
+
+    # for a versioning-suspended bucket, no version-id should be returned.
+    bucket_name = get_new_bucket()
+    key = 'baz'
+    check_configure_versioning_retry(bucket_name, "Suspended", "Suspended")
+    response = client.put_object(Bucket=bucket_name, Key=key)
+    assert not 'VersionId' in response
+
+def test_versioning_bucket_multipart_upload_return_version_id():
+    content_type='text/bla'
+    objlen = 30 * 1024 * 1024
+
+    bucket_name = get_new_bucket()
+    client = get_client()
+    key = 'bar'
+    metadata={'foo': 'baz'}
+
+    # for a versioning-enabled bucket, a non-empty version-id should be returned
+    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
+
+    (upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key, size=objlen, client=client, content_type=content_type, metadata=metadata)
+
+    response = client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
+    version_id = response['VersionId']
+
+    response  = client.list_object_versions(Bucket=bucket_name)
+    versions = response['Versions']
+    for version in versions:
+        assert version['VersionId'] == version_id
+
+    # for a versioning-default bucket, no version-id should be returned.
+    bucket_name = get_new_bucket()
+    key = 'baz'
+
+    (upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key, size=objlen, client=client, content_type=content_type, metadata=metadata)
+
+    response = client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
+    assert not 'VersionId' in response
+
+    # for a versioning-suspended bucket, no version-id should be returned.
+    bucket_name = get_new_bucket()
+    key = 'foo'
+    check_configure_versioning_retry(bucket_name, "Suspended", "Suspended")
+
+    (upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key, size=objlen, client=client, content_type=content_type, metadata=metadata)
+
+    response = client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
+    assert not 'VersionId' in response
+
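+# The next three tests use the s3:ExistingObjectTag/<key> policy condition, which
+# grants access based on a tag already present on the object.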
+@pytest.mark.tagging
+@pytest.mark.bucket_policy
+@pytest.mark.fails_on_dbstore
+def test_bucket_policy_get_obj_existing_tag():
+    bucket_name = _create_objects(keys=['publictag', 'privatetag', 'invalidtag'])
+    client = get_client()
+
+    tag_conditional = {"StringEquals": {
+        "s3:ExistingObjectTag/security" : "public"
+    }}
+
+    resource = _make_arn_resource("{}/{}".format(bucket_name, "*"))
+    policy_document = make_json_policy("s3:GetObject",
+                                       resource,
+                                       conditions=tag_conditional)
+
+
+    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
+    tagset = []
+    tagset.append({'Key': 'security', 'Value': 'public'})
+    tagset.append({'Key': 'foo', 'Value': 'bar'})
+
+    input_tagset = {'TagSet': tagset}
+
+    response = client.put_object_tagging(Bucket=bucket_name, Key='publictag', Tagging=input_tagset)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    tagset2 = []
+    tagset2.append({'Key': 'security', 'Value': 'private'})
+
+    input_tagset = {'TagSet': tagset2}
+
+    response = client.put_object_tagging(Bucket=bucket_name, Key='privatetag', Tagging=input_tagset)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    tagset3 = []
+    tagset3.append({'Key': 'security1', 'Value': 'public'})
+
+    input_tagset = {'TagSet': tagset3}
+
+    response = client.put_object_tagging(Bucket=bucket_name, Key='invalidtag', Tagging=input_tagset)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    alt_client = get_alt_client()
+    response = alt_client.get_object(Bucket=bucket_name, Key='publictag')
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    e = assert_raises(ClientError, alt_client.get_object, Bucket=bucket_name, Key='privatetag')
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 403
+
+    e = assert_raises(ClientError, alt_client.get_object, Bucket=bucket_name, Key='invalidtag')
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 403
+
+@pytest.mark.tagging
+@pytest.mark.bucket_policy
+@pytest.mark.fails_on_dbstore
+def test_bucket_policy_get_obj_tagging_existing_tag():
+    bucket_name = _create_objects(keys=['publictag', 'privatetag', 'invalidtag'])
+    client = get_client()
+
+    tag_conditional = {"StringEquals": {
+        "s3:ExistingObjectTag/security" : "public"
+    }}
+
+    resource = _make_arn_resource("{}/{}".format(bucket_name, "*"))
+    policy_document = make_json_policy("s3:GetObjectTagging",
+                                       resource,
+                                       conditions=tag_conditional)
+
+
+    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
+    tagset = []
+    tagset.append({'Key': 'security', 'Value': 'public'})
+    tagset.append({'Key': 'foo', 'Value': 'bar'})
+
+    input_tagset = {'TagSet': tagset}
+
+    response = client.put_object_tagging(Bucket=bucket_name, Key='publictag', Tagging=input_tagset)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    tagset2 = []
+    tagset2.append({'Key': 'security', 'Value': 'private'})
+
+    input_tagset = {'TagSet': tagset2}
+
+    response = client.put_object_tagging(Bucket=bucket_name, Key='privatetag', Tagging=input_tagset)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    tagset3 = []
+    tagset3.append({'Key': 'security1', 'Value': 'public'})
+
+    input_tagset = {'TagSet': tagset3}
+
+    response = client.put_object_tagging(Bucket=bucket_name, Key='invalidtag', Tagging=input_tagset)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    alt_client = get_alt_client()
+    response = alt_client.get_object_tagging(Bucket=bucket_name, Key='publictag')
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    # A get object itself should fail since we allowed only GetObjectTagging
+    e = assert_raises(ClientError, alt_client.get_object, Bucket=bucket_name, Key='publictag')
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 403
+
+    e = assert_raises(ClientError, alt_client.get_object_tagging, Bucket=bucket_name, Key='privatetag')
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 403
+
+
+    e = assert_raises(ClientError, alt_client.get_object_tagging, Bucket=bucket_name, Key='invalidtag')
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 403
+
+
+@pytest.mark.tagging
+@pytest.mark.bucket_policy
+@pytest.mark.fails_on_dbstore
+def test_bucket_policy_put_obj_tagging_existing_tag():
+    bucket_name = _create_objects(keys=['publictag', 'privatetag', 'invalidtag'])
+    client = get_client()
+
+    tag_conditional = {"StringEquals": {
+        "s3:ExistingObjectTag/security" : "public"
+    }}
+
+    resource = _make_arn_resource("{}/{}".format(bucket_name, "*"))
+    policy_document = make_json_policy("s3:PutObjectTagging",
+                                       resource,
+                                       conditions=tag_conditional)
+
+
+    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
+    tagset = []
+    tagset.append({'Key': 'security', 'Value': 'public'})
+    tagset.append({'Key': 'foo', 'Value': 'bar'})
+
+    input_tagset = {'TagSet': tagset}
+
+    response = client.put_object_tagging(Bucket=bucket_name, Key='publictag', Tagging=input_tagset)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    tagset2 = []
+    tagset2.append({'Key': 'security', 'Value': 'private'})
+
+    input_tagset = {'TagSet': tagset2}
+
+    response = client.put_object_tagging(Bucket=bucket_name, Key='privatetag', Tagging=input_tagset)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    alt_client = get_alt_client()
+    # PutObjectTagging with an ExistingObjectTag condition is a bit weird: if a
+    # request overwrites the tag that the condition matches on, subsequent
+    # tagging requests will be denied
+
+    testtagset1 = []
+    testtagset1.append({'Key': 'security', 'Value': 'public'})
+    testtagset1.append({'Key': 'foo', 'Value': 'bar'})
+
+    input_tagset = {'TagSet': testtagset1}
+
+    response = alt_client.put_object_tagging(Bucket=bucket_name, Key='publictag', Tagging=input_tagset)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    e = assert_raises(ClientError, alt_client.put_object_tagging, Bucket=bucket_name, Key='privatetag', Tagging=input_tagset)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 403
+
+    testtagset2 = []
+    testtagset2.append({'Key': 'security', 'Value': 'private'})
+
+    input_tagset = {'TagSet': testtagset2}
+
+    response = alt_client.put_object_tagging(Bucket=bucket_name, Key='publictag', Tagging=input_tagset)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    # Now try putting the original tags again, this should fail
+    input_tagset = {'TagSet': testtagset1}
+
+    e = assert_raises(ClientError, alt_client.put_object_tagging, Bucket=bucket_name, Key='publictag', Tagging=input_tagset)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 403
+
+
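+# The destination bucket's policy uses s3:x-amz-copy-source to restrict
+# CopyObject to sources under the 'public/' prefix of the source bucket.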
+@pytest.mark.tagging
+@pytest.mark.bucket_policy
+@pytest.mark.fails_on_dbstore
+def test_bucket_policy_put_obj_copy_source():
+    bucket_name = _create_objects(keys=['public/foo', 'public/bar', 'private/foo'])
+    client = get_client()
+
+    src_resource = _make_arn_resource("{}/{}".format(bucket_name, "*"))
+    policy_document = make_json_policy("s3:GetObject",
+                                       src_resource)
+
+    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
+
+    bucket_name2 = get_new_bucket()
+
+    tag_conditional = {"StringLike": {
+        "s3:x-amz-copy-source" : bucket_name + "/public/*"
+    }}
+
+    resource = _make_arn_resource("{}/{}".format(bucket_name2, "*"))
+    policy_document = make_json_policy("s3:PutObject",
+                                       resource,
+                                       conditions=tag_conditional)
+
+    client.put_bucket_policy(Bucket=bucket_name2, Policy=policy_document)
+
+    alt_client = get_alt_client()
+    copy_source = {'Bucket': bucket_name, 'Key': 'public/foo'}
+
+    alt_client.copy_object(Bucket=bucket_name2, CopySource=copy_source, Key='new_foo')
+
+    # This is possible because we are still the object owner; see the tests that
+    # combine grants with policy for how to do this properly
+    response = alt_client.get_object(Bucket=bucket_name2, Key='new_foo')
+    body = _get_body(response)
+    assert body == 'public/foo'
+
+    copy_source = {'Bucket': bucket_name, 'Key': 'public/bar'}
+    alt_client.copy_object(Bucket=bucket_name2, CopySource=copy_source, Key='new_foo2')
+
+    response = alt_client.get_object(Bucket=bucket_name2, Key='new_foo2')
+    body = _get_body(response)
+    assert body == 'public/bar'
+
+    copy_source = {'Bucket': bucket_name, 'Key': 'private/foo'}
+    check_access_denied(alt_client.copy_object, Bucket=bucket_name2, CopySource=copy_source, Key='new_foo2')
+
+@pytest.mark.tagging
+@pytest.mark.bucket_policy
+@pytest.mark.fails_on_dbstore
+def test_bucket_policy_put_obj_copy_source_meta():
+    src_bucket_name = _create_objects(keys=['public/foo', 'public/bar'])
+    client = get_client()
+
+    src_resource = _make_arn_resource("{}/{}".format(src_bucket_name, "*"))
+    policy_document = make_json_policy("s3:GetObject",
+                                       src_resource)
+
+    client.put_bucket_policy(Bucket=src_bucket_name, Policy=policy_document)
+
+    bucket_name = get_new_bucket()
+
+    tag_conditional = {"StringEquals": {
+        "s3:x-amz-metadata-directive" : "COPY"
+    }}
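+    # the s3:x-amz-metadata-directive condition key matches the metadata
+    # directive header, so only copies that keep the source metadata (COPY)
+    # are allowed; copies that replace metadata are denied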
+
+    resource = _make_arn_resource("{}/{}".format(bucket_name, "*"))
+    policy_document = make_json_policy("s3:PutObject",
+                                       resource,
+                                       conditions=tag_conditional)
+
+    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
+
+    alt_client = get_alt_client()
+
+    lf = (lambda **kwargs: kwargs['params']['headers'].update({"x-amz-metadata-directive": "COPY"}))
+    alt_client.meta.events.register('before-call.s3.CopyObject', lf)
+
+    copy_source = {'Bucket': src_bucket_name, 'Key': 'public/foo'}
+    alt_client.copy_object(Bucket=bucket_name, CopySource=copy_source, Key='new_foo')
+
+    # This read is possible because the alt user is still the object owner;
+    # see the grant-based policy tests for how to hand over ownership properly
+    response = alt_client.get_object(Bucket=bucket_name, Key='new_foo')
+    body = _get_body(response)
+    assert body == 'public/foo'
+
+    # remove the x-amz-metadata-directive header
+    def remove_header(**kwargs):
+        if ("x-amz-metadata-directive" in kwargs['params']['headers']):
+            del kwargs['params']['headers']["x-amz-metadata-directive"]
+
+    alt_client.meta.events.register('before-call.s3.CopyObject', remove_header)
+
+    copy_source = {'Bucket': src_bucket_name, 'Key': 'public/bar'}
+    check_access_denied(alt_client.copy_object, Bucket=bucket_name, CopySource=copy_source, Key='new_foo2', Metadata={"foo": "bar"})
+
+
+@pytest.mark.tagging
+@pytest.mark.bucket_policy
+def test_bucket_policy_put_obj_acl():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    # An Allow conditional would require at least the presence of an x-amz-acl
+    # header; a Deny conditional instead rejects any request that tries to set
+    # a public-read/write acl
+    conditional = {"StringLike": {
+        "s3:x-amz-acl" : "public*"
+    }}
+
+    p = Policy()
+    resource = _make_arn_resource("{}/{}".format(bucket_name, "*"))
+    s1 = Statement("s3:PutObject",resource)
+    s2 = Statement("s3:PutObject", resource, effect="Deny", condition=conditional)
+
+    policy_document = p.add_statement(s1).add_statement(s2).to_json()
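+    # an explicit Deny always overrides the blanket Allow, so any PutObject
+    # whose x-amz-acl header matches public* is rejected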
+    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
+
+    alt_client = get_alt_client()
+    key1 = 'private-key'
+
+    # to be really pedantic we could explicitly check that this doesn't raise
+    # and mark a failure, but if it does raise the test runner reports it as
+    # an ERROR anyway
+    response = alt_client.put_object(Bucket=bucket_name, Key=key1, Body=key1)
+    #response = alt_client.put_object_acl(Bucket=bucket_name, Key=key1, ACL='private')
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    key2 = 'public-key'
+
+    lf = (lambda **kwargs: kwargs['params']['headers'].update({"x-amz-acl": "public-read"}))
+    alt_client.meta.events.register('before-call.s3.PutObject', lf)
+
+    e = assert_raises(ClientError, alt_client.put_object, Bucket=bucket_name, Key=key2, Body=key2)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 403
+
+
+@pytest.mark.bucket_policy
+def test_bucket_policy_put_obj_grant():
+
+    bucket_name = get_new_bucket()
+    bucket_name2 = get_new_bucket()
+    client = get_client()
+
+    # Normally the owner of a key is its uploader. For the first bucket we
+    # explicitly require that the bucket owner is granted full control over
+    # objects uploaded by any user; the second bucket enforces no such
+    # policy, so the uploader retains ownership there
+
+    main_user_id = get_main_user_id()
+    alt_user_id = get_alt_user_id()
+
+    owner_id_str = "id=" + main_user_id
+    s3_conditional = {"StringEquals": {
+        "s3:x-amz-grant-full-control" : owner_id_str
+    }}
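+    # the condition matches only requests whose x-amz-grant-full-control
+    # header grants full control to the bucket owner ("id=<user id>" is the
+    # canonical-id grantee form of the grant headers)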
+
+    resource = _make_arn_resource("{}/{}".format(bucket_name, "*"))
+    policy_document = make_json_policy("s3:PutObject",
+                                       resource,
+                                       conditions=s3_conditional)
+
+    resource = _make_arn_resource("{}/{}".format(bucket_name2, "*"))
+    policy_document2 = make_json_policy("s3:PutObject", resource)
+
+    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
+    client.put_bucket_policy(Bucket=bucket_name2, Policy=policy_document2)
+
+    alt_client = get_alt_client()
+    key1 = 'key1'
+
+    lf = (lambda **kwargs: kwargs['params']['headers'].update({"x-amz-grant-full-control" : owner_id_str}))
+    alt_client.meta.events.register('before-call.s3.PutObject', lf)
+
+    response = alt_client.put_object(Bucket=bucket_name, Key=key1, Body=key1)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    def remove_header(**kwargs):
+        if ("x-amz-grant-full-control" in kwargs['params']['headers']):
+            del kwargs['params']['headers']["x-amz-grant-full-control"]
+
+    alt_client.meta.events.register('before-call.s3.PutObject', remove_header)
+
+    key2 = 'key2'
+    response = alt_client.put_object(Bucket=bucket_name2, Key=key2, Body=key2)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    acl1_response = client.get_object_acl(Bucket=bucket_name, Key=key1)
+
+    # the main user tries to read the acl of the object uploaded by the alt
+    # user in the bucket where ownership wasn't transferred
+    check_access_denied(client.get_object_acl, Bucket=bucket_name2, Key=key2)
+
+    acl2_response = alt_client.get_object_acl(Bucket=bucket_name2, Key=key2)
+
+    assert acl1_response['Grants'][0]['Grantee']['ID'] == main_user_id
+    assert acl2_response['Grants'][0]['Grantee']['ID'] == alt_user_id
+
+
+@pytest.mark.encryption
+def test_put_obj_enc_conflict_c_s3():
+    bucket_name = get_new_bucket()
+    client = get_v2_client()
+
+    # boto3.set_stream_logger(name='botocore')
+
+    key1_str = 'testobj'
+
+    sse_client_headers = {
+        'x-amz-server-side-encryption' : 'AES256',
+        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
+        'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
+        'x-amz-server-side-encryption-customer-key-md5': 'DWygnHRtgiJ77HCm+1rvHw=='
+    }
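+    # SSE-S3 (AES256) and SSE-C customer-key headers are mutually exclusive,
+    # so supplying both on one request should be rejected with InvalidArgument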
+
+    lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_client_headers))
+    client.meta.events.register('before-call.s3.PutObject', lf)
+    e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key=key1_str)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 400
+    assert error_code == 'InvalidArgument'
+
+@pytest.mark.encryption
+def test_put_obj_enc_conflict_c_kms():
+    kms_keyid = get_main_kms_keyid()
+    if kms_keyid is None:
+        kms_keyid = 'fool-me-once'
+    bucket_name = get_new_bucket()
+    client = get_v2_client()
+
+    # boto3.set_stream_logger(name='botocore')
+
+    key1_str = 'testobj'
+
+    sse_client_headers = {
+        'x-amz-server-side-encryption' : 'aws:kms',
+        'x-amz-server-side-encryption-aws-kms-key-id': kms_keyid,
+        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
+        'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
+        'x-amz-server-side-encryption-customer-key-md5': 'DWygnHRtgiJ77HCm+1rvHw=='
+    }
+
+    lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_client_headers))
+    client.meta.events.register('before-call.s3.PutObject', lf)
+    e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key=key1_str)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 400
+    assert error_code == 'InvalidArgument'
+
+@pytest.mark.encryption
+def test_put_obj_enc_conflict_s3_kms():
+    kms_keyid = get_main_kms_keyid()
+    if kms_keyid is None:
+        kms_keyid = 'fool-me-once'
+    bucket_name = get_new_bucket()
+    client = get_v2_client()
+
+    # boto3.set_stream_logger(name='botocore')
+
+    key1_str = 'testobj'
+
+    sse_client_headers = {
+        'x-amz-server-side-encryption' : 'AES256',
+        'x-amz-server-side-encryption-aws-kms-key-id': kms_keyid
+    }
+
+    lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_client_headers))
+    client.meta.events.register('before-call.s3.PutObject', lf)
+    e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key=key1_str)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 400
+    assert error_code == 'InvalidArgument'
+
+@pytest.mark.encryption
+def test_put_obj_enc_conflict_bad_enc_kms():
+    kms_keyid = get_main_kms_keyid()
+    if kms_keyid is None:
+        kms_keyid = 'fool-me-once'
+    bucket_name = get_new_bucket()
+    client = get_v2_client()
+
+    # boto3.set_stream_logger(name='botocore')
+
+    key1_str = 'testobj'
+
+    sse_client_headers = {
+        'x-amz-server-side-encryption' : 'aes:kms',    # aes != aws
+    }
+
+    lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_client_headers))
+    client.meta.events.register('before-call.s3.PutObject', lf)
+    e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key=key1_str)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 400
+    assert error_code == 'InvalidArgument'
+
+@pytest.mark.encryption
+@pytest.mark.bucket_policy
+@pytest.mark.sse_s3
+@pytest.mark.fails_on_dbstore
+def test_bucket_policy_put_obj_s3_noenc():
+    bucket_name = get_new_bucket()
+    client = get_v2_client()
+
+    deny_incorrect_algo = {
+        "StringNotEquals": {
+          "s3:x-amz-server-side-encryption": "AES256"
+        }
+    }
+
+    deny_unencrypted_obj = {
+        "Null" : {
+          "s3:x-amz-server-side-encryption": "true"
+        }
+    }
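+    # the Null condition with value "true" matches requests where the
+    # x-amz-server-side-encryption header is absent, i.e. unencrypted uploads
+    # are denied; together with the StringNotEquals statement only AES256
+    # (SSE-S3) uploads are accepted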
+
+    p = Policy()
+    resource = _make_arn_resource("{}/{}".format(bucket_name, "*"))
+
+    s1 = Statement("s3:PutObject", resource, effect="Deny", condition=deny_incorrect_algo)
+    s2 = Statement("s3:PutObject", resource, effect="Deny", condition=deny_unencrypted_obj)
+    policy_document = p.add_statement(s1).add_statement(s2).to_json()
+
+    # boto3.set_stream_logger(name='botocore')
+
+    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
+    key1_str = 'testobj'
+
+    #response = client.get_bucket_policy(Bucket=bucket_name)
+    #print response
+
+
+    # doing this here breaks the next request w/ 400 (non-sse bug).  Do it last.
+    #check_access_denied(client.put_object, Bucket=bucket_name, Key=key1_str, Body=key1_str)
+
+    # TODO: why does this return a 400 instead of passing? it appears boto3 is not parsing the 200 response the rgw sends back properly
+    # DEBUGGING: run the boto2 version and compare the requests
+    # DEBUGGING: try to run this with v2 auth (figure out why get_v2_client isn't working) to make the requests similar to what boto2 sends
+    # DEBUGGING: try adding other options to put_object to see if that improves the response
+
+    # first validate that writing a sse-s3 object works
+    response = client.put_object(Bucket=bucket_name, Key=key1_str, ServerSideEncryption='AES256')
+    assert response['ResponseMetadata']['HTTPHeaders']['x-amz-server-side-encryption'] == 'AES256'
+
+    # then validate that a non-encrypted object fails.
+    # (this also breaks the connection--non-sse bug, probably because the server
+    #  errors out before it consumes the data...)
+    check_access_denied(client.put_object, Bucket=bucket_name, Key=key1_str, Body=key1_str)
+
+@pytest.mark.encryption
+@pytest.mark.bucket_policy
+@pytest.mark.sse_s3
+def test_bucket_policy_put_obj_s3_kms():
+    kms_keyid = get_main_kms_keyid()
+    if kms_keyid is None:
+        kms_keyid = 'fool-me-twice'
+    bucket_name = get_new_bucket()
+    client = get_v2_client()
+
+    deny_incorrect_algo = {
+        "StringNotEquals": {
+          "s3:x-amz-server-side-encryption": "AES256"
+        }
+    }
+
+    deny_unencrypted_obj = {
+        "Null" : {
+          "s3:x-amz-server-side-encryption": "true"
+        }
+    }
+
+    p = Policy()
+    resource = _make_arn_resource("{}/{}".format(bucket_name, "*"))
+
+    s1 = Statement("s3:PutObject", resource, effect="Deny", condition=deny_incorrect_algo)
+    s2 = Statement("s3:PutObject", resource, effect="Deny", condition=deny_unencrypted_obj)
+    policy_document = p.add_statement(s1).add_statement(s2).to_json()
+
+    # boto3.set_stream_logger(name='botocore')
+
+    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
+    key1_str = 'testobj'
+
+    #response = client.get_bucket_policy(Bucket=bucket_name)
+    #print response
+
+    sse_client_headers = {
+        'x-amz-server-side-encryption': 'aws:kms',
+        'x-amz-server-side-encryption-aws-kms-key-id': kms_keyid
+    }
+
+    lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_client_headers))
+    client.meta.events.register('before-call.s3.PutObject', lf)
+    check_access_denied(client.put_object, Bucket=bucket_name, Key=key1_str, Body=key1_str)
+
+@pytest.mark.encryption
+@pytest.mark.fails_on_dbstore
+@pytest.mark.bucket_policy
+def test_bucket_policy_put_obj_kms_noenc():
+    kms_keyid = get_main_kms_keyid()
+    if kms_keyid is None:
+        pytest.skip('[s3 main] section missing kms_keyid')
+    bucket_name = get_new_bucket()
+    client = get_v2_client()
+
+    deny_incorrect_algo = {
+        "StringNotEquals": {
+          "s3:x-amz-server-side-encryption": "aws:kms"
+        }
+    }
+
+    deny_unencrypted_obj = {
+        "Null" : {
+          "s3:x-amz-server-side-encryption": "true"
+        }
+    }
+
+    p = Policy()
+    resource = _make_arn_resource("{}/{}".format(bucket_name, "*"))
+
+    s1 = Statement("s3:PutObject", resource, effect="Deny", condition=deny_incorrect_algo)
+    s2 = Statement("s3:PutObject", resource, effect="Deny", condition=deny_unencrypted_obj)
+    policy_document = p.add_statement(s1).add_statement(s2).to_json()
+
+    # boto3.set_stream_logger(name='botocore')
+
+    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
+    key1_str = 'testobj'
+    key2_str = 'unicorn'
+
+    #response = client.get_bucket_policy(Bucket=bucket_name)
+    #print response
+
+    # must do check_access_denied last - otherwise, pending data
+    #  breaks next call...
+    response = client.put_object(Bucket=bucket_name, Key=key1_str,
+         ServerSideEncryption='aws:kms', SSEKMSKeyId=kms_keyid)
+    assert response['ResponseMetadata']['HTTPHeaders']['x-amz-server-side-encryption'] == 'aws:kms'
+    assert response['ResponseMetadata']['HTTPHeaders']['x-amz-server-side-encryption-aws-kms-key-id'] == kms_keyid
+
+    check_access_denied(client.put_object, Bucket=bucket_name, Key=key2_str, Body=key2_str)
+
+@pytest.mark.encryption
+@pytest.mark.bucket_policy
+def test_bucket_policy_put_obj_kms_s3():
+    bucket_name = get_new_bucket()
+    client = get_v2_client()
+
+    deny_incorrect_algo = {
+        "StringNotEquals": {
+          "s3:x-amz-server-side-encryption": "aws:kms"
+        }
+    }
+
+    deny_unencrypted_obj = {
+        "Null" : {
+          "s3:x-amz-server-side-encryption": "true"
+        }
+    }
+
+    p = Policy()
+    resource = _make_arn_resource("{}/{}".format(bucket_name, "*"))
+
+    s1 = Statement("s3:PutObject", resource, effect="Deny", condition=deny_incorrect_algo)
+    s2 = Statement("s3:PutObject", resource, effect="Deny", condition=deny_unencrypted_obj)
+    policy_document = p.add_statement(s1).add_statement(s2).to_json()
+
+    # boto3.set_stream_logger(name='botocore')
+
+    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
+    key1_str = 'testobj'
+
+    #response = client.get_bucket_policy(Bucket=bucket_name)
+    #print response
+
+    sse_client_headers = {
+        'x-amz-server-side-encryption' : 'AES256',
+    }
+
+    lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_client_headers))
+    client.meta.events.register('before-call.s3.PutObject', lf)
+    check_access_denied(client.put_object, Bucket=bucket_name, Key=key1_str, Body=key1_str)
+
+@pytest.mark.tagging
+@pytest.mark.bucket_policy
+# TODO: remove this fails_on_rgw when I fix it
+@pytest.mark.fails_on_rgw
+def test_bucket_policy_put_obj_request_obj_tag():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    tag_conditional = {"StringEquals": {
+        "s3:RequestObjectTag/security" : "public"
+    }}
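+    # s3:RequestObjectTag/<key> matches tags supplied with the request itself
+    # (the x-amz-tagging header), so only uploads tagged security=public are
+    # allowed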
+
+    p = Policy()
+    resource = _make_arn_resource("{}/{}".format(bucket_name, "*"))
+
+    s1 = Statement("s3:PutObject", resource, effect="Allow", condition=tag_conditional)
+    policy_document = p.add_statement(s1).to_json()
+
+    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
+
+    alt_client = get_alt_client()
+    key1_str = 'testobj'
+    check_access_denied(alt_client.put_object, Bucket=bucket_name, Key=key1_str, Body=key1_str)
+
+    headers = {"x-amz-tagging" : "security=public"}
+    lf = (lambda **kwargs: kwargs['params']['headers'].update(headers))
+    client.meta.events.register('before-call.s3.PutObject', lf)
+    #TODO: why is this a 400 and not passing
+    alt_client.put_object(Bucket=bucket_name, Key=key1_str, Body=key1_str)
+
+@pytest.mark.tagging
+@pytest.mark.bucket_policy
+@pytest.mark.fails_on_dbstore
+def test_bucket_policy_get_obj_acl_existing_tag():
+    bucket_name = _create_objects(keys=['publictag', 'privatetag', 'invalidtag'])
+    client = get_client()
+
+    tag_conditional = {"StringEquals": {
+        "s3:ExistingObjectTag/security" : "public"
+    }}
+
+    resource = _make_arn_resource("{}/{}".format(bucket_name, "*"))
+    policy_document = make_json_policy("s3:GetObjectAcl",
+                                       resource,
+                                       conditions=tag_conditional)
+
+
+    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
+    tagset = []
+    tagset.append({'Key': 'security', 'Value': 'public'})
+    tagset.append({'Key': 'foo', 'Value': 'bar'})
+
+    input_tagset = {'TagSet': tagset}
+
+    response = client.put_object_tagging(Bucket=bucket_name, Key='publictag', Tagging=input_tagset)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    tagset2 = []
+    tagset2.append({'Key': 'security', 'Value': 'private'})
+
+    input_tagset = {'TagSet': tagset2}
+
+    response = client.put_object_tagging(Bucket=bucket_name, Key='privatetag', Tagging=input_tagset)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    tagset3 = []
+    tagset3.append({'Key': 'security1', 'Value': 'public'})
+
+    input_tagset = {'TagSet': tagset3}
+
+    response = client.put_object_tagging(Bucket=bucket_name, Key='invalidtag', Tagging=input_tagset)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    alt_client = get_alt_client()
+    response = alt_client.get_object_acl(Bucket=bucket_name, Key='publictag')
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    # a GetObject itself should fail since the policy allows only GetObjectAcl
+    e = assert_raises(ClientError, alt_client.get_object, Bucket=bucket_name, Key='publictag')
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 403
+
+    e = assert_raises(ClientError, alt_client.get_object_tagging, Bucket=bucket_name, Key='privatetag')
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 403
+
+    e = assert_raises(ClientError, alt_client.get_object_tagging, Bucket=bucket_name, Key='invalidtag')
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 403
+
+
+@pytest.mark.fails_on_dbstore
+def test_object_lock_put_obj_lock():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
+    conf = {'ObjectLockEnabled':'Enabled',
+            'Rule': {
+                'DefaultRetention':{
+                    'Mode':'GOVERNANCE',
+                    'Days':1
+                }
+            }}
+    response = client.put_object_lock_configuration(
+        Bucket=bucket_name,
+        ObjectLockConfiguration=conf)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    conf = {'ObjectLockEnabled':'Enabled',
+            'Rule': {
+                'DefaultRetention':{
+                    'Mode':'COMPLIANCE',
+                    'Years':1
+                }
+            }}
+    response = client.put_object_lock_configuration(
+        Bucket=bucket_name,
+        ObjectLockConfiguration=conf)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
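+    # a bucket created with object lock enabled has versioning enabled
+    # implicitly, which is verified below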
+    response = client.get_bucket_versioning(Bucket=bucket_name)
+    assert response['Status'] == 'Enabled'
+
+
+def test_object_lock_put_obj_lock_invalid_bucket():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+    client.create_bucket(Bucket=bucket_name)
+    conf = {'ObjectLockEnabled':'Enabled',
+            'Rule': {
+                'DefaultRetention':{
+                    'Mode':'GOVERNANCE',
+                    'Days':1
+                }
+            }}
+    e = assert_raises(ClientError, client.put_object_lock_configuration, Bucket=bucket_name, ObjectLockConfiguration=conf)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 409
+    assert error_code == 'InvalidBucketState'
+
+
+@pytest.mark.fails_on_dbstore
+def test_object_lock_put_obj_lock_with_days_and_years():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
+    conf = {'ObjectLockEnabled':'Enabled',
+            'Rule': {
+                'DefaultRetention':{
+                    'Mode':'GOVERNANCE',
+                    'Days':1,
+                    'Years':1
+                }
+            }}
+    e = assert_raises(ClientError, client.put_object_lock_configuration, Bucket=bucket_name, ObjectLockConfiguration=conf)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 400
+    assert error_code == 'MalformedXML'
+
+
+@pytest.mark.fails_on_dbstore
+def test_object_lock_put_obj_lock_invalid_days():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
+    conf = {'ObjectLockEnabled':'Enabled',
+            'Rule': {
+                'DefaultRetention':{
+                    'Mode':'GOVERNANCE',
+                    'Days':0
+                }
+            }}
+    e = assert_raises(ClientError, client.put_object_lock_configuration, Bucket=bucket_name, ObjectLockConfiguration=conf)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 400
+    assert error_code == 'InvalidRetentionPeriod'
+
+
+@pytest.mark.fails_on_dbstore
+def test_object_lock_put_obj_lock_invalid_years():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
+    conf = {'ObjectLockEnabled':'Enabled',
+            'Rule': {
+                'DefaultRetention':{
+                    'Mode':'GOVERNANCE',
+                    'Years':-1
+                }
+            }}
+    e = assert_raises(ClientError, client.put_object_lock_configuration, Bucket=bucket_name, ObjectLockConfiguration=conf)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 400
+    assert error_code == 'InvalidRetentionPeriod'
+
+
+@pytest.mark.fails_on_dbstore
+def test_object_lock_put_obj_lock_invalid_mode():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
+    conf = {'ObjectLockEnabled':'Enabled',
+            'Rule': {
+                'DefaultRetention':{
+                    'Mode':'abc',
+                    'Years':1
+                }
+            }}
+    e = assert_raises(ClientError, client.put_object_lock_configuration, Bucket=bucket_name, ObjectLockConfiguration=conf)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 400
+    assert error_code == 'MalformedXML'
+
+    conf = {'ObjectLockEnabled':'Enabled',
+            'Rule': {
+                'DefaultRetention':{
+                    'Mode':'governance',
+                    'Years':1
+                }
+            }}
+    e = assert_raises(ClientError, client.put_object_lock_configuration, Bucket=bucket_name, ObjectLockConfiguration=conf)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 400
+    assert error_code == 'MalformedXML'
+
+
+@pytest.mark.fails_on_dbstore
+def test_object_lock_put_obj_lock_invalid_status():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
+    conf = {'ObjectLockEnabled':'Disabled',
+            'Rule': {
+                'DefaultRetention':{
+                    'Mode':'GOVERNANCE',
+                    'Years':1
+                }
+            }}
+    e = assert_raises(ClientError, client.put_object_lock_configuration, Bucket=bucket_name, ObjectLockConfiguration=conf)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 400
+    assert error_code == 'MalformedXML'
+
+
+@pytest.mark.fails_on_dbstore
+def test_object_lock_suspend_versioning():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
+    e = assert_raises(ClientError, client.put_bucket_versioning, Bucket=bucket_name, VersioningConfiguration={'Status': 'Suspended'})
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 409
+    assert error_code == 'InvalidBucketState'
+
+
+@pytest.mark.fails_on_dbstore
+def test_object_lock_get_obj_lock():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
+    conf = {'ObjectLockEnabled':'Enabled',
+            'Rule': {
+                'DefaultRetention':{
+                    'Mode':'GOVERNANCE',
+                    'Days':1
+                }
+            }}
+    client.put_object_lock_configuration(
+        Bucket=bucket_name,
+        ObjectLockConfiguration=conf)
+    response = client.get_object_lock_configuration(Bucket=bucket_name)
+    assert response['ObjectLockConfiguration'] == conf
+
+
+def test_object_lock_get_obj_lock_invalid_bucket():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+    client.create_bucket(Bucket=bucket_name)
+    e = assert_raises(ClientError, client.get_object_lock_configuration, Bucket=bucket_name)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 404
+    assert error_code == 'ObjectLockConfigurationNotFoundError'
+
+
+@pytest.mark.fails_on_dbstore
+def test_object_lock_put_obj_retention():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
+    key = 'file1'
+    response = client.put_object(Bucket=bucket_name, Body='abc', Key=key)
+    version_id = response['VersionId']
+    retention = {'Mode':'GOVERNANCE', 'RetainUntilDate':datetime.datetime(2140,1,1,tzinfo=pytz.UTC)}
+    response = client.put_object_retention(Bucket=bucket_name, Key=key, Retention=retention)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+    response = client.get_object_retention(Bucket=bucket_name, Key=key)
+    assert response['Retention'] == retention
+    client.delete_object(Bucket=bucket_name, Key=key, VersionId=version_id, BypassGovernanceRetention=True)
+
+
+def test_object_lock_put_obj_retention_invalid_bucket():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+    client.create_bucket(Bucket=bucket_name)
+    key = 'file1'
+    client.put_object(Bucket=bucket_name, Body='abc', Key=key)
+    retention = {'Mode':'GOVERNANCE', 'RetainUntilDate':datetime.datetime(2030,1,1,tzinfo=pytz.UTC)}
+    e = assert_raises(ClientError, client.put_object_retention, Bucket=bucket_name, Key=key, Retention=retention)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 400
+    assert error_code == 'InvalidRequest'
+
+
+@pytest.mark.fails_on_dbstore
+def test_object_lock_put_obj_retention_invalid_mode():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
+    key = 'file1'
+    client.put_object(Bucket=bucket_name, Body='abc', Key=key)
+    retention = {'Mode':'governance', 'RetainUntilDate':datetime.datetime(2030,1,1,tzinfo=pytz.UTC)}
+    e = assert_raises(ClientError, client.put_object_retention, Bucket=bucket_name, Key=key, Retention=retention)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 400
+    assert error_code == 'MalformedXML'
+
+    retention = {'Mode':'abc', 'RetainUntilDate':datetime.datetime(2030,1,1,tzinfo=pytz.UTC)}
+    e = assert_raises(ClientError, client.put_object_retention, Bucket=bucket_name, Key=key, Retention=retention)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 400
+    assert error_code == 'MalformedXML'
+
+
+@pytest.mark.fails_on_dbstore
+def test_object_lock_get_obj_retention():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
+    key = 'file1'
+    response = client.put_object(Bucket=bucket_name, Body='abc', Key=key)
+    version_id = response['VersionId']
+    retention = {'Mode':'GOVERNANCE', 'RetainUntilDate':datetime.datetime(2030,1,1,tzinfo=pytz.UTC)}
+    client.put_object_retention(Bucket=bucket_name, Key=key, Retention=retention)
+    response = client.get_object_retention(Bucket=bucket_name, Key=key)
+    assert response['Retention'] == retention
+    client.delete_object(Bucket=bucket_name, Key=key, VersionId=version_id, BypassGovernanceRetention=True)
+
+
+@pytest.mark.fails_on_dbstore
+def test_object_lock_get_obj_retention_iso8601():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
+    key = 'file1'
+    response = client.put_object(Bucket=bucket_name, Body='abc', Key=key)
+    version_id = response['VersionId']
+    date = datetime.datetime.today() + datetime.timedelta(days=365)
+    retention = {'Mode':'GOVERNANCE', 'RetainUntilDate': date}
+    client.put_object_retention(Bucket=bucket_name, Key=key, Retention=retention)
+    client.meta.events.register('after-call.s3.HeadObject', get_http_response)
+    client.head_object(Bucket=bucket_name,VersionId=version_id,Key=key)
+    retain_date = http_response['headers']['x-amz-object-lock-retain-until-date']
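+    # parse_datetime raises if the retain-until-date header is not valid
+    # ISO 8601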
+    isodate.parse_datetime(retain_date)
+    client.delete_object(Bucket=bucket_name, Key=key, VersionId=version_id, BypassGovernanceRetention=True)
+
+
+def test_object_lock_get_obj_retention_invalid_bucket():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+    client.create_bucket(Bucket=bucket_name)
+    key = 'file1'
+    client.put_object(Bucket=bucket_name, Body='abc', Key=key)
+    e = assert_raises(ClientError, client.get_object_retention, Bucket=bucket_name, Key=key)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 400
+    assert error_code == 'InvalidRequest'
+
+
+@pytest.mark.fails_on_dbstore
+def test_object_lock_put_obj_retention_versionid():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
+    key = 'file1'
+    client.put_object(Bucket=bucket_name, Body='abc', Key=key)
+    response = client.put_object(Bucket=bucket_name, Body='abc', Key=key)
+    version_id = response['VersionId']
+    retention = {'Mode':'GOVERNANCE', 'RetainUntilDate':datetime.datetime(2030,1,1,tzinfo=pytz.UTC)}
+    client.put_object_retention(Bucket=bucket_name, Key=key, VersionId=version_id, Retention=retention)
+    response = client.get_object_retention(Bucket=bucket_name, Key=key, VersionId=version_id)
+    assert response['Retention'] == retention
+    client.delete_object(Bucket=bucket_name, Key=key, VersionId=version_id, BypassGovernanceRetention=True)
+
+
+@pytest.mark.fails_on_dbstore
+def test_object_lock_put_obj_retention_override_default_retention():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
+    conf = {'ObjectLockEnabled':'Enabled',
+            'Rule': {
+                'DefaultRetention':{
+                    'Mode':'GOVERNANCE',
+                    'Days':1
+                }
+            }}
+    client.put_object_lock_configuration(
+        Bucket=bucket_name,
+        ObjectLockConfiguration=conf)
+    key = 'file1'
+    response = client.put_object(Bucket=bucket_name, Body='abc', Key=key)
+    version_id = response['VersionId']
+    retention = {'Mode':'GOVERNANCE', 'RetainUntilDate':datetime.datetime(2030,1,1,tzinfo=pytz.UTC)}
+    client.put_object_retention(Bucket=bucket_name, Key=key, Retention=retention)
+    response = client.get_object_retention(Bucket=bucket_name, Key=key)
+    assert response['Retention'] == retention
+    client.delete_object(Bucket=bucket_name, Key=key, VersionId=version_id, BypassGovernanceRetention=True)
+
+
+@pytest.mark.fails_on_dbstore
+def test_object_lock_put_obj_retention_increase_period():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
+    key = 'file1'
+    response = client.put_object(Bucket=bucket_name, Body='abc', Key=key)
+    version_id = response['VersionId']
+    retention1 = {'Mode':'GOVERNANCE', 'RetainUntilDate':datetime.datetime(2030,1,1,tzinfo=pytz.UTC)}
+    client.put_object_retention(Bucket=bucket_name, Key=key, Retention=retention1)
+    retention2 = {'Mode':'GOVERNANCE', 'RetainUntilDate':datetime.datetime(2030,1,3,tzinfo=pytz.UTC)}
+    client.put_object_retention(Bucket=bucket_name, Key=key, Retention=retention2)
+    response = client.get_object_retention(Bucket=bucket_name, Key=key)
+    assert response['Retention'] == retention2
+    client.delete_object(Bucket=bucket_name, Key=key, VersionId=version_id, BypassGovernanceRetention=True)
+
+
+@pytest.mark.fails_on_dbstore
+def test_object_lock_put_obj_retention_shorten_period():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
+    key = 'file1'
+    response = client.put_object(Bucket=bucket_name, Body='abc', Key=key)
+    version_id = response['VersionId']
+    retention = {'Mode':'GOVERNANCE', 'RetainUntilDate':datetime.datetime(2030,1,3,tzinfo=pytz.UTC)}
+    client.put_object_retention(Bucket=bucket_name, Key=key, Retention=retention)
+    retention = {'Mode':'GOVERNANCE', 'RetainUntilDate':datetime.datetime(2030,1,1,tzinfo=pytz.UTC)}
+    e = assert_raises(ClientError, client.put_object_retention, Bucket=bucket_name, Key=key, Retention=retention)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 403
+    assert error_code == 'AccessDenied'
+    client.delete_object(Bucket=bucket_name, Key=key, VersionId=version_id, BypassGovernanceRetention=True)
+
+
+@pytest.mark.fails_on_dbstore
+def test_object_lock_put_obj_retention_shorten_period_bypass():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
+    key = 'file1'
+    response = client.put_object(Bucket=bucket_name, Body='abc', Key=key)
+    version_id = response['VersionId']
+    retention = {'Mode':'GOVERNANCE', 'RetainUntilDate':datetime.datetime(2030,1,3,tzinfo=pytz.UTC)}
+    client.put_object_retention(Bucket=bucket_name, Key=key, Retention=retention)
+    retention = {'Mode':'GOVERNANCE', 'RetainUntilDate':datetime.datetime(2030,1,1,tzinfo=pytz.UTC)}
+    client.put_object_retention(Bucket=bucket_name, Key=key, Retention=retention, BypassGovernanceRetention=True)
+    response = client.get_object_retention(Bucket=bucket_name, Key=key)
+    assert response['Retention'] == retention
+    client.delete_object(Bucket=bucket_name, Key=key, VersionId=version_id, BypassGovernanceRetention=True)
+
+
+@pytest.mark.fails_on_dbstore
+def test_object_lock_delete_object_with_retention():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
+    key = 'file1'
+
+    response = client.put_object(Bucket=bucket_name, Body='abc', Key=key)
+    retention = {'Mode':'GOVERNANCE', 'RetainUntilDate':datetime.datetime(2030,1,1,tzinfo=pytz.UTC)}
+    client.put_object_retention(Bucket=bucket_name, Key=key, Retention=retention)
+    e = assert_raises(ClientError, client.delete_object, Bucket=bucket_name, Key=key, VersionId=response['VersionId'])
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 403
+    assert error_code == 'AccessDenied'
+
+    response = client.delete_object(Bucket=bucket_name, Key=key, VersionId=response['VersionId'], BypassGovernanceRetention=True)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 204
+
+@pytest.mark.fails_on_dbstore
+def test_object_lock_delete_multipart_object_with_retention():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
+
+    key = 'file1'
+    body = 'abc'
+    response = client.create_multipart_upload(Bucket=bucket_name, Key=key, ObjectLockMode='GOVERNANCE',
+                                              ObjectLockRetainUntilDate=datetime.datetime(2030,1,1,tzinfo=pytz.UTC))
+    upload_id = response['UploadId']
+
+    response = client.upload_part(UploadId=upload_id, Bucket=bucket_name, Key=key, PartNumber=1, Body=body)
+    parts = [{'ETag': response['ETag'].strip('"'), 'PartNumber': 1}]
+
+    response = client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
+
+    e = assert_raises(ClientError, client.delete_object, Bucket=bucket_name, Key=key, VersionId=response['VersionId'])
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 403
+    assert error_code == 'AccessDenied'
+
+    response = client.delete_object(Bucket=bucket_name, Key=key, VersionId=response['VersionId'], BypassGovernanceRetention=True)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 204
+
+@pytest.mark.fails_on_dbstore
+def test_object_lock_delete_object_with_retention_and_marker():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
+    key = 'file1'
+
+    response = client.put_object(Bucket=bucket_name, Body='abc', Key=key)
+    retention = {'Mode':'GOVERNANCE', 'RetainUntilDate':datetime.datetime(2030,1,1,tzinfo=pytz.UTC)}
+    client.put_object_retention(Bucket=bucket_name, Key=key, Retention=retention)
+    del_response = client.delete_object(Bucket=bucket_name, Key=key)
+    e = assert_raises(ClientError, client.delete_object, Bucket=bucket_name, Key=key, VersionId=response['VersionId'])
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 403
+    assert error_code == 'AccessDenied'
+
+    client.delete_object(Bucket=bucket_name, Key=key, VersionId=del_response['VersionId'])
+    e = assert_raises(ClientError, client.delete_object, Bucket=bucket_name, Key=key, VersionId=response['VersionId'])
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 403
+    assert error_code == 'AccessDenied'
+
+    response = client.delete_object(Bucket=bucket_name, Key=key, VersionId=response['VersionId'], BypassGovernanceRetention=True)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 204
+
+@pytest.mark.fails_on_dbstore
+def test_object_lock_multi_delete_object_with_retention():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
+    key1 = 'file1'
+    key2 = 'file2'
+
+    response1 = client.put_object(Bucket=bucket_name, Body='abc', Key=key1)
+    response2 = client.put_object(Bucket=bucket_name, Body='abc', Key=key2)
+
+    versionId1 = response1['VersionId']
+    versionId2 = response2['VersionId']
+
+    # key1 is under retention, but key2 isn't.
+    retention = {'Mode':'GOVERNANCE', 'RetainUntilDate':datetime.datetime(2030,1,1,tzinfo=pytz.UTC)}
+    client.put_object_retention(Bucket=bucket_name, Key=key1, Retention=retention)
+
+    delete_response = client.delete_objects(
+        Bucket=bucket_name,
+        Delete={
+            'Objects': [
+                {
+                    'Key': key1,
+                    'VersionId': versionId1
+                },
+                {
+                    'Key': key2,
+                    'VersionId': versionId2
+                }
+            ]
+        }
+    )
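+    # multi-object delete reports per-key results: the locked key shows up
+    # under Errors with AccessDenied while the unlocked key is Deleted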
+
+    assert len(delete_response['Deleted']) == 1
+    assert len(delete_response['Errors']) == 1
+
+    failed_object = delete_response['Errors'][0]
+    assert failed_object['Code'] == 'AccessDenied'
+    assert failed_object['Key'] == key1
+    assert failed_object['VersionId'] == versionId1
+
+    deleted_object = delete_response['Deleted'][0]
+    assert deleted_object['Key'] == key2
+    assert deleted_object['VersionId'] == versionId2
+
+    delete_response = client.delete_objects(
+        Bucket=bucket_name,
+        Delete={
+            'Objects': [
+                {
+                    'Key': key1,
+                    'VersionId': versionId1
+                }
+            ]
+        },
+        BypassGovernanceRetention=True
+    )
+
+    assert ('Errors' not in delete_response) or (len(delete_response['Errors']) == 0)
+    assert len(delete_response['Deleted']) == 1
+    deleted_object = delete_response['Deleted'][0]
+    assert deleted_object['Key'] == key1
+    assert deleted_object['VersionId'] == versionId1
+
+
+@pytest.mark.fails_on_dbstore
+def test_object_lock_put_legal_hold():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
+    key = 'file1'
+    client.put_object(Bucket=bucket_name, Body='abc', Key=key)
+    legal_hold = {'Status': 'ON'}
+    response = client.put_object_legal_hold(Bucket=bucket_name, Key=key, LegalHold=legal_hold)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+    response = client.put_object_legal_hold(Bucket=bucket_name, Key=key, LegalHold={'Status':'OFF'})
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+
+def test_object_lock_put_legal_hold_invalid_bucket():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+    client.create_bucket(Bucket=bucket_name)
+    key = 'file1'
+    client.put_object(Bucket=bucket_name, Body='abc', Key=key)
+    legal_hold = {'Status': 'ON'}
+    e = assert_raises(ClientError, client.put_object_legal_hold, Bucket=bucket_name, Key=key, LegalHold=legal_hold)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 400
+    assert error_code == 'InvalidRequest'
+
+
+@pytest.mark.fails_on_dbstore
+def test_object_lock_put_legal_hold_invalid_status():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
+    key = 'file1'
+    client.put_object(Bucket=bucket_name, Body='abc', Key=key)
+    legal_hold = {'Status': 'abc'}
+    e = assert_raises(ClientError, client.put_object_legal_hold, Bucket=bucket_name, Key=key, LegalHold=legal_hold)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 400
+    assert error_code == 'MalformedXML'
+
+
+@pytest.mark.fails_on_dbstore
+def test_object_lock_get_legal_hold():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
+    key = 'file1'
+    client.put_object(Bucket=bucket_name, Body='abc', Key=key)
+    legal_hold = {'Status': 'ON'}
+    client.put_object_legal_hold(Bucket=bucket_name, Key=key, LegalHold=legal_hold)
+    response = client.get_object_legal_hold(Bucket=bucket_name, Key=key)
+    assert response['LegalHold'] == legal_hold
+    legal_hold_off = {'Status': 'OFF'}
+    client.put_object_legal_hold(Bucket=bucket_name, Key=key, LegalHold=legal_hold_off)
+    response = client.get_object_legal_hold(Bucket=bucket_name, Key=key)
+    assert response['LegalHold'] == legal_hold_off
+
+
+def test_object_lock_get_legal_hold_invalid_bucket():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+    client.create_bucket(Bucket=bucket_name)
+    key = 'file1'
+    client.put_object(Bucket=bucket_name, Body='abc', Key=key)
+    e = assert_raises(ClientError, client.get_object_legal_hold, Bucket=bucket_name, Key=key)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 400
+    assert error_code == 'InvalidRequest'
+
+
+@pytest.mark.fails_on_dbstore
+def test_object_lock_delete_object_with_legal_hold_on():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
+    key = 'file1'
+    response = client.put_object(Bucket=bucket_name, Body='abc', Key=key)
+    client.put_object_legal_hold(Bucket=bucket_name, Key=key, LegalHold={'Status': 'ON'})
+    e = assert_raises(ClientError, client.delete_object, Bucket=bucket_name, Key=key, VersionId=response['VersionId'])
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 403
+    assert error_code == 'AccessDenied'
+    client.put_object_legal_hold(Bucket=bucket_name, Key=key, LegalHold={'Status':'OFF'})
+
+@pytest.mark.fails_on_dbstore
+def test_object_lock_delete_multipart_object_with_legal_hold_on():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
+
+    key = 'file1'
+    body = 'abc'
+    response = client.create_multipart_upload(Bucket=bucket_name, Key=key, ObjectLockLegalHoldStatus='ON')
+    upload_id = response['UploadId']
+
+    response = client.upload_part(UploadId=upload_id, Bucket=bucket_name, Key=key, PartNumber=1, Body=body)
+    parts = [{'ETag': response['ETag'].strip('"'), 'PartNumber': 1}]
+
+    response = client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
+
+    e = assert_raises(ClientError, client.delete_object, Bucket=bucket_name, Key=key, VersionId=response['VersionId'])
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 403
+    assert error_code == 'AccessDenied'
+    client.put_object_legal_hold(Bucket=bucket_name, Key=key, LegalHold={'Status':'OFF'})
+
+@pytest.mark.fails_on_dbstore
+def test_object_lock_delete_object_with_legal_hold_off():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
+    key = 'file1'
+    response = client.put_object(Bucket=bucket_name, Body='abc', Key=key)
+    client.put_object_legal_hold(Bucket=bucket_name, Key=key, LegalHold={'Status': 'OFF'})
+    response = client.delete_object(Bucket=bucket_name, Key=key, VersionId=response['VersionId'])
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 204
+
+
+@pytest.mark.fails_on_dbstore
+def test_object_lock_get_obj_metadata():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
+    key = 'file1'
+    client.put_object(Bucket=bucket_name, Body='abc', Key=key)
+    legal_hold = {'Status': 'ON'}
+    client.put_object_legal_hold(Bucket=bucket_name, Key=key, LegalHold=legal_hold)
+    retention = {'Mode':'GOVERNANCE', 'RetainUntilDate':datetime.datetime(2030,1,1,tzinfo=pytz.UTC)}
+    client.put_object_retention(Bucket=bucket_name, Key=key, Retention=retention)
+    response = client.head_object(Bucket=bucket_name, Key=key)
+    assert response['ObjectLockMode'] == retention['Mode']
+    assert response['ObjectLockRetainUntilDate'] == retention['RetainUntilDate']
+    assert response['ObjectLockLegalHoldStatus'] == legal_hold['Status']
+
+    client.put_object_legal_hold(Bucket=bucket_name, Key=key, LegalHold={'Status':'OFF'})
+    client.delete_object(Bucket=bucket_name, Key=key, VersionId=response['VersionId'], BypassGovernanceRetention=True)
+
+
+@pytest.mark.fails_on_dbstore
+def test_object_lock_uploading_obj():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
+    key = 'file1'
+    client.put_object(Bucket=bucket_name, Body='abc', Key=key, ObjectLockMode='GOVERNANCE',
+                      ObjectLockRetainUntilDate=datetime.datetime(2030,1,1,tzinfo=pytz.UTC), ObjectLockLegalHoldStatus='ON')
+
+    response = client.head_object(Bucket=bucket_name, Key=key)
+    assert response['ObjectLockMode'] == 'GOVERNANCE'
+    assert response['ObjectLockRetainUntilDate'] == datetime.datetime(2030,1,1,tzinfo=pytz.UTC)
+    assert response['ObjectLockLegalHoldStatus'] == 'ON'
+    client.put_object_legal_hold(Bucket=bucket_name, Key=key, LegalHold={'Status':'OFF'})
+    client.delete_object(Bucket=bucket_name, Key=key, VersionId=response['VersionId'], BypassGovernanceRetention=True)
+
+@pytest.mark.fails_on_dbstore
+def test_object_lock_changing_mode_from_governance_with_bypass():
+    bucket_name = get_new_bucket_name()
+    key = 'file1'
+    client = get_client()
+    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
+    # upload object with mode=GOVERNANCE
+    retain_until = datetime.datetime.now(pytz.utc) + datetime.timedelta(seconds=10)
+    client.put_object(Bucket=bucket_name, Body='abc', Key=key, ObjectLockMode='GOVERNANCE',
+                      ObjectLockRetainUntilDate=retain_until)
+    # change mode to COMPLIANCE
+    retention = {'Mode':'COMPLIANCE', 'RetainUntilDate':retain_until}
+    client.put_object_retention(Bucket=bucket_name, Key=key, Retention=retention, BypassGovernanceRetention=True)
+
+@pytest.mark.fails_on_dbstore
+def test_object_lock_changing_mode_from_governance_without_bypass():
+    bucket_name = get_new_bucket_name()
+    key = 'file1'
+    client = get_client()
+    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
+    # upload object with mode=GOVERNANCE
+    retain_until = datetime.datetime.now(pytz.utc) + datetime.timedelta(seconds=10)
+    client.put_object(Bucket=bucket_name, Body='abc', Key=key, ObjectLockMode='GOVERNANCE',
+                      ObjectLockRetainUntilDate=retain_until)
+    # try to change mode to COMPLIANCE
+    retention = {'Mode':'COMPLIANCE', 'RetainUntilDate':retain_until}
+    e = assert_raises(ClientError, client.put_object_retention, Bucket=bucket_name, Key=key, Retention=retention)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 403
+    assert error_code == 'AccessDenied'
+
+@pytest.mark.fails_on_dbstore
+def test_object_lock_changing_mode_from_compliance():
+    bucket_name = get_new_bucket_name()
+    key = 'file1'
+    client = get_client()
+    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
+    # upload object with mode=COMPLIANCE
+    retain_until = datetime.datetime.now(pytz.utc) + datetime.timedelta(seconds=10)
+    client.put_object(Bucket=bucket_name, Body='abc', Key=key, ObjectLockMode='COMPLIANCE',
+                      ObjectLockRetainUntilDate=retain_until)
+    # try to change mode to GOVERNANCE
+    retention = {'Mode':'GOVERNANCE', 'RetainUntilDate':retain_until}
+    e = assert_raises(ClientError, client.put_object_retention, Bucket=bucket_name, Key=key, Retention=retention)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 403
+    assert error_code == 'AccessDenied'
+
+@pytest.mark.fails_on_dbstore
+def test_copy_object_ifmatch_good():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    resp = client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
+
+    client.copy_object(Bucket=bucket_name, CopySource=bucket_name+'/foo', CopySourceIfMatch=resp['ETag'], Key='bar')
+    response = client.get_object(Bucket=bucket_name, Key='bar')
+    body = _get_body(response)
+    assert body == 'bar'
+
+# TODO: remove fails_on_rgw when https://tracker.ceph.com/issues/40808 is resolved
+@pytest.mark.fails_on_rgw
+def test_copy_object_ifmatch_failed():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
+
+    e = assert_raises(ClientError, client.copy_object, Bucket=bucket_name, CopySource=bucket_name+'/foo', CopySourceIfMatch='ABCORZ', Key='bar')
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 412
+    assert error_code == 'PreconditionFailed'
+
+# TODO: remove fails_on_rgw when https://tracker.ceph.com/issues/40808 is resolved
+@pytest.mark.fails_on_rgw
+def test_copy_object_ifnonematch_good():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    resp = client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
+
+    e = assert_raises(ClientError, client.copy_object, Bucket=bucket_name, CopySource=bucket_name+'/foo', CopySourceIfNoneMatch=resp['ETag'], Key='bar')
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 412
+    assert error_code == 'PreconditionFailed'
+
+@pytest.mark.fails_on_dbstore
+def test_copy_object_ifnonematch_failed():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    resp = client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
+
+    client.copy_object(Bucket=bucket_name, CopySource=bucket_name+'/foo', CopySourceIfNoneMatch='ABCORZ', Key='bar')
+    response = client.get_object(Bucket=bucket_name, Key='bar')
+    body = _get_body(response)
+    assert body == 'bar'
+
+# TODO: results in a 404 instead of 400 on the RGW
+@pytest.mark.fails_on_rgw
+def test_object_read_unreadable():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key='\xae\x8a-')
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 400
+    assert e.response['Error']['Message'] == 'Couldn\'t parse the specified URI.'
+
+def test_get_bucket_policy_status():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    resp = client.get_bucket_policy_status(Bucket=bucket_name)
+    assert resp['PolicyStatus']['IsPublic'] == False
+
+def test_get_public_acl_bucket_policy_status():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    client.put_bucket_acl(Bucket=bucket_name, ACL='public-read')
+    resp = client.get_bucket_policy_status(Bucket=bucket_name)
+    assert resp['PolicyStatus']['IsPublic'] == True
+
+def test_get_authpublic_acl_bucket_policy_status():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    client.put_bucket_acl(Bucket=bucket_name, ACL='authenticated-read')
+    resp = client.get_bucket_policy_status(Bucket=bucket_name)
+    assert resp['PolicyStatus']['IsPublic'] == True
+
+
+def test_get_publicpolicy_acl_bucket_policy_status():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    resp = client.get_bucket_policy_status(Bucket=bucket_name)
+    assert resp['PolicyStatus']['IsPublic'] == False
+
+    resource1 = "arn:aws:s3:::" + bucket_name
+    resource2 = "arn:aws:s3:::" + bucket_name + "/*"
+    policy_document = json.dumps(
+    {
+        "Version": "2012-10-17",
+        "Statement": [{
+        "Effect": "Allow",
+        "Principal": {"AWS": "*"},
+        "Action": "s3:ListBucket",
+        "Resource": [
+            "{}".format(resource1),
+            "{}".format(resource2)
+          ]
+        }]
+     })
+
+    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
+    resp = client.get_bucket_policy_status(Bucket=bucket_name)
+    assert resp['PolicyStatus']['IsPublic'] == True
+
+
+def test_get_nonpublicpolicy_acl_bucket_policy_status():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    resp = client.get_bucket_policy_status(Bucket=bucket_name)
+    assert resp['PolicyStatus']['IsPublic'] == False
+
+    resource1 = "arn:aws:s3:::" + bucket_name
+    resource2 = "arn:aws:s3:::" + bucket_name + "/*"
+    policy_document = json.dumps(
+    {
+        "Version": "2012-10-17",
+        "Statement": [{
+        "Effect": "Allow",
+        "Principal": {"AWS": "*"},
+        "Action": "s3:ListBucket",
+        "Resource": [
+            "{}".format(resource1),
+            "{}".format(resource2)
+          ],
+        "Condition": {
+            "IpAddress":
+            {"aws:SourceIp": "10.0.0.0/32"}
+        }
+        }]
+     })
+
+    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
+    resp = client.get_bucket_policy_status(Bucket=bucket_name)
+    assert resp['PolicyStatus']['IsPublic'] == False
+
+
+def test_get_nonpublicpolicy_deny_bucket_policy_status():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    resp = client.get_bucket_policy_status(Bucket=bucket_name)
+    assert resp['PolicyStatus']['IsPublic'] == False
+
+    resource1 = "arn:aws:s3:::" + bucket_name
+    resource2 = "arn:aws:s3:::" + bucket_name + "/*"
+    policy_document = json.dumps(
+    {
+        "Version": "2012-10-17",
+        "Statement": [{
+        "Effect": "Allow",
+        "NotPrincipal": {"AWS": "arn:aws:iam::s3tenant1:root"},
+        "Action": "s3:ListBucket",
+        "Resource": [
+            "{}".format(resource1),
+            "{}".format(resource2)
+          ],
+        }]
+     })
+
+    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
+    resp = client.get_bucket_policy_status(Bucket=bucket_name)
+    assert resp['PolicyStatus']['IsPublic'] == True
+
+def test_get_undefined_public_block():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    # delete the existing public access block configuration
+    # as AWS creates a default public access block configuration
+    resp = client.delete_public_access_block(Bucket=bucket_name)
+    assert resp['ResponseMetadata']['HTTPStatusCode'] == 204
+
+    response_code = ""
+    try:
+        resp = client.get_public_access_block(Bucket=bucket_name)
+    except ClientError as e:
+        response_code = e.response['Error']['Code']
+
+    assert response_code == 'NoSuchPublicAccessBlockConfiguration'
+
+def test_get_public_block_deny_bucket_policy():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    access_conf = {'BlockPublicAcls': True,
+                   'IgnorePublicAcls': True,
+                   'BlockPublicPolicy': True,
+                   'RestrictPublicBuckets': False}
+    client.put_public_access_block(Bucket=bucket_name, PublicAccessBlockConfiguration=access_conf)
+
+    # make sure we can get the public access block
+    resp = client.get_public_access_block(Bucket=bucket_name)
+    assert resp['PublicAccessBlockConfiguration']['BlockPublicAcls'] == access_conf['BlockPublicAcls']
+    assert resp['PublicAccessBlockConfiguration']['BlockPublicPolicy'] == access_conf['BlockPublicPolicy']
+    assert resp['PublicAccessBlockConfiguration']['IgnorePublicAcls'] == access_conf['IgnorePublicAcls']
+    assert resp['PublicAccessBlockConfiguration']['RestrictPublicBuckets'] == access_conf['RestrictPublicBuckets']
+
+    # make bucket policy to deny access
+    resource = _make_arn_resource(bucket_name)
+    policy_document = make_json_policy("s3:GetBucketPublicAccessBlock",
+                                       resource, effect="Deny")
+    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
+
+    # check if the access is denied
+    e = assert_raises(ClientError, client.get_public_access_block, Bucket=bucket_name)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 403
+
+def test_put_public_block():
+    #client = get_svc_client(svc='s3control', client_config=Config(s3={'addressing_style': 'path'}))
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    access_conf = {'BlockPublicAcls': True,
+                   'IgnorePublicAcls': True,
+                   'BlockPublicPolicy': True,
+                   'RestrictPublicBuckets': False}
+
+    client.put_public_access_block(Bucket=bucket_name, PublicAccessBlockConfiguration=access_conf)
+
+    resp = client.get_public_access_block(Bucket=bucket_name)
+    assert resp['PublicAccessBlockConfiguration']['BlockPublicAcls'] == access_conf['BlockPublicAcls']
+    assert resp['PublicAccessBlockConfiguration']['BlockPublicPolicy'] == access_conf['BlockPublicPolicy']
+    assert resp['PublicAccessBlockConfiguration']['IgnorePublicAcls'] == access_conf['IgnorePublicAcls']
+    assert resp['PublicAccessBlockConfiguration']['RestrictPublicBuckets'] == access_conf['RestrictPublicBuckets']
+
+
+def test_block_public_put_bucket_acls():
+    #client = get_svc_client(svc='s3control', client_config=Config(s3={'addressing_style': 'path'}))
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    access_conf = {'BlockPublicAcls': True,
+                   'IgnorePublicAcls': False,
+                   'BlockPublicPolicy': True,
+                   'RestrictPublicBuckets': False}
+
+    client.put_public_access_block(Bucket=bucket_name, PublicAccessBlockConfiguration=access_conf)
+
+    resp = client.get_public_access_block(Bucket=bucket_name)
+    assert resp['PublicAccessBlockConfiguration']['BlockPublicAcls'] == access_conf['BlockPublicAcls']
+    assert resp['PublicAccessBlockConfiguration']['BlockPublicPolicy'] == access_conf['BlockPublicPolicy']
+
+    e = assert_raises(ClientError, client.put_bucket_acl, Bucket=bucket_name,ACL='public-read')
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 403
+
+    e = assert_raises(ClientError, client.put_bucket_acl, Bucket=bucket_name,ACL='public-read-write')
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 403
+
+    e = assert_raises(ClientError, client.put_bucket_acl, Bucket=bucket_name,ACL='authenticated-read')
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 403
+
+
+def test_block_public_object_canned_acls():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    access_conf = {'BlockPublicAcls': True,
+                   'IgnorePublicAcls': False,
+                   'BlockPublicPolicy': False,
+                   'RestrictPublicBuckets': False}
+
+    client.put_public_access_block(Bucket=bucket_name, PublicAccessBlockConfiguration=access_conf)
+
+    # resp = client.get_public_access_block(Bucket=bucket_name)
+    # assert resp['PublicAccessBlockConfiguration']['BlockPublicAcls'] == access_conf['BlockPublicAcls']
+    # assert resp['PublicAccessBlockConfiguration']['BlockPublicPolicy'] == access_conf['BlockPublicPolicy']
+
+    #FIXME: use empty body until #42208
+    e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key='foo1', Body='', ACL='public-read')
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 403
+
+    e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key='foo2', Body='', ACL='public-read-write')
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 403
+
+    e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key='foo3', Body='', ACL='authenticated-read')
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 403
+
+
+def test_block_public_policy():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    access_conf = {'BlockPublicAcls': False,
+                   'IgnorePublicAcls': False,
+                   'BlockPublicPolicy': True,
+                   'RestrictPublicBuckets': False}
+
+    client.put_public_access_block(Bucket=bucket_name, PublicAccessBlockConfiguration=access_conf)
+    resource = _make_arn_resource("{}/{}".format(bucket_name, "*"))
+    policy_document = make_json_policy("s3:GetObject",
+                                       resource)
+
+    check_access_denied(client.put_bucket_policy, Bucket=bucket_name, Policy=policy_document)
+
+
+def test_ignore_public_acls():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    alt_client = get_alt_client()
+
+    client.put_bucket_acl(Bucket=bucket_name, ACL='public-read')
+    # Public bucket should be accessible
+    alt_client.list_objects(Bucket=bucket_name)
+
+    client.put_object(Bucket=bucket_name,Key='key1',Body='abcde',ACL='public-read')
+    resp=alt_client.get_object(Bucket=bucket_name, Key='key1')
+    assert _get_body(resp) == 'abcde'
+
+    access_conf = {'BlockPublicAcls': False,
+                   'IgnorePublicAcls': True,
+                   'BlockPublicPolicy': False,
+                   'RestrictPublicBuckets': False}
+
+    client.put_public_access_block(Bucket=bucket_name, PublicAccessBlockConfiguration=access_conf)
+    resource = _make_arn_resource("{}/{}".format(bucket_name, "*"))
+
+    client.put_bucket_acl(Bucket=bucket_name, ACL='public-read')
+    # IgnorePublicACLs is true, so regardless this should behave as a private bucket
+    check_access_denied(alt_client.list_objects, Bucket=bucket_name)
+    check_access_denied(alt_client.get_object, Bucket=bucket_name, Key='key1')
+
+
+def test_multipart_upload_on_a_bucket_with_policy():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    resource1 = "arn:aws:s3:::" + bucket_name
+    resource2 = "arn:aws:s3:::" + bucket_name + "/*"
+    policy_document = json.dumps(
+    {
+        "Version": "2012-10-17",
+        "Statement": [{
+        "Effect": "Allow",
+        "Principal": "*",
+        "Action": "*",
+        "Resource": [
+            resource1,
+            resource2
+          ],
+        }]
+     })
+    key = "foo"
+    objlen=50*1024*1024
+    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
+    (upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key, size=objlen, client=client)
+    response = client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+def _put_bucket_encryption_s3(client, bucket_name):
+    """
+    enable a default encryption policy on the given bucket
+    """
+    server_side_encryption_conf = {
+        'Rules': [
+            {
+                'ApplyServerSideEncryptionByDefault': {
+                    'SSEAlgorithm': 'AES256'
+                }
+            },
+        ]
+    }
+    response = client.put_bucket_encryption(Bucket=bucket_name, ServerSideEncryptionConfiguration=server_side_encryption_conf)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+def _put_bucket_encryption_kms(client, bucket_name):
+    """
+    enable a default encryption policy on the given bucket
+    """
+    kms_keyid = get_main_kms_keyid()
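+    # fall back to a placeholder key id so the configuration request can still be
+    # exercised when the config file provides no kms_keyid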
+    if kms_keyid is None:
+        kms_keyid = 'fool-me-again'
+    server_side_encryption_conf = {
+        'Rules': [
+            {
+                'ApplyServerSideEncryptionByDefault': {
+                    'SSEAlgorithm': 'aws:kms',
+                    'KMSMasterKeyID': kms_keyid
+                }
+            },
+        ]
+    }
+    response = client.put_bucket_encryption(Bucket=bucket_name, ServerSideEncryptionConfiguration=server_side_encryption_conf)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+
+@pytest.mark.sse_s3
+def test_put_bucket_encryption_s3():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    _put_bucket_encryption_s3(client, bucket_name)
+
+@pytest.mark.encryption
+def test_put_bucket_encryption_kms():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    _put_bucket_encryption_kms(client, bucket_name)
+
+
+@pytest.mark.sse_s3
+def test_get_bucket_encryption_s3():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    response_code = ""
+    try:
+        client.get_bucket_encryption(Bucket=bucket_name)
+    except ClientError as e:
+        response_code = e.response['Error']['Code']
+
+    assert response_code == 'ServerSideEncryptionConfigurationNotFoundError'
+
+    _put_bucket_encryption_s3(client, bucket_name)
+
+    response = client.get_bucket_encryption(Bucket=bucket_name)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+    assert response['ServerSideEncryptionConfiguration']['Rules'][0]['ApplyServerSideEncryptionByDefault']['SSEAlgorithm'] == 'AES256'
+
+
+@pytest.mark.encryption
+def test_get_bucket_encryption_kms():
+    kms_keyid = get_main_kms_keyid()
+    if kms_keyid is None:
+        kms_keyid = 'fool-me-again'
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    response_code = ""
+    try:
+        client.get_bucket_encryption(Bucket=bucket_name)
+    except ClientError as e:
+        response_code = e.response['Error']['Code']
+
+    assert response_code == 'ServerSideEncryptionConfigurationNotFoundError'
+
+    _put_bucket_encryption_kms(client, bucket_name)
+
+    response = client.get_bucket_encryption(Bucket=bucket_name)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+    assert response['ServerSideEncryptionConfiguration']['Rules'][0]['ApplyServerSideEncryptionByDefault']['SSEAlgorithm'] == 'aws:kms'
+    assert response['ServerSideEncryptionConfiguration']['Rules'][0]['ApplyServerSideEncryptionByDefault']['KMSMasterKeyID'] == kms_keyid
+
+
+@pytest.mark.sse_s3
+def test_delete_bucket_encryption_s3():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    response = client.delete_bucket_encryption(Bucket=bucket_name)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 204
+
+    _put_bucket_encryption_s3(client, bucket_name)
+
+    response = client.delete_bucket_encryption(Bucket=bucket_name)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 204
+
+    response_code = ""
+    try:
+        client.get_bucket_encryption(Bucket=bucket_name)
+    except ClientError as e:
+        response_code = e.response['Error']['Code']
+
+    assert response_code == 'ServerSideEncryptionConfigurationNotFoundError'
+
+
+@pytest.mark.encryption
+def test_delete_bucket_encryption_kms():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    response = client.delete_bucket_encryption(Bucket=bucket_name)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 204
+
+    _put_bucket_encryption_kms(client, bucket_name)
+
+    response = client.delete_bucket_encryption(Bucket=bucket_name)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 204
+
+    response_code = ""
+    try:
+        client.get_bucket_encryption(Bucket=bucket_name)
+    except ClientError as e:
+        response_code = e.response['Error']['Code']
+
+    assert response_code == 'ServerSideEncryptionConfigurationNotFoundError'
+
+def _test_sse_s3_default_upload(file_size):
+    """
+    Test enables bucket encryption.
+    Create a file of A's of certain size, and use it to set_contents_from_file.
+    Re-read the contents, and confirm we get same content as input i.e., A's
+    """
+    bucket_name = get_new_bucket()
+    client = get_client()
+    _put_bucket_encryption_s3(client, bucket_name)
+
+    data = 'A'*file_size
+    response = client.put_object(Bucket=bucket_name, Key='testobj', Body=data)
+    assert response['ResponseMetadata']['HTTPHeaders']['x-amz-server-side-encryption'] == 'AES256'
+
+    response = client.get_object(Bucket=bucket_name, Key='testobj')
+    assert response['ResponseMetadata']['HTTPHeaders']['x-amz-server-side-encryption'] == 'AES256'
+    body = _get_body(response)
+    assert body == data
+
+@pytest.mark.encryption
+@pytest.mark.bucket_encryption
+@pytest.mark.sse_s3
+@pytest.mark.fails_on_dbstore
+def test_sse_s3_default_upload_1b():
+    _test_sse_s3_default_upload(1)
+
+@pytest.mark.encryption
+@pytest.mark.bucket_encryption
+@pytest.mark.sse_s3
+@pytest.mark.fails_on_dbstore
+def test_sse_s3_default_upload_1kb():
+    _test_sse_s3_default_upload(1024)
+
+@pytest.mark.encryption
+@pytest.mark.bucket_encryption
+@pytest.mark.sse_s3
+@pytest.mark.fails_on_dbstore
+def test_sse_s3_default_upload_1mb():
+    _test_sse_s3_default_upload(1024*1024)
+
+@pytest.mark.encryption
+@pytest.mark.bucket_encryption
+@pytest.mark.sse_s3
+@pytest.mark.fails_on_dbstore
+def test_sse_s3_default_upload_8mb():
+    _test_sse_s3_default_upload(8*1024*1024)
+
+def _test_sse_kms_default_upload(file_size):
+    """
+    Test enables bucket encryption.
+    Create a file of A's of certain size, and use it to set_contents_from_file.
+    Re-read the contents, and confirm we get same content as input i.e., A's
+    """
+    kms_keyid = get_main_kms_keyid()
+    if kms_keyid is None:
+        pytest.skip('[s3 main] section missing kms_keyid')
+    bucket_name = get_new_bucket()
+    client = get_client()
+    _put_bucket_encryption_kms(client, bucket_name)
+
+    data = 'A'*file_size
+    response = client.put_object(Bucket=bucket_name, Key='testobj', Body=data)
+    assert response['ResponseMetadata']['HTTPHeaders']['x-amz-server-side-encryption'] == 'aws:kms'
+    assert response['ResponseMetadata']['HTTPHeaders']['x-amz-server-side-encryption-aws-kms-key-id'] == kms_keyid
+
+    response = client.get_object(Bucket=bucket_name, Key='testobj')
+    assert response['ResponseMetadata']['HTTPHeaders']['x-amz-server-side-encryption'] == 'aws:kms'
+    assert response['ResponseMetadata']['HTTPHeaders']['x-amz-server-side-encryption-aws-kms-key-id'] == kms_keyid
+    body = _get_body(response)
+    assert body == data
+
+@pytest.mark.encryption
+@pytest.mark.bucket_encryption
+@pytest.mark.sse_s3
+@pytest.mark.fails_on_dbstore
+def test_sse_kms_default_upload_1b():
+    _test_sse_kms_default_upload(1)
+
+@pytest.mark.encryption
+@pytest.mark.bucket_encryption
+@pytest.mark.sse_s3
+@pytest.mark.fails_on_dbstore
+def test_sse_kms_default_upload_1kb():
+    _test_sse_kms_default_upload(1024)
+
+@pytest.mark.encryption
+@pytest.mark.bucket_encryption
+@pytest.mark.sse_s3
+@pytest.mark.fails_on_dbstore
+def test_sse_kms_default_upload_1mb():
+    _test_sse_kms_default_upload(1024*1024)
+
+@pytest.mark.encryption
+@pytest.mark.bucket_encryption
+@pytest.mark.sse_s3
+@pytest.mark.fails_on_dbstore
+def test_sse_kms_default_upload_8mb():
+    _test_sse_kms_default_upload(8*1024*1024)
+
+
+
+@pytest.mark.encryption
+@pytest.mark.bucket_encryption
+@pytest.mark.sse_s3
+@pytest.mark.fails_on_dbstore
+def test_sse_s3_default_method_head():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    _put_bucket_encryption_s3(client, bucket_name)
+
+    data = 'A'*1000
+    key = 'testobj'
+    client.put_object(Bucket=bucket_name, Key=key, Body=data)
+
+    response = client.head_object(Bucket=bucket_name, Key=key)
+    assert response['ResponseMetadata']['HTTPHeaders']['x-amz-server-side-encryption'] == 'AES256'
+
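+    # sending x-amz-server-side-encryption as a HeadObject request header is
+    # invalid and is expected to be rejected with 400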
+    sse_s3_headers = {
+        'x-amz-server-side-encryption': 'AES256',
+    }
+    lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_s3_headers))
+    client.meta.events.register('before-call.s3.HeadObject', lf)
+    e = assert_raises(ClientError, client.head_object, Bucket=bucket_name, Key=key)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 400
+
+@pytest.mark.encryption
+@pytest.mark.bucket_encryption
+@pytest.mark.sse_s3
+@pytest.mark.fails_on_dbstore
+def test_sse_s3_default_multipart_upload():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    _put_bucket_encryption_s3(client, bucket_name)
+
+    key = "multipart_enc"
+    content_type = 'text/plain'
+    objlen = 30 * 1024 * 1024
+    metadata = {'foo': 'bar'}
+    enc_headers = {
+        'Content-Type': content_type
+    }
+    resend_parts = []
+
+    (upload_id, data, parts) = _multipart_upload_enc(client, bucket_name, key, objlen,
+            part_size=5*1024*1024, init_headers=enc_headers, part_headers=enc_headers, metadata=metadata, resend_parts=resend_parts)
+
+    lf = (lambda **kwargs: kwargs['params']['headers'].update(enc_headers))
+    client.meta.events.register('before-call.s3.CompleteMultipartUpload', lf)
+    client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
+
+    response = client.list_objects_v2(Bucket=bucket_name, Prefix=key)
+    assert len(response['Contents']) == 1
+    assert response['Contents'][0]['Size'] == objlen
+
+    lf = (lambda **kwargs: kwargs['params']['headers'].update(enc_headers))
+    client.meta.events.register('before-call.s3.UploadPart', lf)
+
+    response = client.get_object(Bucket=bucket_name, Key=key)
+
+    assert response['Metadata'] == metadata
+    assert response['ResponseMetadata']['HTTPHeaders']['content-type'] == content_type
+    assert response['ResponseMetadata']['HTTPHeaders']['x-amz-server-side-encryption'] == 'AES256'
+
+    body = _get_body(response)
+    assert body == data
+    size = response['ContentLength']
+    assert len(body) == size
+
+    _check_content_using_range(key, bucket_name, data, 1000000)
+    _check_content_using_range(key, bucket_name, data, 10000000)
+
+@pytest.mark.encryption
+@pytest.mark.bucket_encryption
+@pytest.mark.sse_s3
+@pytest.mark.fails_on_dbstore
+def test_sse_s3_default_post_object_authenticated_request():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    _put_bucket_encryption_s3(client, bucket_name)
+
+    url = _get_post_url(bucket_name)
+    utc = pytz.utc
+    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
+
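+    # build a browser-style POST upload: a base64-encoded policy document signed
+    # with the AWS signature v2 HMAC-SHA1 scheme, posted as multipart/form-data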
+    policy_document = {
+            "expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),
+            "conditions": [
+                {"bucket": bucket_name},
+                ["starts-with", "$key", "foo"],
+                {"acl": "private"},
+                ["starts-with", "$Content-Type", "text/plain"],
+                ["starts-with", "$x-amz-server-side-encryption", ""], 
+                ["content-length-range", 0, 1024]
+            ]
+    }
+
+    json_policy_document = json.JSONEncoder().encode(policy_document)
+    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
+    policy = base64.b64encode(bytes_json_policy_document)
+    aws_secret_access_key = get_main_aws_secret_key()
+    aws_access_key_id = get_main_aws_access_key()
+
+    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
+
+    payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
+    ("acl" , "private"),("signature" , signature),("policy" , policy),\
+    ("Content-Type" , "text/plain"),
+    ('file', ('bar'))])
+
+    r = requests.post(url, files = payload)
+    assert r.status_code == 204
+
+    response = client.get_object(Bucket=bucket_name, Key='foo.txt')
+    assert response['ResponseMetadata']['HTTPHeaders']['x-amz-server-side-encryption'] == 'AES256'
+    body = _get_body(response)
+    assert body == 'bar'
+
+@pytest.mark.encryption
+@pytest.mark.bucket_encryption
+@pytest.mark.fails_on_dbstore
+def test_sse_kms_default_post_object_authenticated_request():
+    kms_keyid = get_main_kms_keyid()
+    if kms_keyid is None:
+        pytest.skip('[s3 main] section missing kms_keyid')
+    bucket_name = get_new_bucket()
+    client = get_client()
+    _put_bucket_encryption_kms(client, bucket_name)
+
+    url = _get_post_url(bucket_name)
+    utc = pytz.utc
+    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
+
+    policy_document = {
+            "expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),
+            "conditions": [
+                {"bucket": bucket_name},
+                ["starts-with", "$key", "foo"],
+                {"acl": "private"},
+                ["starts-with", "$Content-Type", "text/plain"],
+                ["starts-with", "$x-amz-server-side-encryption", ""], 
+                ["content-length-range", 0, 1024]
+            ]
+    }
+
+    json_policy_document = json.JSONEncoder().encode(policy_document)
+    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
+    policy = base64.b64encode(bytes_json_policy_document)
+    aws_secret_access_key = get_main_aws_secret_key()
+    aws_access_key_id = get_main_aws_access_key()
+
+    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
+
+    payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
+    ("acl" , "private"),("signature" , signature),("policy" , policy),\
+    ("Content-Type" , "text/plain"),
+    ('file', ('bar'))])
+
+    r = requests.post(url, files = payload)
+    assert r.status_code == 204
+
+    response = client.get_object(Bucket=bucket_name, Key='foo.txt')
+    assert response['ResponseMetadata']['HTTPHeaders']['x-amz-server-side-encryption'] == 'aws:kms'
+    assert response['ResponseMetadata']['HTTPHeaders']['x-amz-server-side-encryption-aws-kms-key-id'] == kms_keyid
+    body = _get_body(response)
+    assert body == 'bar'
+
+
+def _test_sse_s3_encrypted_upload(file_size):
+    """
+    Test upload of the given size, specifically requesting sse-s3 encryption.
+    """
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    data = 'A'*file_size
+    response = client.put_object(Bucket=bucket_name, Key='testobj', Body=data, ServerSideEncryption='AES256')
+    assert response['ResponseMetadata']['HTTPHeaders']['x-amz-server-side-encryption'] == 'AES256'
+
+    response = client.get_object(Bucket=bucket_name, Key='testobj')
+    assert response['ResponseMetadata']['HTTPHeaders']['x-amz-server-side-encryption'] == 'AES256'
+    body = _get_body(response)
+    assert body == data
+
+@pytest.mark.encryption
+@pytest.mark.sse_s3
+@pytest.mark.fails_on_dbstore
+def test_sse_s3_encrypted_upload_1b():
+    _test_sse_s3_encrypted_upload(1)
+
+@pytest.mark.encryption
+@pytest.mark.sse_s3
+@pytest.mark.fails_on_dbstore
+def test_sse_s3_encrypted_upload_1kb():
+    _test_sse_s3_encrypted_upload(1024)
+
+@pytest.mark.encryption
+@pytest.mark.sse_s3
+@pytest.mark.fails_on_dbstore
+def test_sse_s3_encrypted_upload_1mb():
+    _test_sse_s3_encrypted_upload(1024*1024)
+
+@pytest.mark.encryption
+@pytest.mark.sse_s3
+@pytest.mark.fails_on_dbstore
+def test_sse_s3_encrypted_upload_8mb():
+    _test_sse_s3_encrypted_upload(8*1024*1024)
+
+def test_get_object_torrent():
+    client = get_client()
+    bucket_name = get_new_bucket()
+    key = 'Avatar.mpg'
+
+    file_size = 7 * 1024 * 1024
+    data = 'A' * file_size
+
+    client.put_object(Bucket=bucket_name, Key=key, Body=data)
+
+    response = None
+    try:
+        response = client.get_object_torrent(Bucket=bucket_name, Key=key)
+        # if successful, verify the torrent contents are different from the body
+        assert data != _get_body(response)
+    except ClientError as e:
+        # accept 404 errors - torrent support may not be configured
+        status, error_code = _get_status_and_error_code(e.response)
+        assert status == 404
+        assert error_code == 'NoSuchKey'
+
+def test_upload_part_copy_percent_encoded_key():
+    s3_client = get_client()
+    bucket_name = get_new_bucket()
+    key = "anyfile.txt"
+    encoded_key = "anyfilename%25.txt"
+    raw_key = "anyfilename%.txt"
+
+    ## PutObject: the copy source
+    s3_client.put_object(
+        Bucket=bucket_name,
+        Key=encoded_key,
+        Body=b"foo",
+        ContentType="text/plain"
+    )
+
+    # Upload the target object (initial state)
+    s3_client.put_object(
+        Bucket=bucket_name,
+        Key=key,
+        Body=b"foo",
+        ContentType="text/plain"
+    )
+
+    # Initiate multipart upload
+    mp_response = s3_client.create_multipart_upload(
+        Bucket=bucket_name,
+        Key=key
+    )
+    upload_id = mp_response["UploadId"]
+
+    # The following operation is expected to fail
+    with pytest.raises(s3_client.exceptions.ClientError) as exc_info:
+        s3_client.upload_part_copy(
+            Bucket=bucket_name,
+            Key=key,
+            PartNumber=1,
+            UploadId=upload_id,
+            CopySource={'Bucket': bucket_name, 'Key': raw_key}
+        )
+
+    # Download the object and verify content
+    final_obj = s3_client.get_object(Bucket=bucket_name, Key=key)
+    content = final_obj['Body'].read()
+    assert content == b"foo"
diff --git a/s3tests/functional/test_s3select.py b/s3tests/functional/test_s3select.py
new file mode 100644 (file)
index 0000000..1c0587a
--- /dev/null
@@ -0,0 +1,1685 @@
+import pytest
+import random
+import string
+import re
+import json
+from botocore.exceptions import ClientError
+from botocore.exceptions import EventStreamError
+
+import uuid
+import warnings
+import traceback
+
+from . import (
+    configfile,
+    setup_teardown,
+    get_client,
+    get_new_bucket_name
+    )
+
+import logging
+logging.basicConfig(level=logging.INFO)
+
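+# compatibility shim: collections.Callable was removed in Python 3.10; restore it
+# for code that still looks it up under the old name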
+import collections
+collections.Callable = collections.abc.Callable
+
+region_name = ''
+
+# recursive function for generating a random arithmetical expression
+def random_expr(depth):
+    # depth is the complexity of expression 
+    if depth==1 :
+        return str(int(random.random() * 100) + 1)+".0"
+    return '(' + random_expr(depth-1) + random.choice(['+','-','*','/']) + random_expr(depth-1) + ')'
+
+
+def generate_s3select_where_clause(bucket_name,obj_name):
+
+    a=random_expr(4)
+    b=random_expr(4)
+    s=random.choice([ '<','>','=','<=','>=','!=' ])
+
+    try:
+        eval( a )
+        eval( b )
+    except ZeroDivisionError:
+        return
+
+    # generate an s3select statement from the random expression;
+    # count(0)>0 means the where clause evaluated to true,
+    # and evaluating the same conditional expression with python's eval() should yield the same boolean result.
+    s3select_stmt =  "select count(0) from s3object where " + a + s + b + ";"
+
+    res = remove_xml_tags_from_result( run_s3select(bucket_name,obj_name,s3select_stmt) ).replace(",","")
+
+    if  s == '=':
+        s = '=='
+
+    s3select_assert_result(int(res)>0 , eval( a + s + b ))
+
+def generate_s3select_expression_projection(bucket_name,obj_name):
+
+        # generate an s3select statement that projects the random arithmetical expression;
+        # the statement returns the arithmetical result of that expression.
+        # the same expression is evaluated with python's eval(); both results should agree within epsilon.
+        
+        e = random_expr( 4 )
+
+        try:
+            eval( e )
+        except ZeroDivisionError:
+            return
+
+        if eval( e ) == 0:
+            return
+
+        res = remove_xml_tags_from_result( run_s3select(bucket_name,obj_name,"select " + e + " from s3object;",) ).replace(",","")
+
+        # accuracy level 
+        epsilon = float(0.00001) 
+
+        # both results should be close (epsilon)
+        assert(  abs(float(res.split("\n")[0]) - eval(e)) < epsilon )
+
+def get_random_string():
+
+    return uuid.uuid4().hex[:6].upper()
+
+@pytest.mark.s3select
+def test_generate_where_clause():
+
+    # create small csv file for testing the random expressions
+    single_line_csv = create_random_csv_object(1,1)
+    bucket_name = get_new_bucket_name()
+    obj_name = get_random_string() #"single_line_csv.csv"
+    upload_object(bucket_name,obj_name,single_line_csv)
+       
+    for _ in range(100): 
+        generate_s3select_where_clause(bucket_name,obj_name)
+
+
+@pytest.mark.s3select
+def test_generate_projection():
+
+    # create small csv file for testing the random expressions
+    single_line_csv = create_random_csv_object(1,1)
+    bucket_name = get_new_bucket_name()
+    obj_name = get_random_string() #"single_line_csv.csv"
+    upload_object(bucket_name,obj_name,single_line_csv)
+       
+    for _ in range(100): 
+        generate_s3select_expression_projection(bucket_name,obj_name)
+
+def s3select_assert_result(a,b):
+    if type(a) == str:
+        a_strip = a.strip()
+        b_strip = b.strip()
+        if a=="" and b=="":
+            warnings.warn(UserWarning("{}".format("both results are empty, it may indicates a wrong input, please check the test input")))
+            ## print the calling function that created the empty result.
+            stack = traceback.extract_stack(limit=2)
+            formatted_stack = traceback.format_list(stack)[0]
+            warnings.warn(UserWarning("{}".format(formatted_stack)))
+            return True
+        assert a_strip != ""
+        assert b_strip != ""
+    else:
+        if a=="" and b=="":
+            warnings.warn(UserWarning("{}".format("both results are empty, it may indicates a wrong input, please check the test input")))
+            ## print the calling function that created the empty result.
+            stack = traceback.extract_stack(limit=2)
+            formatted_stack = traceback.format_list(stack)[0]
+            warnings.warn(UserWarning("{}".format(formatted_stack)))
+            return True
+        assert a != ""
+        assert b != ""
+    assert a == b
+
+def create_csv_object_for_datetime(rows,columns):
+        result = ""
+        for _ in range(rows):
+            row = ""
+            for _ in range(columns):
+                row = row + "{}{:02d}{:02d}T{:02d}{:02d}{:02d}Z,".format(random.randint(0,100)+1900,random.randint(1,12),random.randint(1,28),random.randint(0,23),random.randint(0,59),random.randint(0,59),)
+            result += row + "\n"
+
+        return result
+
+def create_random_csv_object(rows,columns,col_delim=",",record_delim="\n",csv_schema=""):
+        result = ""
+        if len(csv_schema)>0 :
+            result = csv_schema + record_delim
+
+        for _ in range(rows):
+            row = ""
+            for _ in range(columns):
+                row = row + "{}{}".format(random.randint(0,1000),col_delim)
+            result += row + record_delim
+
+        return result
+
+def create_random_csv_object_string(rows,columns,col_delim=",",record_delim="\n",csv_schema=""):
+        result = ""
+        if len(csv_schema)>0 :
+            result = csv_schema + record_delim
+
+        for _ in range(rows):
+            row = ""
+            for _ in range(columns):
+                if random.randint(0,9) == 5:
+                    row = row + "{}{}".format(''.join(random.choice(string.ascii_letters) for m in range(10)) + "aeiou",col_delim)
+                else:
+                    row = row + "{}{}".format(''.join("cbcd" + random.choice(string.ascii_letters) for m in range(10)) + "vwxyzzvwxyz" ,col_delim)
+                
+            result += row + record_delim
+
+        return result
+
+def create_random_csv_object_trim(rows,columns,col_delim=",",record_delim="\n",csv_schema=""):
+        result = ""
+        if len(csv_schema)>0 :
+            result = csv_schema + record_delim
+
+        for _ in range(rows):
+            row = ""
+            for _ in range(columns):
+                if random.randint(0,5) == 2:
+                    row = row + "{}{}".format(''.join("   aeiou    ") ,col_delim)
+                else:
+                    row = row + "{}{}".format(''.join("abcd") ,col_delim)
+
+            result += row + record_delim
+
+        return result
+
+def create_random_csv_object_escape(rows,columns,col_delim=",",record_delim="\n",csv_schema=""):
+        result = ""
+        if len(csv_schema)>0 :
+            result = csv_schema + record_delim
+
+        for _ in range(rows):
+            row = ""
+            for _ in range(columns):
+                if random.randint(0,9) == 5:
+                    row = row + "{}{}".format(''.join("_ar") ,col_delim)
+                else:
+                    row = row + "{}{}".format(''.join("aeio_")  ,col_delim)
+                
+            result += row + record_delim
+
+        return result
+
+def create_random_csv_object_null(rows,columns,col_delim=",",record_delim="\n",csv_schema=""):
+        result = ""
+        if len(csv_schema)>0 :
+            result = csv_schema + record_delim
+
+        for _ in range(rows):
+            row = ""
+            for _ in range(columns):
+                if random.randint(0,5) == 2:
+                    row = row + "{}{}".format(''.join("") ,col_delim)
+                else:
+                    row = row + "{}{}".format(''.join("abc") ,col_delim)
+                
+            result += row + record_delim
+
+        return result
+
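+# build a JSON document of the form {"root": [ {"c1": <int>, "c2": <int>, ...}, ... ]},
+# matching the s3object[*].root paths used by the JSON s3select queries below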
+def create_random_json_object(rows,columns,col_delim=",",record_delim="\n",csv_schema=""):
+        result = "{\"root\" : ["
+        result += record_delim
+        if len(csv_schema)>0 :
+            result = csv_schema + record_delim
+
+        for _ in range(rows):
+            row = ""
+            num = 0
+            row += "{"
+            for _ in range(columns):
+                num += 1
+                row = row + "\"c" + str(num) + "\"" + ": " "{}{}".format(random.randint(0,1000),col_delim)
+            row = row[:-1]
+            row += "}"
+            row += ","
+            result += row + record_delim
+        
+        result = result[:-2]  
+        result += record_delim
+        result += "]" + "}"
+
+        return result
+
+def csv_to_json(obj, field_split=",",row_split="\n",csv_schema=""):
+    result = "{\"root\" : ["
+    result += row_split
+    if len(csv_schema)>0 :
+        result = csv_schema + row_split
+    
+    for rec in obj.split(row_split):
+        row = ""
+        num = 0
+        row += "{"
+        for col in rec.split(field_split):
+            if col == "":
+                break
+            num += 1
+            row = row + "\"c" + str(num) + "\"" + ": " "{}{}".format(col,field_split)
+        row = row[:-1]
+        row += "}"
+        row += ","
+        result += row + row_split
+        
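+    # strip the ",\n" after the last real record together with the spurious "},\n"
+    # emitted for the empty element that trails obj.split(row_split)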
+    result = result[:-5]  
+    result += row_split
+    result += "]" + "}"
+
+    return result
+
+def upload_object(bucket_name,new_key,obj):
+
+        client = get_client()
+        client.create_bucket(Bucket=bucket_name)
+        client.put_object(Bucket=bucket_name, Key=new_key, Body=obj)
+
+        # validate uploaded object
+        c2 = get_client()
+        response = c2.get_object(Bucket=bucket_name, Key=new_key)
+        assert response['Body'].read().decode('utf-8') == obj, 's3select error: downloaded object not equal to uploaded object'
+
+def run_s3select(bucket,key,query,column_delim=",",row_delim="\n",quot_char='"',esc_char='\\',csv_header_info="NONE", progress = False):
+
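+    # run an s3select query over a CSV object. Returns the concatenated Records
+    # payload as a string; with progress=True returns a (records, status) tuple instead.
+    # ClientError/EventStreamError text is returned as the result so callers can
+    # assert on the failure message.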
+    s3 = get_client()
+    result = ""
+    result_status = {}
+
+    try:
+        r = s3.select_object_content(
+        Bucket=bucket,
+        Key=key,
+        ExpressionType='SQL',
+        InputSerialization = {"CSV": {"RecordDelimiter" : row_delim, "FieldDelimiter" : column_delim,"QuoteEscapeCharacter": esc_char, "QuoteCharacter": quot_char, "FileHeaderInfo": csv_header_info}, "CompressionType": "NONE"},
+        OutputSerialization = {"CSV": {}},
+        Expression=query,
+        RequestProgress = {"Enabled": progress})
+
+    except ClientError as c:
+        result += str(c)
+        return result
+
+    if progress == False:
+
+        try:
+            for event in r['Payload']:
+                if 'Records' in event:
+                    records = event['Records']['Payload'].decode('utf-8')
+                    result += records
+
+        except EventStreamError as c:
+            result = str(c)
+            return result
+        
+    else:
+            result = []
+            max_progress_scanned = 0
+            for event in r['Payload']:
+                if 'Records' in event:
+                    records = event['Records']
+                    result.append(records.copy())
+                if 'Progress' in event:
+                    if(event['Progress']['Details']['BytesScanned'] > max_progress_scanned):
+                        max_progress_scanned = event['Progress']['Details']['BytesScanned']
+                        result_status['Progress'] = event['Progress']
+
+                if 'Stats' in event:
+                    result_status['Stats'] = event['Stats']
+                if 'End' in event:
+                    result_status['End'] = event['End']
+
+
+    if progress == False:
+        return result
+    else:
+        return result,result_status
+
+def run_s3select_output(bucket,key,query, quot_field, op_column_delim = ",", op_row_delim = "\n",  column_delim=",", op_quot_char = '"', op_esc_char = '\\', row_delim="\n",quot_char='"',esc_char='\\',csv_header_info="NONE"):
+
+    s3 = get_client()
+
+    r = s3.select_object_content(
+        Bucket=bucket,
+        Key=key,
+        ExpressionType='SQL',
+        InputSerialization = {"CSV": {"RecordDelimiter" : row_delim, "FieldDelimiter" : column_delim,"QuoteEscapeCharacter": esc_char, "QuoteCharacter": quot_char, "FileHeaderInfo": csv_header_info}, "CompressionType": "NONE"},
+        OutputSerialization = {"CSV": {"RecordDelimiter" : op_row_delim, "FieldDelimiter" : op_column_delim, "QuoteCharacter" : op_quot_char, "QuoteEscapeCharacter" : op_esc_char, "QuoteFields" : quot_field}},
+        Expression=query,)
+    
+    result = ""
+    for event in r['Payload']:
+        if 'Records' in event:
+            records = event['Records']['Payload'].decode('utf-8')
+            result += records
+    
+    return result
+
+def run_s3select_json(bucket,key,query, op_row_delim = "\n"):
+
+    s3 = get_client()
+
+    r = s3.select_object_content(
+        Bucket=bucket,
+        Key=key,
+        ExpressionType='SQL',
+        InputSerialization = {"JSON": {"Type": "DOCUMENT"}},
+        OutputSerialization = {"JSON": {}},
+        Expression=query,)
+    #Record delimiter optional in output serialization
+    
+    result = ""
+    for event in r['Payload']:
+        if 'Records' in event:
+            records = event['Records']['Payload'].decode('utf-8')
+            result += records
+    
+    return result
+
+def remove_xml_tags_from_result(obj):
+    result = ""
+    for rec in obj.split("\n"):
+        if(rec.find("Payload")>0 or rec.find("Records")>0):
+            continue
+        result += rec + "\n" # remove by split
+
+    result_strip= result.strip()
+    x = bool(re.search("^failure.*$", result_strip))
+    if x:
+        logging.info(result)
+    assert x == False
+
+    return result
+
+def create_list_of_int(column_pos,obj,field_split=",",row_split="\n"):
+    
+    list_of_int = [] 
+    for rec in obj.split(row_split):
+        col_num = 1
+        if ( len(rec) == 0):
+            continue
+        for col in rec.split(field_split):
+            if (col_num == column_pos):
+                list_of_int.append(int(col))
+            col_num+=1
+
+    return list_of_int
+
+@pytest.mark.s3select
+def test_count_operation():
+    csv_obj_name = get_random_string()
+    bucket_name = get_new_bucket_name()
+    num_of_rows = 1234
+    obj_to_load = create_random_csv_object(num_of_rows,10)
+    upload_object(bucket_name,csv_obj_name,obj_to_load)
+    res = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,"select count(0) from s3object;") ).replace(",","")
+
+    s3select_assert_result( num_of_rows, int( res ))
+
+@pytest.mark.s3select
+def test_count_json_operation():
+    json_obj_name = get_random_string()
+    bucket_name = get_new_bucket_name()
+
+    num_of_rows = 1
+    obj_to_load = create_random_json_object(num_of_rows,10)
+    upload_object(bucket_name,json_obj_name,obj_to_load)
+    res = remove_xml_tags_from_result(run_s3select_json(bucket_name,json_obj_name,"select count(0) from s3object[*];"))
+    s3select_assert_result( 1,  int(res))
+
+    res = remove_xml_tags_from_result(run_s3select_json(bucket_name,json_obj_name,"select count(0) from s3object[*].root;"))
+    s3select_assert_result( 1,  int(res))
+
+    obj_to_load = create_random_json_object(3,10)
+    upload_object(bucket_name,json_obj_name,obj_to_load)
+    res = remove_xml_tags_from_result(run_s3select_json(bucket_name,json_obj_name,"select count(0) from s3object[*].root;"))
+    s3select_assert_result( 3,  int(res))
+
+@pytest.mark.s3select
+def test_json_column_sum_min_max():
+    csv_obj = create_random_csv_object(10000,10)
+
+    json_obj = csv_to_json(csv_obj)
+
+    json_obj_name = get_random_string()
+    bucket_name = get_new_bucket_name()
+
+    upload_object(bucket_name,json_obj_name,json_obj)
+    
+    json_obj_name_2 = get_random_string()
+    bucket_name_2 = "testbuck2"
+    upload_object(bucket_name_2,json_obj_name_2,json_obj)
+    
+    res_s3select = remove_xml_tags_from_result(  run_s3select_json(bucket_name,json_obj_name,"select min(_1.c1) from s3object[*].root;")  ).replace(",","")
+    list_int = create_list_of_int( 1 , csv_obj )
+    res_target = min( list_int )
+
+    s3select_assert_result( int(res_s3select), int(res_target))
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select_json(bucket_name,json_obj_name,"select min(_1.c4) from s3object[*].root;")  ).replace(",","")
+    list_int = create_list_of_int( 4 , csv_obj )
+    res_target = min( list_int )
+
+    s3select_assert_result( int(res_s3select), int(res_target))
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select_json(bucket_name,json_obj_name,"select avg(_1.c6) from s3object[*].root;")  ).replace(",","")
+    list_int = create_list_of_int( 6 , csv_obj )
+    res_target = float(sum(list_int ))/10000
+
+    s3select_assert_result( float(res_s3select), float(res_target))
+    
+    res_s3select = remove_xml_tags_from_result(  run_s3select_json(bucket_name,json_obj_name,"select max(_1.c4) from s3object[*].root;")  ).replace(",","")
+    list_int = create_list_of_int( 4 , csv_obj )
+    res_target = max( list_int )
+
+    s3select_assert_result( int(res_s3select), int(res_target))
+    
+    res_s3select = remove_xml_tags_from_result(  run_s3select_json(bucket_name,json_obj_name,"select max(_1.c7) from s3object[*].root;")  ).replace(",","")
+    list_int = create_list_of_int( 7 , csv_obj )
+    res_target = max( list_int )
+
+    s3select_assert_result( int(res_s3select), int(res_target))
+    
+    res_s3select = remove_xml_tags_from_result(  run_s3select_json(bucket_name,json_obj_name,"select sum(_1.c4) from s3object[*].root;")  ).replace(",","")
+    list_int = create_list_of_int( 4 , csv_obj )
+    res_target = sum( list_int )
+
+    s3select_assert_result( int(res_s3select), int(res_target))
+    
+    res_s3select = remove_xml_tags_from_result(  run_s3select_json(bucket_name,json_obj_name,"select sum(_1.c7) from s3object[*].root;")  ).replace(",","")
+    list_int = create_list_of_int( 7 , csv_obj )
+    res_target = sum( list_int )
+
+    s3select_assert_result(  int(res_s3select) , int(res_target) )
+
+    # the following queries validate, on *random* input, an *accurate* relation between the condition result, the sum operation and the count operation.
+    res_s3select = remove_xml_tags_from_result(  run_s3select_json(bucket_name_2,json_obj_name_2,"select count(0),sum(_1.c1),sum(_1.c2) from s3object[*].root where (_1.c1-_1.c2) = 2;" ) )
+    count,sum1,sum2 = res_s3select.split(",")
+
+    s3select_assert_result( int(count)*2 , int(sum1)-int(sum2 ) )
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select_json(bucket_name,json_obj_name,"select count(0),sum(_1.c1),sum(_1.c2) from s3object[*].root where (_1.c1-_1.c2) = 4;" ) ) 
+    count,sum1,sum2 = res_s3select.split(",")
+
+    s3select_assert_result( int(count)*4 , int(sum1)-int(sum2) )
+
+@pytest.mark.s3select
+def test_json_nullif_expressions():
+
+    json_obj = create_random_json_object(10000,10)
+
+    json_obj_name = get_random_string()
+    bucket_name = get_new_bucket_name()
+
+    upload_object(bucket_name,json_obj_name,json_obj)
+
+    res_s3select_nullif = remove_xml_tags_from_result(  run_s3select_json(bucket_name,json_obj_name,"select count(0) from s3object[*].root where nullif(_1.c1,_1.c2) is null ;")  ).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select_json(bucket_name,json_obj_name,"select count(0) from s3object[*].root where _1.c1 = _1.c2  ;")  ).replace("\n","")
+
+    s3select_assert_result( res_s3select_nullif, res_s3select)
+
+    res_s3select_nullif = remove_xml_tags_from_result(  run_s3select_json(bucket_name,json_obj_name,"select (nullif(_1.c1,_1.c2) is null) from s3object[*].root ;")  ).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select_json(bucket_name,json_obj_name,"select (_1.c1 = _1.c2) from s3object[*].root  ;")  ).replace("\n","")
+
+    s3select_assert_result( res_s3select_nullif, res_s3select)
+
+    res_s3select_nullif = remove_xml_tags_from_result(  run_s3select_json(bucket_name,json_obj_name,"select count(0) from s3object[*].root where not nullif(_1.c1,_1.c2) is null ;")  ).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select_json(bucket_name,json_obj_name,"select count(0) from s3object[*].root where _1.c1 != _1.c2  ;")  ).replace("\n","")
+
+    s3select_assert_result( res_s3select_nullif, res_s3select)
+
+    res_s3select_nullif = remove_xml_tags_from_result(  run_s3select_json(bucket_name,json_obj_name,"select (nullif(_1.c1,_1.c2) is not null) from s3object[*].root ;")  ).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select_json(bucket_name,json_obj_name,"select (_1.c1 != _1.c2) from s3object[*].root  ;")  ).replace("\n","")
+
+    s3select_assert_result( res_s3select_nullif, res_s3select)
+
+    res_s3select_nullif = remove_xml_tags_from_result(  run_s3select_json(bucket_name,json_obj_name,"select count(0) from s3object[*].root where  nullif(_1.c1,_1.c2) = _1.c1 ;")  ).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select_json(bucket_name,json_obj_name,"select count(0) from s3object[*].root where _1.c1 != _1.c2  ;")  ).replace("\n","")
+
+    s3select_assert_result( res_s3select_nullif, res_s3select)
+
+
+@pytest.mark.s3select
+def test_column_sum_min_max():
+    csv_obj = create_random_csv_object(10000,10)
+
+    csv_obj_name = get_random_string()
+    bucket_name = get_new_bucket_name()
+    
+    upload_object(bucket_name,csv_obj_name,csv_obj)
+    
+    csv_obj_name_2 = get_random_string()
+    bucket_name_2 = "testbuck2"
+    upload_object(bucket_name_2,csv_obj_name_2,csv_obj)
+    
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select min(int(_1)) from s3object;")  ).replace(",","")
+    list_int = create_list_of_int( 1 , csv_obj )
+    res_target = min( list_int )
+
+    s3select_assert_result( int(res_s3select), int(res_target))
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select min(int(_4)) from s3object;")  ).replace(",","")
+    list_int = create_list_of_int( 4 , csv_obj )
+    res_target = min( list_int )
+
+    s3select_assert_result( int(res_s3select), int(res_target))
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select avg(int(_6)) from s3object;")  ).replace(",","")
+    list_int = create_list_of_int( 6 , csv_obj )
+    res_target = float(sum(list_int ))/10000
+
+    s3select_assert_result( float(res_s3select), float(res_target))
+    
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select max(int(_4)) from s3object;")  ).replace(",","")
+    list_int = create_list_of_int( 4 , csv_obj )
+    res_target = max( list_int )
+
+    s3select_assert_result( int(res_s3select), int(res_target))
+    
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select max(int(_7)) from s3object;")  ).replace(",","")
+    list_int = create_list_of_int( 7 , csv_obj )
+    res_target = max( list_int )
+
+    s3select_assert_result( int(res_s3select), int(res_target))
+    
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select sum(int(_4)) from s3object;")  ).replace(",","")
+    list_int = create_list_of_int( 4 , csv_obj )
+    res_target = sum( list_int )
+
+    s3select_assert_result( int(res_s3select), int(res_target))
+    
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select sum(int(_7)) from s3object;")  ).replace(",","")
+    list_int = create_list_of_int( 7 , csv_obj )
+    res_target = sum( list_int )
+
+    s3select_assert_result(  int(res_s3select) , int(res_target) )
+
+    # the following queries validate, on *random* input, an *accurate* relation between the condition result, the sum operation and the count operation.
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name_2,csv_obj_name_2,"select count(0),sum(int(_1)),sum(int(_2)) from s3object where (int(_1)-int(_2)) = 2;" ) )
+    count,sum1,sum2 = res_s3select.split(",")
+
+    s3select_assert_result( int(count)*2 , int(sum1)-int(sum2 ) )
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select count(0),sum(int(_1)),sum(int(_2)) from s3object where (int(_1)-int(_2)) = 4;" ) ) 
+    count,sum1,sum2 = res_s3select.split(",")
+
+    s3select_assert_result( int(count)*4 , int(sum1)-int(sum2) )
+
+@pytest.mark.s3select
+def test_nullif_expressions():
+
+    csv_obj = create_random_csv_object(10000,10)
+
+    csv_obj_name = get_random_string()
+    bucket_name = get_new_bucket_name()
+
+    upload_object(bucket_name,csv_obj_name,csv_obj)
+
+    res_s3select_nullif = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select count(0) from s3object where nullif(_1,_2) is null ;")  ).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select count(0) from s3object where _1 = _2  ;")  ).replace("\n","")
+
+    s3select_assert_result( res_s3select_nullif, res_s3select)
+
+    res_s3select_nullif = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select (nullif(_1,_2) is null) from s3object ;")  ).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select (_1 = _2) from s3object  ;")  ).replace("\n","")
+
+    s3select_assert_result( res_s3select_nullif, res_s3select)
+
+    res_s3select_nullif = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select count(0) from s3object where not nullif(_1,_2) is null ;")  ).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select count(0) from s3object where _1 != _2  ;")  ).replace("\n","")
+
+    s3select_assert_result( res_s3select_nullif, res_s3select)
+
+    res_s3select_nullif = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select (nullif(_1,_2) is not null) from s3object ;")  ).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select (_1 != _2) from s3object  ;")  ).replace("\n","")
+
+    s3select_assert_result( res_s3select_nullif, res_s3select)
+
+    res_s3select_nullif = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select count(0) from s3object where  nullif(_1,_2) = _1 ;")  ).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select count(0) from s3object where _1 != _2  ;")  ).replace("\n","")
+
+    s3select_assert_result( res_s3select_nullif, res_s3select)
+
+    csv_obj = create_random_csv_object_null(10000,10)
+
+    upload_object(bucket_name,csv_obj_name,csv_obj)
+
+    res_s3select_nullif = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select count(*) from s3object where nullif(_1,null) is null;")  ).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select count(*) from s3object where _1 is null;")  ).replace("\n","")
+
+    s3select_assert_result( res_s3select_nullif, res_s3select)
+
+    res_s3select_nullif = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select (nullif(_1,null) is null) from s3object;")  ).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select (_1 is null) from s3object;")  ).replace("\n","")
+
+    s3select_assert_result( res_s3select_nullif, res_s3select)
+
+@pytest.mark.s3select
+def test_nulliftrue_expressions():
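+    # same nullif equivalences as above, but every predicate is compared explicitly against true.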
+
+    csv_obj = create_random_csv_object(10000,10)
+
+    csv_obj_name = get_random_string()
+    bucket_name = get_new_bucket_name()
+
+    upload_object(bucket_name,csv_obj_name,csv_obj)
+
+    res_s3select_nullif = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select count(0) from s3object where (nullif(_1,_2) is null) = true ;")  ).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select count(0) from s3object where _1 = _2  ;")  ).replace("\n","")
+
+    s3select_assert_result( res_s3select_nullif, res_s3select)
+
+    res_s3select_nullif = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select count(0) from s3object where not (nullif(_1,_2) is null) = true ;")  ).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select count(0) from s3object where _1 != _2  ;")  ).replace("\n","")
+
+    s3select_assert_result( res_s3select_nullif, res_s3select)
+
+    res_s3select_nullif = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select count(0) from s3object where (nullif(_1,_2) = _1 = true) ;")  ).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select count(0) from s3object where _1 != _2  ;")  ).replace("\n","")
+
+    s3select_assert_result( res_s3select_nullif, res_s3select)
+
+@pytest.mark.s3select
+def test_is_not_null_expressions():
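+    # nullif(_1,_2) is not null exactly when _1 != _2, so both counts should be equal.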
+
+    csv_obj = create_random_csv_object(10000,10)
+
+    csv_obj_name = get_random_string()
+    bucket_name = get_new_bucket_name()
+
+    upload_object(bucket_name,csv_obj_name,csv_obj)
+
+    res_s3select_null = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select count(*) from s3object where nullif(_1,_2) is not null ;")  ).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select count(*) from s3object where _1 != _2  ;")  ).replace("\n","")
+
+    s3select_assert_result( res_s3select_null, res_s3select)
+
+    res_s3select_null = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select count(*) from s3object where (nullif(_1,_1) and _1 = _2) is not null ;")  ).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select count(*) from s3object where _1 != _2  ;")  ).replace("\n","")
+
+    s3select_assert_result( res_s3select_null, res_s3select)
+
+@pytest.mark.s3select
+def test_lowerupper_expressions():
+
+    csv_obj = create_random_csv_object(1,10)
+
+    csv_obj_name = get_random_string()
+    bucket_name = get_new_bucket_name()
+
+    upload_object(bucket_name,csv_obj_name,csv_obj)
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select lower("AB12cd$$") from s3object ;')  ).replace("\n","")
+
+    s3select_assert_result( res_s3select, "ab12cd$$")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select upper("ab12CD$$") from s3object ;')  ).replace("\n","")
+
+    s3select_assert_result( res_s3select, "AB12CD$$")
+
+@pytest.mark.s3select
+def test_in_expressions():
+
+    # purpose of test: the engine evaluates the IN predicate correctly, both in the where clause and as a projection
+    csv_obj = create_random_csv_object(10000,10)
+
+    csv_obj_name = get_random_string()
+    bucket_name = get_new_bucket_name()
+
+    upload_object(bucket_name,csv_obj_name,csv_obj)
+
+    res_s3select_in = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select int(_1) from s3object where int(_1) in(1);')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select int(_1) from s3object where int(_1) = 1;')).replace("\n","")
+
+    s3select_assert_result( res_s3select_in, res_s3select )
+
+    res_s3select_in = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select (int(_1) in(1)) from s3object;')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select (int(_1) = 1) from s3object;')).replace("\n","")
+
+    s3select_assert_result( res_s3select_in, res_s3select )
+
+    res_s3select_in = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select int(_1) from s3object where int(_1) in(1,0);')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select int(_1) from s3object where int(_1) = 1 or int(_1) = 0;')).replace("\n","")
+
+    s3select_assert_result( res_s3select_in, res_s3select )
+
+    res_s3select_in = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select (int(_1) in(1,0)) from s3object;')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select (int(_1) = 1 or int(_1) = 0) from s3object;')).replace("\n","")
+
+    s3select_assert_result( res_s3select_in, res_s3select )
+
+    res_s3select_in = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select int(_2) from s3object where int(_2) in(1,0,2);')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select int(_2) from s3object where int(_2) = 1 or int(_2) = 0 or int(_2) = 2;')).replace("\n","")
+
+    s3select_assert_result( res_s3select_in, res_s3select )
+
+    res_s3select_in = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select (int(_2) in(1,0,2)) from s3object;')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select (int(_2) = 1 or int(_2) = 0 or int(_2) = 2) from s3object;')).replace("\n","")
+
+    s3select_assert_result( res_s3select_in, res_s3select )
+
+    res_s3select_in = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select int(_2) from s3object where int(_2)*2 in(int(_3)*2,int(_4)*3,int(_5)*5);')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select int(_2) from s3object where int(_2)*2 = int(_3)*2 or int(_2)*2 = int(_4)*3 or int(_2)*2 = int(_5)*5;')).replace("\n","")
+
+    s3select_assert_result( res_s3select_in, res_s3select )
+
+    res_s3select_in = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select (int(_2)*2 in(int(_3)*2,int(_4)*3,int(_5)*5)) from s3object;')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select (int(_2)*2 = int(_3)*2 or int(_2)*2 = int(_4)*3 or int(_2)*2 = int(_5)*5) from s3object;')).replace("\n","")
+
+    s3select_assert_result( res_s3select_in, res_s3select )
+
+    res_s3select_in = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select int(_1) from s3object where character_length(_1) = 2 and substring(_1,2,1) in ("3");')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select int(_1) from s3object where _1 like "_3";')).replace("\n","")
+
+    s3select_assert_result( res_s3select_in, res_s3select )
+
+    res_s3select_in = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select (character_length(_1) = 2 and substring(_1,2,1) in ("3")) from s3object;')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select (_1 like "_3") from s3object;')).replace("\n","")
+
+    s3select_assert_result( res_s3select_in, res_s3select )
+
+@pytest.mark.s3select
+def test_true_false_in_expressions():
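+    # IN predicates wrapped in an explicit "= true" comparison should behave the same as the
+    # equivalent chain of equality tests.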
+
+    csv_obj = create_random_csv_object(10000,10)
+
+    csv_obj_name = get_random_string()
+    bucket_name = get_new_bucket_name()
+
+    ## 1,2 must exist in first/second column (to avoid empty results)
+    csv_obj = csv_obj + "1,2,,,,,,,,,,\n"
+
+    upload_object(bucket_name,csv_obj_name,csv_obj)
+
+    res_s3select_in = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select int(_1) from s3object where (int(_1) in(1)) = true;')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select int(_1) from s3object where int(_1) = 1;')).replace("\n","")
+
+    s3select_assert_result( res_s3select_in, res_s3select )
+
+    res_s3select_in = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select int(_1) from s3object where (int(_1) in(1,0)) = true;')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select int(_1) from s3object where int(_1) = 1 or int(_1) = 0;')).replace("\n","")
+
+    s3select_assert_result( res_s3select_in, res_s3select )
+
+    res_s3select_in = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select int(_2) from s3object where (int(_2) in(1,0,2)) = true;')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select int(_2) from s3object where int(_2) = 1 or int(_2) = 0 or int(_2) = 2;')).replace("\n","")
+
+    s3select_assert_result( res_s3select_in, res_s3select )
+
+    res_s3select_in = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select int(_2) from s3object where (int(_2)*2 in(int(_3)*2,int(_4)*3,int(_5)*5)) = true;')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select int(_2) from s3object where int(_2)*2 = int(_3)*2 or int(_2)*2 = int(_4)*3 or int(_2)*2 = int(_5)*5;')).replace("\n","")
+
+    s3select_assert_result( res_s3select_in, res_s3select )
+
+    res_s3select_in = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select int(_1) from s3object where (character_length(_1) = 2) = true and (substring(_1,2,1) in ("3")) = true;')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select int(_1) from s3object where _1 like "_3";')).replace("\n","")
+
+    s3select_assert_result( res_s3select_in, res_s3select )
+
+    res_s3select_in = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select (int(_1) in (1,2,0)) as a1 from s3object where a1 = true;')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select \"true\"from s3object where (int(_1) in (1,0,2)) ;')).replace("\n","")
+
+    s3select_assert_result( res_s3select_in, res_s3select )  
+
+@pytest.mark.s3select
+def test_like_expressions():
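+    # each LIKE pattern (%, _, and a [y-z] character class) is cross-checked against an
+    # equivalent substring/between formulation.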
+
+    csv_obj = create_random_csv_object_string(1000,10)
+
+    csv_obj_name = get_random_string()
+    bucket_name = get_new_bucket_name()
+
+    upload_object(bucket_name,csv_obj_name,csv_obj)
+
+    res_s3select_like = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where _1 like "%aeio%";')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select count(*) from s3object where substring(_1,11,4) = "aeio" ;')).replace("\n","")
+
+    s3select_assert_result( res_s3select_like, res_s3select )
+
+    res_s3select_like = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select  (_1 like "%aeio%") from s3object ;')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select (substring(_1,11,4) = "aeio") from s3object ;')).replace("\n","")
+
+    s3select_assert_result( res_s3select_like, res_s3select )
+
+    res_s3select_like = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where _1 like "cbcd%";')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select count(*) from s3object where substring(_1,1,4) = "cbcd";')).replace("\n","")
+
+    s3select_assert_result( res_s3select_like, res_s3select )
+
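+    # a deliberately malformed query (stray trailing "like") should be rejected with UnsupportedSyntax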
+    res_s3select_like = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from stdin where _1 like "%aeio%" like;')).replace("\n","")
+
+    find_like = res_s3select_like.find("UnsupportedSyntax")
+
+    assert int(find_like) >= 0
+
+    res_s3select_like = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select (_1 like "cbcd%") from s3object;')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select (substring(_1,1,4) = "cbcd") from s3object;')).replace("\n","")
+
+    s3select_assert_result( res_s3select_like, res_s3select )
+
+    res_s3select_like = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where _3 like "%y[y-z]";')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select count(*) from s3object where substring(_3,char_length(_3),1) between "y" and "z" and substring(_3,char_length(_3)-1,1) = "y";')).replace("\n","")
+
+    s3select_assert_result( res_s3select_like, res_s3select )
+
+    res_s3select_like = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select (_3 like "%y[y-z]") from s3object;')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select (substring(_3,char_length(_3),1) between "y" and "z" and substring(_3,char_length(_3)-1,1) = "y") from s3object;')).replace("\n","")
+
+    s3select_assert_result( res_s3select_like, res_s3select )
+
+    res_s3select_like = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where _2 like "%yz";')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select count(*) from s3object where substring(_2,char_length(_2),1) = "z" and substring(_2,char_length(_2)-1,1) = "y";')).replace("\n","")
+
+    s3select_assert_result( res_s3select_like, res_s3select )
+
+    res_s3select_like = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select (_2 like "%yz") from s3object;')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select (substring(_2,char_length(_2),1) = "z" and substring(_2,char_length(_2)-1,1) = "y") from s3object;')).replace("\n","")
+
+    s3select_assert_result( res_s3select_like, res_s3select )
+
+    res_s3select_like = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where _3 like "c%z";')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select count(*) from s3object where substring(_3,char_length(_3),1) = "z" and substring(_3,1,1) = "c";')).replace("\n","")
+
+    s3select_assert_result( res_s3select_like, res_s3select )
+
+    res_s3select_like = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select (_3 like "c%z") from s3object;')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select (substring(_3,char_length(_3),1) = "z" and substring(_3,1,1) = "c") from s3object;')).replace("\n","")
+
+    s3select_assert_result( res_s3select_like, res_s3select )
+
+    res_s3select_like = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where _2 like "%xy_";')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select count(*) from s3object where substring(_2,char_length(_2)-1,1) = "y" and substring(_2,char_length(_2)-2,1) = "x";')).replace("\n","")
+
+    s3select_assert_result( res_s3select_like, res_s3select )
+
+    res_s3select_like = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select (_2 like "%xy_") from s3object;')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select (substring(_2,char_length(_2)-1,1) = "y" and substring(_2,char_length(_2)-2,1) = "x") from s3object;')).replace("\n","")
+
+    s3select_assert_result( res_s3select_like, res_s3select )
+
+@pytest.mark.s3select
+def test_truefalselike_expressions():
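+    # same LIKE equivalences as in test_like_expressions, with each predicate compared explicitly against true.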
+
+    csv_obj = create_random_csv_object_string(1000,10)
+
+    csv_obj_name = get_random_string()
+    bucket_name = get_new_bucket_name()
+
+    upload_object(bucket_name,csv_obj_name,csv_obj)
+
+    res_s3select_like = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where (_1 like "%aeio%") = true;')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select count(*) from s3object where substring(_1,11,4) = "aeio" ;')).replace("\n","")
+
+    s3select_assert_result( res_s3select_like, res_s3select )
+
+    res_s3select_like = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where (_1 like "cbcd%") = true;')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select count(*) from s3object where substring(_1,1,4) = "cbcd";')).replace("\n","")
+
+    s3select_assert_result( res_s3select_like, res_s3select )
+
+    res_s3select_like = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where (_3 like "%y[y-z]") = true;')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select count(*) from s3object where (substring(_3,char_length(_3),1) between "y" and "z") = true and (substring(_3,char_length(_3)-1,1) = "y") = true;')).replace("\n","")
+
+    s3select_assert_result( res_s3select_like, res_s3select )
+
+    res_s3select_like = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where (_2 like "%yz") = true;')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select count(*) from s3object where (substring(_2,char_length(_2),1) = "z") = true and (substring(_2,char_length(_2)-1,1) = "y") = true;')).replace("\n","")
+
+    s3select_assert_result( res_s3select_like, res_s3select )
+
+    res_s3select_like = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where (_3 like "c%z") = true;')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select count(*) from s3object where (substring(_3,char_length(_3),1) = "z") = true and (substring(_3,1,1) = "c") = true;')).replace("\n","")
+
+    s3select_assert_result( res_s3select_like, res_s3select )
+
+    res_s3select_like = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where (_2 like "%xy_") = true;')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select count(*) from s3object where (substring(_2,char_length(_2)-1,1) = "y") = true and (substring(_2,char_length(_2)-2,1) = "x") = true;')).replace("\n","")
+
+    s3select_assert_result( res_s3select_like, res_s3select )
+
+@pytest.mark.s3select
+def test_nullif_expressions():
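+    # NOTE: this def and the stdin-based variants that follow rebind names that are already
+    # defined above (test_nullif_expressions, test_lowerupper_expressions, test_in_expressions,
+    # test_like_expressions), so the earlier definitions are shadowed at import time.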
+
+    csv_obj = create_random_csv_object(10000,10)
+
+    csv_obj_name = get_random_string()
+    bucket_name = get_new_bucket_name()
+
+    upload_object(bucket_name,csv_obj_name,csv_obj)
+
+    res_s3select_nullif = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select count(0) from stdin where nullif(_1,_2) is null ;")  ).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select count(0) from stdin where _1 = _2  ;")  ).replace("\n","")
+
+    assert res_s3select_nullif == res_s3select
+
+    res_s3select_nullif = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select count(0) from stdin where not nullif(_1,_2) is null ;")  ).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select count(0) from stdin where _1 != _2  ;")  ).replace("\n","")
+
+    assert res_s3select_nullif == res_s3select
+
+    res_s3select_nullif = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select count(0) from stdin where  nullif(_1,_2) = _1 ;")  ).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select count(0) from stdin where _1 != _2  ;")  ).replace("\n","")
+
+    assert res_s3select_nullif == res_s3select
+
+@pytest.mark.s3select
+def test_lowerupper_expressions():
+
+    csv_obj = create_random_csv_object(1,10)
+
+    csv_obj_name = get_random_string()
+    bucket_name = get_new_bucket_name()
+
+    upload_object(bucket_name,csv_obj_name,csv_obj)
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select lower("AB12cd$$") from stdin ;')  ).replace("\n","")
+
+    assert res_s3select == "ab12cd$$"
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select upper("ab12CD$$") from stdin ;')  ).replace("\n","")
+
+    assert res_s3select == "AB12CD$$"
+
+@pytest.mark.s3select
+def test_in_expressions():
+
+    # purpose of test: the engine evaluates the IN predicate correctly in the where clause
+    csv_obj = create_random_csv_object(10000,10)
+
+    csv_obj_name = get_random_string()
+    bucket_name = get_new_bucket_name()
+
+    upload_object(bucket_name,csv_obj_name,csv_obj)
+
+    res_s3select_in = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select int(_1) from stdin where int(_1) in(1);')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select int(_1) from stdin where int(_1) = 1;')).replace("\n","")
+
+    assert res_s3select_in == res_s3select 
+
+    res_s3select_in = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select int(_1) from stdin where int(_1) in(1,0);')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select int(_1) from stdin where int(_1) = 1 or int(_1) = 0;')).replace("\n","")
+
+    assert res_s3select_in == res_s3select 
+
+    res_s3select_in = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select int(_2) from stdin where int(_2) in(1,0,2);')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select int(_2) from stdin where int(_2) = 1 or int(_2) = 0 or int(_2) = 2;')).replace("\n","")
+
+    assert res_s3select_in == res_s3select 
+
+    res_s3select_in = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select int(_2) from stdin where int(_2)*2 in(int(_3)*2,int(_4)*3,int(_5)*5);')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select int(_2) from stdin where int(_2)*2 = int(_3)*2 or int(_2)*2 = int(_4)*3 or int(_2)*2 = int(_5)*5;')).replace("\n","")
+
+    assert res_s3select_in == res_s3select 
+
+    res_s3select_in = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select int(_1) from stdin where character_length(_1) = 2 and substring(_1,2,1) in ("3");')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select int(_1) from stdin where _1 like "_3";')).replace("\n","")
+
+    assert res_s3select_in == res_s3select 
+
+@pytest.mark.s3select
+def test_like_expressions():
+
+    csv_obj = create_random_csv_object_string(10000,10)
+
+    csv_obj_name = get_random_string()
+    bucket_name = get_new_bucket_name()
+
+    upload_object(bucket_name,csv_obj_name,csv_obj)
+
+    res_s3select_in = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from stdin where _1 like "%aeio%";')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select count(*) from stdin where substring(_1,11,4) = "aeio" ;')).replace("\n","")
+
+    assert res_s3select_in == res_s3select 
+
+    res_s3select_in = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from stdin where _1 like "cbcd%";')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select count(*) from stdin where substring(_1,1,4) = "cbcd";')).replace("\n","")
+
+    assert res_s3select_in == res_s3select 
+
+    res_s3select_in = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from stdin where _3 like "%y[y-z]";')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select count(*) from stdin where substring(_3,character_length(_3),1) between "y" and "z" and substring(_3,character_length(_3)-1,1) = "y";')).replace("\n","")
+
+    assert res_s3select_in == res_s3select 
+
+    res_s3select_in = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from stdin where _2 like "%yz";')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select count(*) from stdin where substring(_2,character_length(_2),1) = "z" and substring(_2,character_length(_2)-1,1) = "y";')).replace("\n","")
+
+    assert res_s3select_in == res_s3select 
+
+    res_s3select_in = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from stdin where _3 like "c%z";')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select count(*) from stdin where substring(_3,character_length(_3),1) = "z" and substring(_3,1,1) = "c";')).replace("\n","")
+
+    assert res_s3select_in == res_s3select 
+
+    res_s3select_in = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from stdin where _2 like "%xy_";')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select count(*) from stdin where substring(_2,character_length(_2)-1,1) = "y" and substring(_2,character_length(_2)-2,1) = "x";')).replace("\n","")
+
+    assert res_s3select_in == res_s3select 
+
+
+@pytest.mark.s3select
+def test_complex_expressions():
+
+    # purpose of test: the engine correctly processes several projections containing aggregation functions
+    csv_obj = create_random_csv_object(10000,10)
+
+    csv_obj_name = get_random_string()
+    bucket_name = get_new_bucket_name()
+
+    upload_object(bucket_name,csv_obj_name,csv_obj)
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select min(int(_1)),max(int(_2)),min(int(_3))+1 from s3object;")).replace("\n","")
+
+    min_1 = min ( create_list_of_int( 1 , csv_obj ) )
+    max_2 = max ( create_list_of_int( 2 , csv_obj ) )
+    min_3 = min ( create_list_of_int( 3 , csv_obj ) ) + 1
+
+    __res = "{},{},{}".format(min_1,max_2,min_3)
+    
+    # assertion is according to the random-csv generator
+    s3select_assert_result( res_s3select, __res )
+
+    # purpose of test: all where-conditions select the same group of values, thus produce the same result
+    res_s3select_substring = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select min(int(_2)),max(int(_2)) from s3object where substring(_2,1,1) = "1" and char_length(_2) = 3;')).replace("\n","")
+
+    res_s3select_between_numbers = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select min(int(_2)),max(int(_2)) from s3object where int(_2)>=100 and int(_2)<200;')).replace("\n","")
+
+    res_s3select_eq_modolu = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select min(int(_2)),max(int(_2)) from s3object where int(_2)/100 = 1 and character_length(_2) = 3;')).replace("\n","")
+
+    s3select_assert_result( res_s3select_substring, res_s3select_between_numbers)
+
+    s3select_assert_result( res_s3select_between_numbers, res_s3select_eq_modolu)
+    
+@pytest.mark.s3select
+def test_alias():
+
+    # purpose: the test compares the results of exactly the same queries, one using aliases and the other without.
+    # the test sets aliases on 3 projections; the third projection uses another projection's alias, and the where clause uses aliases as well.
+    # the test validates that the where clause and the projections evaluate aliases correctly; bear in mind that each alias has its own cache,
+    # and that cache needs to be invalidated for every new row.
+
+    csv_obj = create_random_csv_object(10000,10)
+
+    csv_obj_name = get_random_string()
+    bucket_name = get_new_bucket_name()
+
+    upload_object(bucket_name,csv_obj_name,csv_obj)
+
+    res_s3select_alias = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select int(_1) as a1, int(_2) as a2 , (a1+a2) as a3 from s3object where a3>100 and a3<300;")  ).replace(",","")
+
+    res_s3select_no_alias = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select int(_1),int(_2),int(_1)+int(_2) from s3object where (int(_1)+int(_2))>100 and (int(_1)+int(_2))<300;")  ).replace(",","")
+
+    s3select_assert_result( res_s3select_alias, res_s3select_no_alias)
+
+
+@pytest.mark.s3select
+def test_alias_cyclic_refernce():
+
+    number_of_rows = 10000
+    
+    # purpose of test is to validate that the s3select engine detects a cyclic reference between aliases.
+    csv_obj = create_random_csv_object(number_of_rows,10)
+
+    csv_obj_name = get_random_string()
+    bucket_name = get_new_bucket_name()
+
+    upload_object(bucket_name,csv_obj_name,csv_obj)
+
+    res_s3select_alias = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select int(_1) as a1,int(_2) as a2, a1+a4 as a3, a5+a1 as a4, int(_3)+a3 as a5 from s3object;")  )
+
+    find_res = res_s3select_alias.find("number of calls exceed maximum size, probably a cyclic reference to alias")
+    
+    assert int(find_res) >= 0 
+
+@pytest.mark.s3select
+def test_datetime():
+
+    # purpose of test is to validate that the date-time functionality is correct,
+    # by creating the same groups with different functions (nested calls), which later produce the same result
+
+    csv_obj = create_csv_object_for_datetime(10000,1)
+
+    csv_obj_name = get_random_string()
+    bucket_name = get_new_bucket_name()
+
+    upload_object(bucket_name,csv_obj_name,csv_obj)
+
+    res_s3select_date_time = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(0) from  s3object where extract(year from to_timestamp(_1)) > 1950 and extract(year from to_timestamp(_1)) < 1960;')  )
+
+    res_s3select_substring = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(0) from  s3object where int(substring(_1,1,4))>1950 and int(substring(_1,1,4))<1960;')  )
+
+    s3select_assert_result( res_s3select_date_time, res_s3select_substring)
+
+    res_s3select_date_time_to_string = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select cast(to_string(to_timestamp(_1), \'x\') as int) from  s3object;')  )
+
+    res_s3select_date_time_extract = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select extract(timezone_hour from to_timestamp(_1)) from  s3object;')  )
+
+    s3select_assert_result( res_s3select_date_time_to_string, res_s3select_date_time_extract )
+
+    res_s3select_date_time_to_timestamp = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select extract(month from to_timestamp(_1)) from s3object where extract(month from to_timestamp(_1)) = 5;')  )
+
+    res_s3select_substring = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select cast(substring(_1, 5, 2) as int) from s3object where _1 like \'____05%\';')  )
+
+    s3select_assert_result( res_s3select_date_time_to_timestamp, res_s3select_substring)
+
+@pytest.mark.s3select
+def test_true_false_datetime():
+
+    # purpose of test is to validate that the date-time functionality is correct,
+    # by creating the same groups with different functions (nested calls), which later produce the same result
+
+    csv_obj = create_csv_object_for_datetime(10000,1)
+
+    csv_obj_name = get_random_string()
+    bucket_name = get_new_bucket_name()
+
+    upload_object(bucket_name,csv_obj_name,csv_obj)
+
+    res_s3select_date_time = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(0) from  s3object where (extract(year from to_timestamp(_1)) > 1950) = true and (extract(year from to_timestamp(_1)) < 1960) = true;')  )
+
+    res_s3select_substring = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(0) from  s3object where int(substring(_1,1,4))>1950 and int(substring(_1,1,4))<1960;')  )
+
+    s3select_assert_result( res_s3select_date_time, res_s3select_substring)
+
+    res_s3select_date_time = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(0) from  s3object where  (date_diff(month,to_timestamp(_1),date_add(month,2,to_timestamp(_1)) ) = 2) = true;')  )
+
+    res_s3select_count = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(0) from  s3object;')  )
+
+    s3select_assert_result( res_s3select_date_time, res_s3select_count)
+
+    res_s3select_date_time = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(0) from  s3object where (date_diff(year,to_timestamp(_1),date_add(day, 366 ,to_timestamp(_1))) = 1) = true ;')  )
+
+    s3select_assert_result( res_s3select_date_time, res_s3select_count)
+
+    # validate that utcnow() integrates correctly with other date-time functions
+    res_s3select_date_time_utcnow = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(0) from  s3object where (date_diff(hour,utcnow(),date_add(day,1,utcnow())) = 24) = true ;')  )
+
+    s3select_assert_result( res_s3select_date_time_utcnow, res_s3select_count)
+
+@pytest.mark.s3select
+def test_csv_parser():
+
+    # purpose: test the default csv meta-characters (, \n " \ ); a returned value may itself contain a meta-character
+    # NOTE: the default meta-characters for s3select are also meta-characters for python, thus in one example a double backslash is mandatory
+
+    csv_obj = r',first,,,second,third="c31,c32,c33",forth="1,2,3,4",fifth=my_string=\"any_value\" \, my_other_string=\"aaaa\,bbb\" ,' + "\n"
+    csv_obj_name = get_random_string()
+    bucket_name = get_new_bucket_name()
+
+    upload_object(bucket_name,csv_obj_name,csv_obj)
+
+    # returned value contains a comma{,}
+    res_s3select_alias = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select _6 from s3object;")  ).replace("\n","")
+    s3select_assert_result( res_s3select_alias, 'third=c31,c32,c33')
+
+    # returned value contains a comma{,}
+    res_s3select_alias = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select _7 from s3object;")  ).replace("\n","")
+    s3select_assert_result( res_s3select_alias, 'forth=1,2,3,4')
+
+    # returned value contains comma{,} and quote{"}; the escape-rule{\} bypasses the quote{"}, and the escape{\} itself is removed.
+    res_s3select_alias = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select _8 from s3object;")  ).replace("\n","")
+    s3select_assert_result( res_s3select_alias, 'fifth=my_string="any_value" , my_other_string="aaaa,bbb" ')
+
+    # return NULL as first token
+    res_s3select_alias = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select _1 from s3object;")  ).replace("\n","")
+    s3select_assert_result( res_s3select_alias, 'null')
+
+    # return NULL in the middle of line
+    res_s3select_alias = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select _3 from s3object;")  ).replace("\n","")
+    s3select_assert_result( res_s3select_alias, 'null')
+
+    # return NULL in the middle of line (successive)
+    res_s3select_alias = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select _4 from s3object;")  ).replace("\n","")
+    s3select_assert_result( res_s3select_alias, 'null')
+
+    # return NULL at the end of the line
+    res_s3select_alias = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select _9 from s3object;")  ).replace("\n","")
+    s3select_assert_result( res_s3select_alias, 'null')
+
+@pytest.mark.s3select
+def test_csv_definition():
+
+    number_of_rows = 10000
+
+    # create an object with the pipe sign as field separator and tab as row delimiter.
+    csv_obj = create_random_csv_object(number_of_rows,10,"|","\t")
+
+    csv_obj_name = get_random_string()
+    bucket_name = get_new_bucket_name()
+
+    upload_object(bucket_name,csv_obj_name,csv_obj)
+   
+    # purpose of test is to correctly parse input with different csv definitions
+    res = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,"select count(0) from s3object;","|","\t") ).replace(",","")
+
+    s3select_assert_result( number_of_rows, int(res))
+    
+    # assertion is according to the random-csv generator
+    # purpose of test is to validate that tokens are processed correctly
+    res_s3select = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,"select min(int(_1)),max(int(_2)),min(int(_3))+1 from s3object;","|","\t") ).replace("\n","")
+
+    min_1 = min ( create_list_of_int( 1 , csv_obj , "|","\t") )
+    max_2 = max ( create_list_of_int( 2 , csv_obj , "|","\t") )
+    min_3 = min ( create_list_of_int( 3 , csv_obj , "|","\t") ) + 1
+
+    __res = "{},{},{}".format(min_1,max_2,min_3)
+    s3select_assert_result( res_s3select, __res )
+
+
+@pytest.mark.s3select
+def test_schema_definition():
+
+    number_of_rows = 10000
+
+    # purpose of test is to validate functionality using csv header info
+    csv_obj = create_random_csv_object(number_of_rows,10,csv_schema="c1,c2,c3,c4,c5,c6,c7,c8,c9,c10")
+
+    csv_obj_name = get_random_string()
+    bucket_name = get_new_bucket_name()
+
+    upload_object(bucket_name,csv_obj_name,csv_obj)
+
+    # ignore the schema on the first line and retrieve columns using generic column numbers
+    res_ignore = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,"select _1,_3 from s3object;",csv_header_info="IGNORE") ).replace("\n","")
+
+    # use the schema on the first line; the query refers to the attached schema
+    res_use = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,"select c1,c3 from s3object;",csv_header_info="USE") ).replace("\n","")
+    # result of both queries should be the same
+    s3select_assert_result( res_ignore, res_use)
+
+    # using a column name that does not exist in the schema
+    res_multiple_defintion = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,"select c1,c10,int(c11) from s3object;",csv_header_info="USE") ).replace("\n","")
+
+    assert ((res_multiple_defintion.find("alias {c11} or column not exist in schema")) >= 0)
+
+    #find_processing_error = res_multiple_defintion.find("ProcessingTimeError")
+    assert ((res_multiple_defintion.find("ProcessingTimeError")) >= 0)
+
+    # alias-name is identical to column-name
+    res_multiple_defintion = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,"select int(c1)+int(c2) as c4,c4 from s3object;",csv_header_info="USE") ).replace("\n","")
+
+    assert ((res_multiple_defintion.find("multiple definition of column {c4} as schema-column and alias"))  >= 0)
+
+@pytest.mark.s3select
+def test_when_then_else_expressions():
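+    # the number of rows emitted for each CASE branch should equal the count(*) returned by
+    # the matching where clause.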
+
+    csv_obj = create_random_csv_object(10000,10)
+
+    csv_obj_name = get_random_string()
+    bucket_name = get_new_bucket_name()
+
+    upload_object(bucket_name,csv_obj_name,csv_obj)
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select case when cast(_1 as int)>100 and cast(_1 as int)<200 then "(100-200)" when cast(_1 as int)>200 and cast(_1 as int)<300 then "(200-300)" else "NONE" end from s3object;')  ).replace("\n","")
+
+    count1 = res_s3select.count("(100-200)")  
+
+    count2 = res_s3select.count("(200-300)") 
+
+    count3 = res_s3select.count("NONE")
+
+    res = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where  cast(_1 as int)>100 and cast(_1 as int)<200  ;')  ).replace("\n","")
+
+    res1 = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where  cast(_1 as int)>200 and cast(_1 as int)<300  ;')  ).replace("\n","")
+    
+    res2 = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where  cast(_1 as int)<=100 or cast(_1 as int)>=300 or cast(_1 as int)=200  ;')  ).replace("\n","")
+
+    s3select_assert_result( str(count1) , res)
+
+    s3select_assert_result( str(count2) , res1)
+
+    s3select_assert_result( str(count3) , res2)
+
+@pytest.mark.s3select
+def test_coalesce_expressions():
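+    # coalesce returns its first non-null argument and nullif(x,x) is always null,
+    # so coalesce(nullif(_5,_5),nullif(_1,_1),_2) should reduce to _2.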
+
+    csv_obj = create_random_csv_object(10000,10)
+
+    csv_obj_name = get_random_string()
+    bucket_name = get_new_bucket_name()
+
+    upload_object(bucket_name,csv_obj_name,csv_obj)
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where char_length(_3)>2 and char_length(_4)>2 and cast(substring(_3,1,2) as int) = cast(substring(_4,1,2) as int);')  ).replace("\n","")  
+
+    res_null = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where cast(_3 as int)>99 and cast(_4 as int)>99 and coalesce(nullif(cast(substring(_3,1,2) as int),cast(substring(_4,1,2) as int)),7) = 7;' ) ).replace("\n","") 
+
+    s3select_assert_result( res_s3select, res_null)
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select coalesce(nullif(_5,_5),nullif(_1,_1),_2) from s3object;')  ).replace("\n","") 
+
+    res_coalesce = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select coalesce(_2) from s3object;')  ).replace("\n","")   
+
+    s3select_assert_result( res_s3select, res_coalesce)
+
+
+@pytest.mark.s3select
+def test_cast_expressions():
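+    # assuming the random generator emits unsigned integers without leading zeros, cast(_3 as int) > 999
+    # holds exactly when char_length(_3) > 3, and the range 100..999 corresponds to char_length(_3) = 3.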
+
+    csv_obj = create_random_csv_object(10000,10)
+
+    csv_obj_name = get_random_string()
+    bucket_name = get_new_bucket_name()
+
+    upload_object(bucket_name,csv_obj_name,csv_obj)
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where cast(_3 as int)>999;')  ).replace("\n","")  
+
+    res = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where char_length(_3)>3;')  ).replace("\n","") 
+
+    s3select_assert_result( res_s3select, res)
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where cast(_3 as int)>99 and cast(_3 as int)<1000;')  ).replace("\n","")  
+
+    res = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where char_length(_3)=3;')  ).replace("\n","") 
+
+    s3select_assert_result( res_s3select, res)
+
+@pytest.mark.s3select
+def test_version():
+
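+    # the early return below disables this test; the remaining body is kept for reference.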
+    return
+    number_of_rows = 1
+
+    # purpose of test is to validate the version() function
+    csv_obj = create_random_csv_object(number_of_rows,10)
+
+    csv_obj_name = get_random_string()
+    bucket_name = get_new_bucket_name()
+
+    upload_object(bucket_name,csv_obj_name,csv_obj)
+
+    res_version = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,"select version() from s3object;") ).replace("\n","")
+
+    s3select_assert_result( res_version, "41.a," )
+
+@pytest.mark.s3select
+def test_trim_expressions():
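+    # trim() variants (both/leading/trailing) are cross-checked against substring queries;
+    # the offsets used below (substring(_1,4,5) = "aeiou") imply the generator pads each value
+    # with three leading spaces.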
+
+    csv_obj = create_random_csv_object_trim(10000,10)
+
+    csv_obj_name = get_random_string()
+    bucket_name = get_new_bucket_name()
+
+    upload_object(bucket_name,csv_obj_name,csv_obj)
+
+    res_s3select_trim = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where trim(_1) = "aeiou";')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select count(*) from s3object where substring(_1 from 4 for 5) = "aeiou";')).replace("\n","")
+
+    s3select_assert_result( res_s3select_trim, res_s3select )
+
+    res_s3select_trim = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where trim(both from _1) = "aeiou";')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select count(*) from s3object where substring(_1,4,5) = "aeiou";')).replace("\n","")
+
+    s3select_assert_result( res_s3select_trim, res_s3select )
+
+    res_s3select_trim = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where trim(trailing from _1) = "   aeiou";')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select count(*) from s3object where substring(_1,4,5) = "aeiou";')).replace("\n","")
+
+    s3select_assert_result( res_s3select_trim, res_s3select )
+
+    res_s3select_trim = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where trim(leading from _1) = "aeiou    ";')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select count(*) from s3object where substring(_1,4,5) = "aeiou";')).replace("\n","")
+
+    s3select_assert_result( res_s3select_trim, res_s3select )
+
+    res_s3select_trim = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where trim(trim(leading from _1)) = "aeiou";')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select count(*) from s3object where substring(_1,4,5) = "aeiou";')).replace("\n","")
+
+    s3select_assert_result( res_s3select_trim, res_s3select )
+
+@pytest.mark.s3select
+def test_truefalse_trim_expressions():
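+    # same trim equivalences as above, with each predicate compared explicitly against true.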
+
+    csv_obj = create_random_csv_object_trim(10000,10)
+
+    csv_obj_name = get_random_string()
+    bucket_name = get_new_bucket_name()
+
+    upload_object(bucket_name,csv_obj_name,csv_obj)
+
+    res_s3select_trim = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where trim(_1) = "aeiou" = true;')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select count(*) from s3object where substring(_1 from 4 for 5) = "aeiou";')).replace("\n","")
+
+    s3select_assert_result( res_s3select_trim, res_s3select )
+
+    res_s3select_trim = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where trim(both from _1) = "aeiou" = true;')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select count(*) from s3object where substring(_1,4,5) = "aeiou";')).replace("\n","")
+
+    s3select_assert_result( res_s3select_trim, res_s3select )
+
+    res_s3select_trim = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where trim(trailing from _1) = "   aeiou" = true;')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select count(*) from s3object where substring(_1,4,5) = "aeiou";')).replace("\n","")
+
+    s3select_assert_result( res_s3select_trim, res_s3select )
+
+    res_s3select_trim = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where trim(leading from _1) = "aeiou    " = true;')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select count(*) from s3object where substring(_1,4,5) = "aeiou";')).replace("\n","")
+
+    s3select_assert_result( res_s3select_trim, res_s3select )
+
+    res_s3select_trim = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where trim(trim(leading from _1)) = "aeiou" = true;')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select count(*) from s3object where substring(_1,4,5) = "aeiou";')).replace("\n","")
+
+    s3select_assert_result( res_s3select_trim, res_s3select )
+
+@pytest.mark.s3select
+def test_escape_expressions():
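+    # with an ESCAPE clause, the character that follows the escape character is matched literally
+    # rather than as a wildcard.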
+
+    csv_obj = create_random_csv_object_escape(10000,10)
+
+    csv_obj_name = get_random_string()
+    bucket_name = get_new_bucket_name()
+
+    upload_object(bucket_name,csv_obj_name,csv_obj)
+
+    res_s3select_escape = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where _1 like "%_ar" escape "%";')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select count(*) from s3object where substring(_1,char_length(_1),1) = "r" and substring(_1,char_length(_1)-1,1) = "a" and substring(_1,char_length(_1)-2,1) = "_";')).replace("\n","")
+
+    s3select_assert_result( res_s3select_escape, res_s3select )
+
+    res_s3select_escape = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where _1 like "%aeio$_" escape "$";')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select count(*) from s3object where substring(_1,1,5) = "aeio_";')).replace("\n","")
+
+    s3select_assert_result( res_s3select_escape, res_s3select )
+
+@pytest.mark.s3select
+def test_case_value_expressions():
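+    # the value form "case X when Y then ..." should be equivalent to the searched form
+    # "case when X = Y then ...".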
+
+    csv_obj = create_random_csv_object(10000,10)
+
+    csv_obj_name = get_random_string()
+    bucket_name = get_new_bucket_name()
+
+    upload_object(bucket_name,csv_obj_name,csv_obj)
+
+    res_s3select_case = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select case cast(_1 as int) when cast(_2 as int) then "case_1_1" else "case_2_2" end from s3object;')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select case when cast(_1 as int) = cast(_2 as int) then "case_1_1" else "case_2_2" end from s3object;')).replace("\n","")
+
+    s3select_assert_result( res_s3select_case, res_s3select )
+
+@pytest.mark.s3select
+def test_bool_cast_expressions():
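+    # casting a non-zero integer to bool yields true, so both counts should match.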
+
+    csv_obj = create_random_csv_object(10000,10)
+
+    csv_obj_name = get_random_string()
+    bucket_name = get_new_bucket_name()
+
+    upload_object(bucket_name,csv_obj_name,csv_obj)
+
+    res_s3select_cast = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where cast(int(_1) as bool) = true ;')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select count(*) from s3object where cast(_1 as int) != 0 ;')).replace("\n","")
+
+    s3select_assert_result( res_s3select_cast, res_s3select )
+
+@pytest.mark.s3select
+def test_progress_expressions():
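+    # request progress notifications and validate that the Progress/Stats details match the
+    # uploaded object size (BytesScanned) and the total payload of returned records (BytesReturned).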
+
+    csv_obj = create_random_csv_object(1000000,10)
+
+    csv_obj_name = get_random_string()
+    bucket_name = get_new_bucket_name()
+
+    upload_object(bucket_name,csv_obj_name,csv_obj)
+
+    obj_size = len(csv_obj.encode('utf-8'))
+
+    result_status = {}
+    result_size = 0
+
+    res_s3select_response,result_status = run_s3select(bucket_name,csv_obj_name,"select sum(int(_1)) from s3object;",progress = True)
+
+    for rec in res_s3select_response:
+        result_size += len(rec['Payload'])
+
+    records_payload_size = result_size
+   
+    # To do: Validate bytes processed after supporting compressed data
+    s3select_assert_result(obj_size, result_status['Progress']['Details']['BytesScanned'])
+    s3select_assert_result(records_payload_size, result_status['Progress']['Details']['BytesReturned'])
+
+    # stats response payload validation
+    s3select_assert_result(obj_size, result_status['Stats']['Details']['BytesScanned'])
+    s3select_assert_result(records_payload_size, result_status['Stats']['Details']['BytesReturned'])
+
+    # end response
+    s3select_assert_result({}, result_status['End'])
+
+@pytest.mark.s3select
+def test_output_serial_expressions():
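+    # output-serialization checks: the extra run_s3select_output arguments presumably select
+    # the quote-fields mode (ALWAYS / ASNEEDED) and custom delimiters ('$', '#').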
+    return # TODO fix test
+
+    csv_obj = create_random_csv_object(10000,10)
+
+    csv_obj_name = get_random_string()
+    bucket_name = get_new_bucket_name()
+
+    upload_object(bucket_name,csv_obj_name,csv_obj)
+
+    res_s3select_1 = remove_xml_tags_from_result(  run_s3select_output(bucket_name,csv_obj_name,"select _1, _2 from s3object where nullif(_1,_2) is null ;", "ALWAYS")  ).replace("\n",",").replace(",","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select _1, _2 from s3object where _1 = _2 ;")  ).replace("\n",",")
+
+    res_s3select_list = res_s3select.split(',')
+
+    res_s3select_list.pop()
+
+    res_s3select_final = (''.join('"' + item + '"' for item in res_s3select_list))
+
+    s3select_assert_result( '""'+res_s3select_1+'""', res_s3select_final)
+
+
+    res_s3select_in = remove_xml_tags_from_result(  run_s3select_output(bucket_name,csv_obj_name,'select int(_1) from s3object where (int(_1) in(int(_2)));', "ASNEEDED", '$', '#')).replace("\n","#") ## TODO why \n appears in output?
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select int(_1) from s3object where int(_1) = int(_2);')).replace("\n","#")
+    
+    res_s3select_list = res_s3select.split('#')
+
+    res_s3select_list.pop()
+
+    res_s3select_final = (''.join(item + '#' for item in res_s3select_list))
+
+
+    s3select_assert_result(res_s3select_in , res_s3select_final )
+
+
+    res_s3select_quot = remove_xml_tags_from_result(  run_s3select_output(bucket_name,csv_obj_name,'select int(_1) from s3object where (int(_1) in(int(_2)));', "ALWAYS", '$', '#')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select int(_1) from s3object where int(_1) = int(_2);')).replace("\n","#")
+    res_s3select_list = res_s3select.split('#')
+
+    res_s3select_list.pop()
+
+    res_s3select_final = (''.join('"' + item + '"' + '#' for item in res_s3select_list))
+
+    s3select_assert_result( '""#'+res_s3select_quot+'""#', res_s3select_final )
diff --git a/s3tests/functional/test_sns.py b/s3tests/functional/test_sns.py
new file mode 100644 (file)
index 0000000..360f14e
--- /dev/null
@@ -0,0 +1,159 @@
+import json
+import pytest
+from botocore.exceptions import ClientError
+from . import (
+    configfile,
+    get_iam_root_client,
+    get_iam_alt_root_client,
+    get_new_bucket_name,
+    get_prefix,
+    nuke_prefixed_buckets,
+)
+from .iam import iam_root, iam_alt_root
+from .utils import assert_raises, _get_status_and_error_code
+
+def get_new_topic_name():
+    return get_new_bucket_name()
+
+def nuke_topics(client, prefix):
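+    # best-effort cleanup: delete every topic whose ARN contains the given prefix, ignoring delete errors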
+    p = client.get_paginator('list_topics')
+    for response in p.paginate():
+        for topic in response['Topics']:
+            arn = topic['TopicArn']
+            # skip topics that don't belong to this test run's prefix
+            if prefix not in arn:
+                continue
+            try:
+                client.delete_topic(TopicArn=arn)
+            except:
+                pass
+
+@pytest.fixture
+def sns(iam_root):
+    client = get_iam_root_client(service_name='sns')
+    yield client
+    nuke_topics(client, get_prefix())
+
+@pytest.fixture
+def sns_alt(iam_alt_root):
+    client = get_iam_alt_root_client(service_name='sns')
+    yield client
+    nuke_topics(client, get_prefix())
+
+@pytest.fixture
+def s3(iam_root):
+    client = get_iam_root_client(service_name='s3')
+    yield client
+    nuke_prefixed_buckets(get_prefix(), client)
+
+@pytest.fixture
+def s3_alt(iam_alt_root):
+    client = get_iam_alt_root_client(service_name='s3')
+    yield client
+    nuke_prefixed_buckets(get_prefix(), client)
+
+
+@pytest.mark.iam_account
+@pytest.mark.sns
+def test_account_topic(sns):
+    name = get_new_topic_name()
+
+    response = sns.create_topic(Name=name)
+    arn = response['TopicArn']
+    assert arn.startswith('arn:aws:sns:')
+    assert arn.endswith(f':{name}')
+
+    response = sns.list_topics()
+    assert arn in [p['TopicArn'] for p in response['Topics']]
+
+    sns.set_topic_attributes(TopicArn=arn, AttributeName='Policy', AttributeValue='')
+
+    response = sns.get_topic_attributes(TopicArn=arn)
+    assert 'Attributes' in response
+
+    sns.delete_topic(TopicArn=arn)
+
+    response = sns.list_topics()
+    assert arn not in [p['TopicArn'] for p in response['Topics']]
+
+    with pytest.raises(sns.exceptions.NotFoundException):
+        sns.get_topic_attributes(TopicArn=arn)
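+    # deleting an already-deleted topic should still return success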
+    sns.delete_topic(TopicArn=arn)
+
+@pytest.mark.iam_account
+@pytest.mark.sns
+def test_cross_account_topic(sns, sns_alt):
+    name = get_new_topic_name()
+    arn = sns.create_topic(Name=name)['TopicArn']
+
+    # not visible to any alt user apis
+    with pytest.raises(sns.exceptions.NotFoundException):
+        sns_alt.get_topic_attributes(TopicArn=arn)
+    with pytest.raises(sns.exceptions.NotFoundException):
+        sns_alt.set_topic_attributes(TopicArn=arn, AttributeName='Policy', AttributeValue='')
+
+    # delete returns success
+    sns_alt.delete_topic(TopicArn=arn)
+
+    response = sns_alt.list_topics()
+    assert arn not in [p['TopicArn'] for p in response['Topics']]
+
+@pytest.mark.iam_account
+@pytest.mark.sns
+def test_account_topic_publish(sns, s3):
+    name = get_new_topic_name()
+
+    response = sns.create_topic(Name=name)
+    topic_arn = response['TopicArn']
+
+    bucket = get_new_bucket_name()
+    s3.create_bucket(Bucket=bucket)
+
+    config = {'TopicConfigurations': [{
+        'Id': 'id',
+        'TopicArn': topic_arn,
+        'Events': [ 's3:ObjectCreated:*' ],
+        }]}
+    s3.put_bucket_notification_configuration(
+            Bucket=bucket, NotificationConfiguration=config)
+
+@pytest.mark.iam_account
+@pytest.mark.iam_cross_account
+@pytest.mark.sns
+def test_cross_account_topic_publish(sns, s3_alt, iam_alt_root):
+    name = get_new_topic_name()
+
+    response = sns.create_topic(Name=name)
+    topic_arn = response['TopicArn']
+
+    bucket = get_new_bucket_name()
+    s3_alt.create_bucket(Bucket=bucket)
+
+    config = {'TopicConfigurations': [{
+        'Id': 'id',
+        'TopicArn': topic_arn,
+        'Events': [ 's3:ObjectCreated:*' ],
+        }]}
+
+    # expect AccessDenied because no resource policy allows cross-account access
+    e = assert_raises(ClientError, s3_alt.put_bucket_notification_configuration,
+                      Bucket=bucket, NotificationConfiguration=config)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status == 403
+    assert error_code == 'AccessDenied'
+
+    # add topic policy to allow the alt user
+    alt_principal = iam_alt_root.get_user()['User']['Arn']
+    policy = json.dumps({
+        'Version': '2012-10-17',
+        'Statement': [{
+            'Effect': 'Allow',
+            'Principal': {'AWS': alt_principal},
+            'Action': 'sns:Publish',
+            'Resource': topic_arn
+            }]
+        })
+    sns.set_topic_attributes(TopicArn=topic_arn, AttributeName='Policy',
+                             AttributeValue=policy)
+
+    s3_alt.put_bucket_notification_configuration(
+            Bucket=bucket, NotificationConfiguration=config)
diff --git a/s3tests/functional/test_sts.py b/s3tests/functional/test_sts.py
new file mode 100644 (file)
index 0000000..b13f56d
--- /dev/null
@@ -0,0 +1,2071 @@
+import boto3
+import botocore.session
+from botocore.exceptions import ClientError
+from botocore.exceptions import ParamValidationError
+import pytest
+import isodate
+import email.utils
+import datetime
+import threading
+import re
+import pytz
+from collections import OrderedDict
+import requests
+import json
+import base64
+import hmac
+import hashlib
+import xml.etree.ElementTree as ET
+import time
+import operator
+import os
+import string
+import random
+import socket
+import ssl
+import logging
+from collections import namedtuple
+
+from email.header import decode_header
+
+from . import (
+    configfile,
+    setup_teardown,
+    get_iam_client,
+    get_sts_client,
+    get_client,
+    get_alt_user_id,
+    get_config_endpoint,
+    get_new_bucket_name,
+    get_parameter_name,
+    get_main_aws_access_key,
+    get_main_aws_secret_key,
+    get_thumbprint,
+    get_aud,
+    get_token,
+    get_realm_name,
+    check_webidentity,
+    get_iam_access_key,
+    get_iam_secret_key,
+    get_sub,
+    get_azp,
+    get_user_token
+    )
+
+log = logging.getLogger(__name__)
+
+def create_role(iam_client,path,rolename,policy_document,description,sessionduration,permissionboundary,tag_list=None):
+    role_err=None
+    role_response = None
+    if rolename is None:
+        rolename=get_parameter_name()
+    if tag_list is None:
+        tag_list = []
+    try:
+        role_response = iam_client.create_role(Path=path,RoleName=rolename,AssumeRolePolicyDocument=policy_document,Tags=tag_list)
+    except ClientError as e:
+        role_err = e.response['Error']['Code']
+    return (role_err,role_response,rolename)
+
+def put_role_policy(iam_client,rolename,policyname,role_policy):
+    role_err=None
+    role_response = None
+    if policyname is None:
+        policyname=get_parameter_name() 
+    try:
+        role_response = iam_client.put_role_policy(RoleName=rolename,PolicyName=policyname,PolicyDocument=role_policy)
+    except ClientError as e:
+        role_err = e.response['Error']['Code']
+    return (role_err,role_response)
+
+def put_user_policy(iam_client,username,policyname,policy_document):
+    role_err=None
+    role_response = None
+    if policyname is None:
+        policyname=get_parameter_name()
+    try:
+        role_response = iam_client.put_user_policy(UserName=username,PolicyName=policyname,PolicyDocument=policy_document)
+    except ClientError as e:
+        role_err = e.response['Error']['Code']
+    return (role_err,role_response,policyname)
+
+def get_s3_client_using_iam_creds():
+    iam_access_key = get_iam_access_key()
+    iam_secret_key = get_iam_secret_key()
+    default_endpoint = get_config_endpoint()
+
+    s3_client_iam_creds = boto3.client('s3',
+                              aws_access_key_id = iam_access_key,
+                              aws_secret_access_key = iam_secret_key,
+                              endpoint_url=default_endpoint,
+                              region_name='',
+                          )
+
+    return s3_client_iam_creds
+
+def create_oidc_provider(iam_client, url, clientidlist, thumbprintlist):
+    oidc_arn = None
+    oidc_error = None
+    clientids = []
+    if clientidlist is None:
+        clientidlist=clientids
+    try:
+        oidc_response = iam_client.create_open_id_connect_provider(
+            Url=url,
+            ClientIDList=clientidlist,
+            ThumbprintList=thumbprintlist,
+        )
+        oidc_arn = oidc_response['OpenIDConnectProviderArn']
+        print (oidc_arn)
+    except ClientError as e:
+        oidc_error = e.response['Error']['Code']
+        print (oidc_error)
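+        # creation failed (commonly because the provider already exists);
+        # fall back to deriving the provider ARN from the URL and looking it up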
+        try:
+            oidc_error = None
+            print (url)
+            if url.startswith('http://'):
+                url = url[len('http://'):]
+            elif url.startswith('https://'):
+                url = url[len('https://'):]
+            elif url.startswith('www.'):
+                url = url[len('www.'):]
+            oidc_arn = 'arn:aws:iam:::oidc-provider/{}'.format(url)
+            print (url)
+            print (oidc_arn)
+            oidc_response = iam_client.get_open_id_connect_provider(OpenIDConnectProviderArn=oidc_arn)
+        except ClientError as e:
+            oidc_arn = None
+    return (oidc_arn, oidc_error)
+
+def get_s3_resource_using_iam_creds():
+    iam_access_key = get_iam_access_key()
+    iam_secret_key = get_iam_secret_key()
+    default_endpoint = get_config_endpoint()
+
+    s3_res_iam_creds = boto3.resource('s3',
+                              aws_access_key_id = iam_access_key,
+                              aws_secret_access_key = iam_secret_key,
+                              endpoint_url=default_endpoint,
+                              region_name='',
+                          )
+
+    return s3_res_iam_creds
+
+@pytest.mark.test_of_sts
+@pytest.mark.fails_on_dbstore
+def test_get_session_token():
+    iam_client=get_iam_client()
+    sts_client=get_sts_client()
+    sts_user_id=get_alt_user_id()
+    default_endpoint=get_config_endpoint()
+    
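+    # deny s3 access for requests made with permanent credentials (sts:authentication false),
+    # while still allowing sts:GetSessionToken so the user can obtain session credentials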
+    user_policy = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Deny\",\"Action\":\"s3:*\",\"Resource\":[\"*\"],\"Condition\":{\"BoolIfExists\":{\"sts:authentication\":\"false\"}}},{\"Effect\":\"Allow\",\"Action\":\"sts:GetSessionToken\",\"Resource\":\"*\",\"Condition\":{\"BoolIfExists\":{\"sts:authentication\":\"false\"}}}]}"
+    (resp_err,resp,policy_name)=put_user_policy(iam_client,sts_user_id,None,user_policy)
+    assert resp['ResponseMetadata']['HTTPStatusCode'] == 200
+    
+    response=sts_client.get_session_token()
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+    
+    s3_client=boto3.client('s3',
+                aws_access_key_id = response['Credentials']['AccessKeyId'],
+                aws_secret_access_key = response['Credentials']['SecretAccessKey'],
+                aws_session_token = response['Credentials']['SessionToken'],
+                endpoint_url=default_endpoint,
+                region_name='',
+                )
+    bucket_name = get_new_bucket_name()
+    try:
+        s3bucket = s3_client.create_bucket(Bucket=bucket_name)
+        assert s3bucket['ResponseMetadata']['HTTPStatusCode'] == 200
+        finish=s3_client.delete_bucket(Bucket=bucket_name)
+    finally: # clean up user policy even if create_bucket/delete_bucket fails
+        iam_client.delete_user_policy(UserName=sts_user_id,PolicyName=policy_name)
+
+@pytest.mark.test_of_sts
+@pytest.mark.fails_on_dbstore
+def test_assume_role_allow():
+    iam_client=get_iam_client()    
+    sts_client=get_sts_client()
+    sts_user_id=get_alt_user_id()
+    default_endpoint=get_config_endpoint()
+    role_session_name=get_parameter_name()
+    
+    policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"AWS\":[\"arn:aws:iam:::user/"+sts_user_id+"\"]},\"Action\":[\"sts:AssumeRole\"]}]}"
+    (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
+    if role_response:
+        assert role_response['Role']['Arn'] == 'arn:aws:iam:::role/'+general_role_name+''
+    else:
+        assert False, role_error
+    
+    role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":\"arn:aws:s3:::*\"}}"
+    (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy)
+    if response:
+        assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+    else:
+        assert False, role_err
+    
+    resp=sts_client.assume_role(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name)
+    assert resp['ResponseMetadata']['HTTPStatusCode'] == 200
+    
+    s3_client = boto3.client('s3',
+               aws_access_key_id = resp['Credentials']['AccessKeyId'],
+               aws_secret_access_key = resp['Credentials']['SecretAccessKey'],
+               aws_session_token = resp['Credentials']['SessionToken'],
+               endpoint_url=default_endpoint,
+               region_name='',
+               )
+    bucket_name = get_new_bucket_name()
+    s3bucket = s3_client.create_bucket(Bucket=bucket_name)
+    assert s3bucket['ResponseMetadata']['HTTPStatusCode'] == 200
+    bkt = s3_client.delete_bucket(Bucket=bucket_name)
+    assert bkt['ResponseMetadata']['HTTPStatusCode'] == 204
+
+@pytest.mark.test_of_sts
+@pytest.mark.fails_on_dbstore
+def test_assume_role_deny():
+    s3bucket_error=None
+    iam_client=get_iam_client()    
+    sts_client=get_sts_client()
+    sts_user_id=get_alt_user_id()
+    default_endpoint=get_config_endpoint()
+    role_session_name=get_parameter_name()
+    
+    policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"AWS\":[\"arn:aws:iam:::user/"+sts_user_id+"\"]},\"Action\":[\"sts:AssumeRole\"]}]}"
+    (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
+    if role_response:
+        assert role_response['Role']['Arn'] == 'arn:aws:iam:::role/'+general_role_name+''
+    else:
+        assert False, role_error
+    
+    role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Deny\",\"Action\":\"s3:*\",\"Resource\":\"arn:aws:s3:::*\"}}"
+    (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy)
+    if response:
+        assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+    else:
+        assert False, role_err
+    
+    resp=sts_client.assume_role(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name)
+    assert resp['ResponseMetadata']['HTTPStatusCode'] == 200
+    
+    s3_client = boto3.client('s3',
+               aws_access_key_id = resp['Credentials']['AccessKeyId'],
+               aws_secret_access_key = resp['Credentials']['SecretAccessKey'],
+               aws_session_token = resp['Credentials']['SessionToken'],
+               endpoint_url=default_endpoint,
+               region_name='',
+               )
+    bucket_name = get_new_bucket_name()
+    try:
+        s3bucket = s3_client.create_bucket(Bucket=bucket_name)
+    except ClientError as e:
+        s3bucket_error = e.response.get("Error", {}).get("Code")
+    assert s3bucket_error == 'AccessDenied'
+
+@pytest.mark.test_of_sts
+@pytest.mark.fails_on_dbstore
+def test_assume_role_creds_expiry():
+    iam_client=get_iam_client()    
+    sts_client=get_sts_client()
+    sts_user_id=get_alt_user_id()
+    default_endpoint=get_config_endpoint()
+    role_session_name=get_parameter_name()
+    
+    policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"AWS\":[\"arn:aws:iam:::user/"+sts_user_id+"\"]},\"Action\":[\"sts:AssumeRole\"]}]}"
+    (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
+    if role_response:
+        assert role_response['Role']['Arn'] == 'arn:aws:iam:::role/'+general_role_name+''
+    else:
+        assert False, role_error
+    
+    role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":\"arn:aws:s3:::*\"}}"
+    (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy)
+    if response:
+        assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+    else:
+        assert False, role_err
+    
+    resp=sts_client.assume_role(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,DurationSeconds=900)
+    assert resp['ResponseMetadata']['HTTPStatusCode'] == 200
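+    # wait out the 900-second session so the credentials used below are expired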
+    time.sleep(900)
+    
+    s3_client = boto3.client('s3',
+               aws_access_key_id = resp['Credentials']['AccessKeyId'],
+               aws_secret_access_key = resp['Credentials']['SecretAccessKey'],
+               aws_session_token = resp['Credentials']['SessionToken'],
+               endpoint_url=default_endpoint,
+               region_name='',
+               )
+    bucket_name = get_new_bucket_name()
+    try:
+        s3bucket = s3_client.create_bucket(Bucket=bucket_name)
+    except ClientError as e:
+        s3bucket_error = e.response.get("Error", {}).get("Code")
+    assert s3bucket_error == 'AccessDenied'
+
+@pytest.mark.test_of_sts
+@pytest.mark.fails_on_dbstore
+def test_assume_role_deny_head_nonexistent():
+    # create a bucket with the normal s3 client
+    bucket_name = get_new_bucket_name()
+    get_client().create_bucket(Bucket=bucket_name)
+
+    iam_client=get_iam_client()
+    sts_client=get_sts_client()
+    sts_user_id=get_alt_user_id()
+    default_endpoint=get_config_endpoint()
+    role_session_name=get_parameter_name()
+
+    policy_document = '{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"AWS":["arn:aws:iam:::user/'+sts_user_id+'"]},"Action":["sts:AssumeRole"]}]}'
+    (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
+    if role_response:
+        assert role_response['Role']['Arn'] == 'arn:aws:iam:::role/'+general_role_name
+    else:
+        assert False, role_error
+
+    # allow GetObject but not ListBucket, so HEAD on a missing key should return 403 instead of 404
+    role_policy = '{"Version":"2012-10-17","Statement":{"Effect":"Allow","Action":"s3:GetObject","Principal":"*","Resource":"arn:aws:s3:::*"}}'
+    (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy)
+    if response:
+        assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+    else:
+        assert False, role_err
+
+    resp=sts_client.assume_role(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name)
+    assert resp['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    s3_client = boto3.client('s3',
+               aws_access_key_id = resp['Credentials']['AccessKeyId'],
+               aws_secret_access_key = resp['Credentials']['SecretAccessKey'],
+               aws_session_token = resp['Credentials']['SessionToken'],
+               endpoint_url=default_endpoint,
+               region_name='')
+    status=200
+    try:
+        s3_client.head_object(Bucket=bucket_name, Key='nonexistent')
+    except ClientError as e:
+        status = e.response['ResponseMetadata']['HTTPStatusCode']
+    assert status == 403
+
+@pytest.mark.test_of_sts
+@pytest.mark.fails_on_dbstore
+def test_assume_role_allow_head_nonexistent():
+    # create a bucket with the normal s3 client
+    bucket_name = get_new_bucket_name()
+    get_client().create_bucket(Bucket=bucket_name)
+
+    iam_client=get_iam_client()
+    sts_client=get_sts_client()
+    sts_user_id=get_alt_user_id()
+    default_endpoint=get_config_endpoint()
+    role_session_name=get_parameter_name()
+
+    policy_document = '{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"AWS":["arn:aws:iam:::user/'+sts_user_id+'"]},"Action":["sts:AssumeRole"]}]}'
+    (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
+    if role_response:
+        assert role_response['Role']['Arn'] == 'arn:aws:iam:::role/'+general_role_name
+    else:
+        assert False, role_error
+
+    # allow GetObject and ListBucket
+    role_policy = '{"Version":"2012-10-17","Statement":{"Effect":"Allow","Action":["s3:GetObject","s3:ListBucket"],"Principal":"*","Resource":"arn:aws:s3:::*"}}'
+    (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy)
+    if response:
+        assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+    else:
+        assert False, role_err
+
+    resp=sts_client.assume_role(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name)
+    assert resp['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    s3_client = boto3.client('s3',
+               aws_access_key_id = resp['Credentials']['AccessKeyId'],
+               aws_secret_access_key = resp['Credentials']['SecretAccessKey'],
+               aws_session_token = resp['Credentials']['SessionToken'],
+               endpoint_url=default_endpoint,
+               region_name='')
+    status=200
+    try:
+        s3_client.head_object(Bucket=bucket_name, Key='nonexistent')
+    except ClientError as e:
+        status = e.response['ResponseMetadata']['HTTPStatusCode']
+    assert status == 404
+
+
+@pytest.mark.webidentity_test
+@pytest.mark.token_claims_trust_policy_test
+@pytest.mark.fails_on_dbstore
+def test_assume_role_with_web_identity():
+    check_webidentity()
+    iam_client=get_iam_client()    
+    sts_client=get_sts_client()
+    default_endpoint=get_config_endpoint()
+    role_session_name=get_parameter_name()
+    thumbprint=get_thumbprint()
+    aud=get_aud()
+    token=get_token()
+    realm=get_realm_name()
+    
+    oidc_response = iam_client.create_open_id_connect_provider(
+    Url='http://localhost:8080/auth/realms/{}'.format(realm),
+    ThumbprintList=[
+        thumbprint,
+    ],
+    )
+    
+    policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_response["OpenIDConnectProviderArn"]+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\"],\"Condition\":{\"StringEquals\":{\"localhost:8080/auth/realms/"+realm+":app_id\":\""+aud+"\"}}}]}"
+    (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
+    assert role_response['Role']['Arn'] == 'arn:aws:iam:::role/'+general_role_name+''
+    
+    role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":\"arn:aws:s3:::*\"}}"
+    (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy)
+    if response:
+        assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+    else:
+        assert False, role_err
+    
+    resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=token)
+    assert resp['ResponseMetadata']['HTTPStatusCode'] == 200
+    
+    s3_client = boto3.client('s3',
+               aws_access_key_id = resp['Credentials']['AccessKeyId'],
+               aws_secret_access_key = resp['Credentials']['SecretAccessKey'],
+               aws_session_token = resp['Credentials']['SessionToken'],
+               endpoint_url=default_endpoint,
+               region_name='',
+               )
+    bucket_name = get_new_bucket_name()
+    s3bucket = s3_client.create_bucket(Bucket=bucket_name)
+    assert s3bucket['ResponseMetadata']['HTTPStatusCode'] == 200
+    bkt = s3_client.delete_bucket(Bucket=bucket_name)
+    assert bkt['ResponseMetadata']['HTTPStatusCode'] == 204
+    
+    oidc_remove=iam_client.delete_open_id_connect_provider(
+    OpenIDConnectProviderArn=oidc_response["OpenIDConnectProviderArn"]
+    )
+
+'''
+@pytest.mark.webidentity_test
+def test_assume_role_with_web_identity_invalid_webtoken():
+    resp_error=None
+    iam_client=get_iam_client()
+    sts_client=get_sts_client()
+    default_endpoint=get_config_endpoint()
+    role_session_name=get_parameter_name()
+    thumbprint=get_thumbprint()
+    aud=get_aud()
+    token=get_token()
+    realm=get_realm_name()
+
+    oidc_response = iam_client.create_open_id_connect_provider(
+    Url='http://localhost:8080/auth/realms/{}'.format(realm),
+    ThumbprintList=[
+        thumbprint,
+    ],
+    )
+
+    policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_response["OpenIDConnectProviderArn"]+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\"],\"Condition\":{\"StringEquals\":{\"localhost:8080/auth/realms/"+realm+":app_id\":\""+aud+"\"}}}]}"
+    (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
+    assert role_response['Role']['Arn'] == 'arn:aws:iam:::role/'+general_role_name+''
+
+    role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":\"arn:aws:s3:::*\"}}"
+    (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    resp=""
+    try:
+        resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken='abcdef')
+    except InvalidIdentityTokenException as e:
+        log.debug('{}'.format(resp))
+        log.debug('{}'.format(e.response.get("Error", {}).get("Code")))
+        log.debug('{}'.format(e))
+        resp_error = e.response.get("Error", {}).get("Code")
+    assert resp_error == 'AccessDenied'
+
+    oidc_remove=iam_client.delete_open_id_connect_provider(
+    OpenIDConnectProviderArn=oidc_response["OpenIDConnectProviderArn"]
+    )
+'''
+
+#######################
+# Session Policy Tests
+#######################
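+# A session policy passed to AssumeRoleWithWebIdentity further restricts the
+# assumed session: the effective permissions are the intersection of the role's
+# policies and the session policy. The tests below exercise that intersection.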
+
+@pytest.mark.webidentity_test
+@pytest.mark.session_policy
+@pytest.mark.fails_on_dbstore
+def test_session_policy_check_on_different_buckets():
+    check_webidentity()
+    iam_client=get_iam_client()
+    sts_client=get_sts_client()
+    default_endpoint=get_config_endpoint()
+    role_session_name=get_parameter_name()
+    thumbprint=get_thumbprint()
+    aud=get_aud()
+    token=get_token()
+    realm=get_realm_name()
+
+    url = 'http://localhost:8080/auth/realms/{}'.format(realm)
+    thumbprintlist = [thumbprint]
+    (oidc_arn,oidc_error) = create_oidc_provider(iam_client, url, None, thumbprintlist)
+    if oidc_error is not None:
+        raise RuntimeError('Unable to create/get openid connect provider {}'.format(oidc_error))
+
+    policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_arn+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\"],\"Condition\":{\"StringEquals\":{\"localhost:8080/auth/realms/"+realm+":app_id\":\""+aud+"\"}}}]}"
+    (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
+    assert role_response['Role']['Arn'] == 'arn:aws:iam:::role/'+general_role_name+''
+
+    role_policy_new = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":[\"arn:aws:s3:::test2\",\"arn:aws:s3:::test2/*\"]}}"
+
+    (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy_new)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    session_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":[\"s3:GetObject\",\"s3:PutObject\"],\"Resource\":[\"arn:aws:s3:::test1\",\"arn:aws:s3:::test1/*\"]}}"
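+    # the role policy only covers bucket 'test2' while the session policy only covers 'test1',
+    # so neither bucket can be created with the assumed-role credentials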
+
+    resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=token,Policy=session_policy)
+    assert resp['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    s3_client = boto3.client('s3',
+                aws_access_key_id = resp['Credentials']['AccessKeyId'],
+                aws_secret_access_key = resp['Credentials']['SecretAccessKey'],
+                aws_session_token = resp['Credentials']['SessionToken'],
+                endpoint_url=default_endpoint,
+                region_name='',
+                )
+
+    bucket_name_1 = 'test1'
+    try:
+        s3bucket = s3_client.create_bucket(Bucket=bucket_name_1)
+    except ClientError as e:
+        s3bucket_error = e.response.get("Error", {}).get("Code")
+    assert s3bucket_error == 'AccessDenied'
+
+    bucket_name_2 = 'test2'
+    try:
+        s3bucket = s3_client.create_bucket(Bucket=bucket_name_2)
+    except ClientError as e:
+        s3bucket_error = e.response.get("Error", {}).get("Code")
+    assert s3bucket_error == 'AccessDenied'
+
+    bucket_body = 'please-write-something'
+    try:
+        s3_put_obj = s3_client.put_object(Body=bucket_body, Bucket=bucket_name_1, Key="test-1.txt")
+    except ClientError as e:
+        s3_put_obj_error = e.response.get("Error", {}).get("Code")
+    assert s3_put_obj_error == 'NoSuchBucket'
+
+    oidc_remove=iam_client.delete_open_id_connect_provider(
+    OpenIDConnectProviderArn=oidc_arn
+    )
+
+
+@pytest.mark.webidentity_test
+@pytest.mark.session_policy
+@pytest.mark.fails_on_dbstore
+def test_session_policy_check_on_same_bucket():
+    check_webidentity()
+    iam_client=get_iam_client()
+    sts_client=get_sts_client()
+    default_endpoint=get_config_endpoint()
+    role_session_name=get_parameter_name()
+    thumbprint=get_thumbprint()
+    aud=get_aud()
+    token=get_token()
+    realm=get_realm_name()
+
+    url = 'http://localhost:8080/auth/realms/{}'.format(realm)
+    thumbprintlist = [thumbprint]
+    (oidc_arn,oidc_error) = create_oidc_provider(iam_client, url, None, thumbprintlist)
+    if oidc_error is not None:
+        raise RuntimeError('Unable to create/get openid connect provider {}'.format(oidc_error))
+
+    policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_arn+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\"],\"Condition\":{\"StringEquals\":{\"localhost:8080/auth/realms/"+realm+":app_id\":\""+aud+"\"}}}]}"
+    (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
+    assert role_response['Role']['Arn'] == 'arn:aws:iam:::role/'+general_role_name+''
+
+    role_policy_new = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":[\"*\"]}}"
+
+    (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy_new)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    s3_client_iam_creds = get_s3_client_using_iam_creds()
+
+    bucket_name_1 = 'test1'
+    s3bucket = s3_client_iam_creds.create_bucket(Bucket=bucket_name_1)
+    assert s3bucket['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    session_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":[\"s3:GetObject\",\"s3:PutObject\"],\"Resource\":[\"arn:aws:s3:::test1\",\"arn:aws:s3:::test1/*\"]}}"
+
+    resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=token,Policy=session_policy)
+    assert resp['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    s3_client = boto3.client('s3',
+                aws_access_key_id = resp['Credentials']['AccessKeyId'],
+                aws_secret_access_key = resp['Credentials']['SecretAccessKey'],
+                aws_session_token = resp['Credentials']['SessionToken'],
+                endpoint_url=default_endpoint,
+                region_name='',
+                )
+
+    bucket_body = 'this is a test file'
+    s3_put_obj = s3_client.put_object(Body=bucket_body, Bucket=bucket_name_1, Key="test-1.txt")
+    assert s3_put_obj['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    oidc_remove=iam_client.delete_open_id_connect_provider(
+    OpenIDConnectProviderArn=oidc_arn
+    )
+
+
+@pytest.mark.webidentity_test
+@pytest.mark.session_policy
+@pytest.mark.fails_on_dbstore
+def test_session_policy_check_put_obj_denial():
+    check_webidentity()
+    iam_client=get_iam_client()
+    iam_access_key=get_iam_access_key()
+    iam_secret_key=get_iam_secret_key()
+    sts_client=get_sts_client()
+    default_endpoint=get_config_endpoint()
+    role_session_name=get_parameter_name()
+    thumbprint=get_thumbprint()
+    aud=get_aud()
+    token=get_token()
+    realm=get_realm_name()
+
+    url = 'http://localhost:8080/auth/realms/{}'.format(realm)
+    thumbprintlist = [thumbprint]
+    (oidc_arn,oidc_error) = create_oidc_provider(iam_client, url, None, thumbprintlist)
+    if oidc_error is not None:
+        raise RuntimeError('Unable to create/get openid connect provider {}'.format(oidc_error))
+
+    policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_arn+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\"],\"Condition\":{\"StringEquals\":{\"localhost:8080/auth/realms/"+realm+":app_id\":\""+aud+"\"}}}]}"
+    (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
+    assert role_response['Role']['Arn'] == 'arn:aws:iam:::role/'+general_role_name+''
+
+    role_policy_new = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":[\"*\"]}}"
+
+    (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy_new)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    s3_client_iam_creds = get_s3_client_using_iam_creds()
+
+    bucket_name_1 = 'test1'
+    s3bucket = s3_client_iam_creds.create_bucket(Bucket=bucket_name_1)
+    assert s3bucket['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    session_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":[\"s3:GetObject\"],\"Resource\":[\"arn:aws:s3:::test1\",\"arn:aws:s3:::test1/*\"]}}"
+
+    resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=token,Policy=session_policy)
+    assert resp['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    s3_client = boto3.client('s3',
+                aws_access_key_id = resp['Credentials']['AccessKeyId'],
+                aws_secret_access_key = resp['Credentials']['SecretAccessKey'],
+                aws_session_token = resp['Credentials']['SessionToken'],
+                endpoint_url=default_endpoint,
+                region_name='',
+                )
+
+    bucket_body = 'this is a test file'
+    try:
+        s3_put_obj = s3_client.put_object(Body=bucket_body, Bucket=bucket_name_1, Key="test-1.txt")
+    except ClientError as e:
+        s3_put_obj_error = e.response.get("Error", {}).get("Code")
+    assert s3_put_obj_error == 'AccessDenied'
+
+    oidc_remove=iam_client.delete_open_id_connect_provider(
+    OpenIDConnectProviderArn=oidc_arn
+    )
+
+
+@pytest.mark.webidentity_test
+@pytest.mark.session_policy
+@pytest.mark.fails_on_dbstore
+def test_swapping_role_policy_and_session_policy():
+    check_webidentity()
+    iam_client=get_iam_client()
+    iam_access_key=get_iam_access_key()
+    iam_secret_key=get_iam_secret_key()
+    sts_client=get_sts_client()
+    default_endpoint=get_config_endpoint()
+    role_session_name=get_parameter_name()
+    thumbprint=get_thumbprint()
+    aud=get_aud()
+    token=get_token()
+    realm=get_realm_name()
+
+    url = 'http://localhost:8080/auth/realms/{}'.format(realm)
+    thumbprintlist = [thumbprint]
+    (oidc_arn,oidc_error) = create_oidc_provider(iam_client, url, None, thumbprintlist)
+    if oidc_error is not None:
+        raise RuntimeError('Unable to create/get openid connect provider {}'.format(oidc_error))
+
+    policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_arn+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\"],\"Condition\":{\"StringEquals\":{\"localhost:8080/auth/realms/"+realm+":app_id\":\""+aud+"\"}}}]}"
+    (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
+    assert role_response['Role']['Arn'] == 'arn:aws:iam:::role/'+general_role_name+''
+
+    role_policy_new = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":[\"s3:GetObject\",\"s3:PutObject\"],\"Resource\":[\"arn:aws:s3:::test1\",\"arn:aws:s3:::test1/*\"]}}"
+
+    (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy_new)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    s3_client_iam_creds = get_s3_client_using_iam_creds()
+
+    bucket_name_1 = 'test1'
+    s3bucket = s3_client_iam_creds.create_bucket(Bucket=bucket_name_1)
+    assert s3bucket['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    session_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":[\"*\"]}}"
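+    # here the broad grant is in the session policy and the restriction in the role policy;
+    # the intersection still allows PutObject on 'test1'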
+
+    resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=token,Policy=session_policy)
+    assert resp['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    s3_client = boto3.client('s3',
+                aws_access_key_id = resp['Credentials']['AccessKeyId'],
+                aws_secret_access_key = resp['Credentials']['SecretAccessKey'],
+                aws_session_token = resp['Credentials']['SessionToken'],
+                endpoint_url=default_endpoint,
+                region_name='',
+                )
+    bucket_body = 'this is a test file'
+    s3_put_obj = s3_client.put_object(Body=bucket_body, Bucket=bucket_name_1, Key="test-1.txt")
+    assert s3_put_obj['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    oidc_remove=iam_client.delete_open_id_connect_provider(
+    OpenIDConnectProviderArn=oidc_arn
+    )
+
+@pytest.mark.webidentity_test
+@pytest.mark.session_policy
+@pytest.mark.fails_on_dbstore
+def test_session_policy_check_different_op_permissions():
+    check_webidentity()
+    iam_client=get_iam_client()
+    iam_access_key=get_iam_access_key()
+    iam_secret_key=get_iam_secret_key()
+    sts_client=get_sts_client()
+    default_endpoint=get_config_endpoint()
+    role_session_name=get_parameter_name()
+    thumbprint=get_thumbprint()
+    aud=get_aud()
+    token=get_token()
+    realm=get_realm_name()
+
+    url = 'http://localhost:8080/auth/realms/{}'.format(realm)
+    thumbprintlist = [thumbprint]
+    (oidc_arn,oidc_error) = create_oidc_provider(iam_client, url, None, thumbprintlist)
+    if oidc_error is not None:
+        raise RuntimeError('Unable to create/get openid connect provider {}'.format(oidc_error))
+
+    policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_arn+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\"],\"Condition\":{\"StringEquals\":{\"localhost:8080/auth/realms/"+realm+":app_id\":\""+aud+"\"}}}]}"
+    (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
+    assert role_response['Role']['Arn'] == 'arn:aws:iam:::role/'+general_role_name+''
+
+    role_policy_new = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":[\"s3:PutObject\"],\"Resource\":[\"arn:aws:s3:::test1\",\"arn:aws:s3:::test1/*\"]}}"
+
+    (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy_new)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    s3_client_iam_creds = get_s3_client_using_iam_creds()
+
+    bucket_name_1 = 'test1'
+    s3bucket = s3_client_iam_creds.create_bucket(Bucket=bucket_name_1)
+    assert s3bucket['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    session_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":[\"s3:GetObject\"],\"Resource\":[\"arn:aws:s3:::test1\",\"arn:aws:s3:::test1/*\"]}}"
+
+    resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=token,Policy=session_policy)
+    assert resp['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    s3_client = boto3.client('s3',
+                aws_access_key_id = resp['Credentials']['AccessKeyId'],
+                aws_secret_access_key = resp['Credentials']['SecretAccessKey'],
+                aws_session_token = resp['Credentials']['SessionToken'],
+                endpoint_url=default_endpoint,
+                region_name='',
+                )
+
+    bucket_body = 'this is a test file'
+    try:
+        s3_put_obj = s3_client.put_object(Body=bucket_body, Bucket=bucket_name_1, Key="test-1.txt")
+    except ClientError as e:
+        s3_put_obj_error = e.response.get("Error", {}).get("Code")
+    assert s3_put_obj_error == 'AccessDenied'
+
+    oidc_remove=iam_client.delete_open_id_connect_provider(
+    OpenIDConnectProviderArn=oidc_arn
+    )
+
+
+@pytest.mark.webidentity_test
+@pytest.mark.session_policy
+@pytest.mark.fails_on_dbstore
+def test_session_policy_check_with_deny_effect():
+    check_webidentity()
+    iam_client=get_iam_client()
+    iam_access_key=get_iam_access_key()
+    iam_secret_key=get_iam_secret_key()
+    sts_client=get_sts_client()
+    default_endpoint=get_config_endpoint()
+    role_session_name=get_parameter_name()
+    thumbprint=get_thumbprint()
+    aud=get_aud()
+    token=get_token()
+    realm=get_realm_name()
+
+    url = 'http://localhost:8080/auth/realms/{}'.format(realm)
+    thumbprintlist = [thumbprint]
+    (oidc_arn,oidc_error) = create_oidc_provider(iam_client, url, None, thumbprintlist)
+    if oidc_error is not None:
+        raise RuntimeError('Unable to create/get openid connect provider {}'.format(oidc_error))
+
+    policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_arn+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\"],\"Condition\":{\"StringEquals\":{\"localhost:8080/auth/realms/"+realm+":app_id\":\""+aud+"\"}}}]}"
+    (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
+    assert role_response['Role']['Arn'] == 'arn:aws:iam:::role/'+general_role_name+''
+
+    role_policy_new = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Deny\",\"Action\":\"s3:*\",\"Resource\":[\"*\"]}}"
+
+    (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy_new)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    s3_client_iam_creds = get_s3_client_using_iam_creds()
+
+    bucket_name_1 = 'test1'
+    s3bucket = s3_client_iam_creds.create_bucket(Bucket=bucket_name_1)
+    assert s3bucket['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    session_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":[\"s3:PutObject\"],\"Resource\":[\"arn:aws:s3:::test1\",\"arn:aws:s3:::test1/*\"]}}"
+
+    resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=token,Policy=session_policy)
+    assert resp['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    s3_client = boto3.client('s3',
+                aws_access_key_id = resp['Credentials']['AccessKeyId'],
+                aws_secret_access_key = resp['Credentials']['SecretAccessKey'],
+                aws_session_token = resp['Credentials']['SessionToken'],
+                endpoint_url=default_endpoint,
+                region_name='',
+                )
+    bucket_body = 'this is a test file'
+    try:
+        s3_put_obj = s3_client.put_object(Body=bucket_body, Bucket=bucket_name_1, Key="test-1.txt")
+    except ClientError as e:
+        s3_put_obj_error = e.response.get("Error", {}).get("Code")
+    assert s3_put_obj_error == 'AccessDenied'
+
+    oidc_remove=iam_client.delete_open_id_connect_provider(
+    OpenIDConnectProviderArn=oidc_arn
+    )
+
+
+@pytest.mark.webidentity_test
+@pytest.mark.session_policy
+@pytest.mark.fails_on_dbstore
+def test_session_policy_check_with_deny_on_same_op():
+    check_webidentity()
+    iam_client=get_iam_client()
+    iam_access_key=get_iam_access_key()
+    iam_secret_key=get_iam_secret_key()
+    sts_client=get_sts_client()
+    default_endpoint=get_config_endpoint()
+    role_session_name=get_parameter_name()
+    thumbprint=get_thumbprint()
+    aud=get_aud()
+    token=get_token()
+    realm=get_realm_name()
+
+    url = 'http://localhost:8080/auth/realms/{}'.format(realm)
+    thumbprintlist = [thumbprint]
+    (oidc_arn,oidc_error) = create_oidc_provider(iam_client, url, None, thumbprintlist)
+    if oidc_error is not None:
+        raise RuntimeError('Unable to create/get openid connect provider {}'.format(oidc_error))
+
+    policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_arn+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\"],\"Condition\":{\"StringEquals\":{\"localhost:8080/auth/realms/"+realm+":app_id\":\""+aud+"\"}}}]}"
+    (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
+    assert role_response['Role']['Arn'] == 'arn:aws:iam:::role/'+general_role_name+''
+
+    role_policy_new = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":[\"s3:PutObject\"],\"Resource\":[\"arn:aws:s3:::test1\",\"arn:aws:s3:::test1/*\"]}}"
+
+    (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy_new)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    s3_client_iam_creds = get_s3_client_using_iam_creds()
+
+    bucket_name_1 = 'test1'
+    s3bucket = s3_client_iam_creds.create_bucket(Bucket=bucket_name_1)
+    assert s3bucket['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    session_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Deny\",\"Action\":[\"s3:PutObject\"],\"Resource\":[\"arn:aws:s3:::test1\",\"arn:aws:s3:::test1/*\"]}}"
+
+    resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=token,Policy=session_policy)
+    assert resp['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    s3_client = boto3.client('s3',
+                aws_access_key_id = resp['Credentials']['AccessKeyId'],
+                aws_secret_access_key = resp['Credentials']['SecretAccessKey'],
+                aws_session_token = resp['Credentials']['SessionToken'],
+                endpoint_url=default_endpoint,
+                region_name='',
+                )
+
+    bucket_body = 'this is a test file'
+    try:
+        s3_put_obj = s3_client.put_object(Body=bucket_body, Bucket=bucket_name_1, Key="test-1.txt")
+    except ClientError as e:
+        s3_put_obj_error = e.response.get("Error", {}).get("Code")
+    assert s3_put_obj_error == 'AccessDenied'
+
+    oidc_remove=iam_client.delete_open_id_connect_provider(
+    OpenIDConnectProviderArn=oidc_arn
+    )
+
+@pytest.mark.webidentity_test
+@pytest.mark.session_policy
+@pytest.mark.fails_on_dbstore
+def test_session_policy_bucket_policy_role_arn():
+    check_webidentity()
+    iam_client=get_iam_client()
+    sts_client=get_sts_client()
+    default_endpoint=get_config_endpoint()
+    role_session_name=get_parameter_name()
+    thumbprint=get_thumbprint()
+    aud=get_aud()
+    token=get_token()
+    realm=get_realm_name()
+
+    url = 'http://localhost:8080/auth/realms/{}'.format(realm)
+    thumbprintlist = [thumbprint]
+    (oidc_arn,oidc_error) = create_oidc_provider(iam_client, url, None, thumbprintlist)
+    if oidc_error is not None:
+        raise RuntimeError('Unable to create/get openid connect provider {}'.format(oidc_error))
+
+    policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_arn+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\"],\"Condition\":{\"StringEquals\":{\"localhost:8080/auth/realms/"+realm+":app_id\":\""+aud+"\"}}}]}"
+    (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
+    assert role_response['Role']['Arn'] == 'arn:aws:iam:::role/'+general_role_name+''
+    role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":[\"*\"]}}"
+
+    (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    s3client_iamcreds = get_s3_client_using_iam_creds()
+    bucket_name_1 = 'test1'
+    s3bucket = s3client_iamcreds.create_bucket(Bucket=bucket_name_1)
+    assert s3bucket['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    resource1 = "arn:aws:s3:::" + bucket_name_1
+    resource2 = "arn:aws:s3:::" + bucket_name_1 + "/*"
+    rolearn = "arn:aws:iam:::role/" + general_role_name
+    bucket_policy = json.dumps(
+    {
+        "Version": "2012-10-17",
+        "Statement": [{
+        "Effect": "Allow",
+        "Principal": {"AWS": "{}".format(rolearn)},
+        "Action": ["s3:GetObject","s3:PutObject"],
+        "Resource": [
+            "{}".format(resource1),
+            "{}".format(resource2)
+          ]
+        }]
+     })
+    s3client_iamcreds.put_bucket_policy(Bucket=bucket_name_1, Policy=bucket_policy)
+    session_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":[\"s3:PutObject\"],\"Resource\":[\"arn:aws:s3:::test1\",\"arn:aws:s3:::test1/*\"]}}"
+
+    resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=token,Policy=session_policy)
+    assert resp['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    s3_client = boto3.client('s3',
+                aws_access_key_id = resp['Credentials']['AccessKeyId'],
+                aws_secret_access_key = resp['Credentials']['SecretAccessKey'],
+                aws_session_token = resp['Credentials']['SessionToken'],
+                endpoint_url=default_endpoint,
+                region_name='',
+                )
+    bucket_body = 'this is a test file'
+    s3_put_obj = s3_client.put_object(Body=bucket_body, Bucket=bucket_name_1, Key="test-1.txt")
+    assert s3_put_obj['ResponseMetadata']['HTTPStatusCode'] == 200
+
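+    # the bucket policy names the role ARN, so the session policy still applies;
+    # it only allows PutObject, so GetObject is expected to be denied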
+    try:
+        obj = s3_client.get_object(Bucket=bucket_name_1, Key="test-1.txt")
+    except ClientError as e:
+        s3object_error = e.response.get("Error", {}).get("Code")
+    assert s3object_error == 'AccessDenied'
+
+    oidc_remove=iam_client.delete_open_id_connect_provider(
+    OpenIDConnectProviderArn=oidc_arn
+    )
+
+@pytest.mark.webidentity_test
+@pytest.mark.session_policy
+@pytest.mark.fails_on_dbstore
+def test_session_policy_bucket_policy_session_arn():
+    check_webidentity()
+    iam_client=get_iam_client()
+    sts_client=get_sts_client()
+    default_endpoint=get_config_endpoint()
+    role_session_name=get_parameter_name()
+    thumbprint=get_thumbprint()
+    aud=get_aud()
+    token=get_token()
+    realm=get_realm_name()
+
+    url = 'http://localhost:8080/auth/realms/{}'.format(realm)
+    thumbprintlist = [thumbprint]
+    (oidc_arn,oidc_error) = create_oidc_provider(iam_client, url, None, thumbprintlist)
+    if oidc_error is not None:
+        raise RuntimeError('Unable to create/get openid connect provider {}'.format(oidc_error))
+
+    policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_arn+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\"],\"Condition\":{\"StringEquals\":{\"localhost:8080/auth/realms/"+realm+":app_id\":\""+aud+"\"}}}]}"
+    (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
+    assert role_response['Role']['Arn'] == 'arn:aws:iam:::role/'+general_role_name+''
+    role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":[\"*\"]}}"
+
+    (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    s3client_iamcreds = get_s3_client_using_iam_creds()
+    bucket_name_1 = 'test1'
+    s3bucket = s3client_iamcreds.create_bucket(Bucket=bucket_name_1)
+    assert s3bucket['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    resource1 = "arn:aws:s3:::" + bucket_name_1
+    resource2 = "arn:aws:s3:::" + bucket_name_1 + "/*"
+    rolesessionarn = "arn:aws:iam:::assumed-role/" + general_role_name + "/" + role_session_name
+    bucket_policy = json.dumps(
+    {
+        "Version": "2012-10-17",
+        "Statement": [{
+        "Effect": "Allow",
+        "Principal": {"AWS": "{}".format(rolesessionarn)},
+        "Action": ["s3:GetObject","s3:PutObject"],
+        "Resource": [
+            "{}".format(resource1),
+            "{}".format(resource2)
+          ]
+        }]
+    })
+    s3client_iamcreds.put_bucket_policy(Bucket=bucket_name_1, Policy=bucket_policy)
+    session_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":[\"s3:PutObject\"],\"Resource\":[\"arn:aws:s3:::test1\",\"arn:aws:s3:::test1/*\"]}}"
+
+    resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=token,Policy=session_policy)
+    assert resp['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    s3_client = boto3.client('s3',
+                aws_access_key_id = resp['Credentials']['AccessKeyId'],
+                aws_secret_access_key = resp['Credentials']['SecretAccessKey'],
+                aws_session_token = resp['Credentials']['SessionToken'],
+                endpoint_url=default_endpoint,
+                region_name='',
+                )
+    bucket_body = 'this is a test file'
+    s3_put_obj = s3_client.put_object(Body=bucket_body, Bucket=bucket_name_1, Key="test-1.txt")
+    assert s3_put_obj['ResponseMetadata']['HTTPStatusCode'] == 200
+
+
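+    # the bucket policy grants the assumed-role session ARN directly, so GetObject
+    # succeeds even though the session policy only allows PutObject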
+    s3_get_obj = s3_client.get_object(Bucket=bucket_name_1, Key="test-1.txt")
+    assert s3_get_obj['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    oidc_remove=iam_client.delete_open_id_connect_provider(
+    OpenIDConnectProviderArn=oidc_arn
+    )
+
+@pytest.mark.webidentity_test
+@pytest.mark.session_policy
+@pytest.mark.fails_on_dbstore
+def test_session_policy_copy_object():
+    check_webidentity()
+    iam_client=get_iam_client()
+    sts_client=get_sts_client()
+    default_endpoint=get_config_endpoint()
+    role_session_name=get_parameter_name()
+    thumbprint=get_thumbprint()
+    aud=get_aud()
+    token=get_token()
+    realm=get_realm_name()
+
+    url = 'http://localhost:8080/auth/realms/{}'.format(realm)
+    thumbprintlist = [thumbprint]
+    (oidc_arn,oidc_error) = create_oidc_provider(iam_client, url, None, thumbprintlist)
+    if oidc_error is not None:
+        raise RuntimeError('Unable to create/get openid connect provider {}'.format(oidc_error))
+
+    policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_arn+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\"],\"Condition\":{\"StringEquals\":{\"localhost:8080/auth/realms/"+realm+":app_id\":\""+aud+"\"}}}]}"
+    (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
+    assert role_response['Role']['Arn'] == 'arn:aws:iam:::role/'+general_role_name+''
+    role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":[\"*\"]}}"
+
+    (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    s3client_iamcreds = get_s3_client_using_iam_creds()
+    bucket_name_1 = 'test1'
+    s3bucket = s3client_iamcreds.create_bucket(Bucket=bucket_name_1)
+    assert s3bucket['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    resource1 = "arn:aws:s3:::" + bucket_name_1
+    resource2 = "arn:aws:s3:::" + bucket_name_1 + "/*"
+    rolesessionarn = "arn:aws:iam:::assumed-role/" + general_role_name + "/" + role_session_name
+    print (rolesessionarn)
+    bucket_policy = json.dumps(
+    {
+        "Version": "2012-10-17",
+        "Statement": [{
+        "Effect": "Allow",
+        "Principal": {"AWS": "{}".format(rolesessionarn)},
+        "Action": ["s3:GetObject","s3:PutObject"],
+        "Resource": [
+            "{}".format(resource1),
+            "{}".format(resource2)
+          ]
+        }]
+     })
+    s3client_iamcreds.put_bucket_policy(Bucket=bucket_name_1, Policy=bucket_policy)
+    session_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":[\"s3:PutObject\"],\"Resource\":[\"arn:aws:s3:::test1\",\"arn:aws:s3:::test1/*\"]}}"
+
+    resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=token,Policy=session_policy)
+    assert resp['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    s3_client = boto3.client('s3',
+                aws_access_key_id = resp['Credentials']['AccessKeyId'],
+                aws_secret_access_key = resp['Credentials']['SecretAccessKey'],
+                aws_session_token = resp['Credentials']['SessionToken'],
+                endpoint_url=default_endpoint,
+                region_name='',
+                )
+    bucket_body = 'this is a test file'
+    s3_put_obj = s3_client.put_object(Body=bucket_body, Bucket=bucket_name_1, Key="test-1.txt")
+    assert s3_put_obj['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    copy_source = {
+    'Bucket': bucket_name_1,
+    'Key': 'test-1.txt'
+    }
+
+    s3_client.copy(copy_source, bucket_name_1, "test-2.txt")
+
+    s3_get_obj = s3_client.get_object(Bucket=bucket_name_1, Key="test-2.txt")
+    assert s3_get_obj['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    oidc_remove=iam_client.delete_open_id_connect_provider(
+    OpenIDConnectProviderArn=oidc_arn
+    )
+
+@pytest.mark.webidentity_test
+@pytest.mark.session_policy
+@pytest.mark.fails_on_dbstore
+def test_session_policy_no_bucket_role_policy():
+    check_webidentity()
+    iam_client=get_iam_client()
+    sts_client=get_sts_client()
+    default_endpoint=get_config_endpoint()
+    role_session_name=get_parameter_name()
+    thumbprint=get_thumbprint()
+    aud=get_aud()
+    token=get_token()
+    realm=get_realm_name()
+
+    url = 'http://localhost:8080/auth/realms/{}'.format(realm)
+    thumbprintlist = [thumbprint]
+    (oidc_arn,oidc_error) = create_oidc_provider(iam_client, url, None, thumbprintlist)
+    if oidc_error is not None:
+        raise RuntimeError('Unable to create/get openid connect provider {}'.format(oidc_error))
+
+    policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_arn+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\"],\"Condition\":{\"StringEquals\":{\"localhost:8080/auth/realms/"+realm+":app_id\":\""+aud+"\"}}}]}"
+    (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
+    assert role_response['Role']['Arn'] == 'arn:aws:iam:::role/'+general_role_name+''
+
+    s3client_iamcreds = get_s3_client_using_iam_creds()
+    bucket_name_1 = 'test1'
+    s3bucket = s3client_iamcreds.create_bucket(Bucket=bucket_name_1)
+    assert s3bucket['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    session_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":[\"s3:PutObject\",\"s3:GetObject\"],\"Resource\":[\"arn:aws:s3:::test1\",\"arn:aws:s3:::test1/*\"]}}"
+
+    resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=token,Policy=session_policy)
+    assert resp['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    s3_client = boto3.client('s3',
+                aws_access_key_id = resp['Credentials']['AccessKeyId'],
+                aws_secret_access_key = resp['Credentials']['SecretAccessKey'],
+                aws_session_token = resp['Credentials']['SessionToken'],
+                endpoint_url=default_endpoint,
+                region_name='',
+                )
+    bucket_body = 'this is a test file'
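+    # No role policy was attached and no bucket policy exists, so the session policy alone grants nothing and
+    # the PutObject below is expected to fail with AccessDenied.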
+    try:
+        s3_put_obj = s3_client.put_object(Body=bucket_body, Bucket=bucket_name_1, Key="test-1.txt")
+    except ClientError as e:
+        s3putobj_error = e.response.get("Error", {}).get("Code")
+    assert s3putobj_error == 'AccessDenied'
+
+    oidc_remove=iam_client.delete_open_id_connect_provider(
+    OpenIDConnectProviderArn=oidc_arn
+    )
+
+@pytest.mark.webidentity_test
+@pytest.mark.session_policy
+@pytest.mark.fails_on_dbstore
+def test_session_policy_bucket_policy_deny():
+    check_webidentity()
+    iam_client=get_iam_client()
+    sts_client=get_sts_client()
+    default_endpoint=get_config_endpoint()
+    role_session_name=get_parameter_name()
+    thumbprint=get_thumbprint()
+    aud=get_aud()
+    token=get_token()
+    realm=get_realm_name()
+
+    url = 'http://localhost:8080/auth/realms/{}'.format(realm)
+    thumbprintlist = [thumbprint]
+    (oidc_arn,oidc_error) = create_oidc_provider(iam_client, url, None, thumbprintlist)
+    if oidc_error is not None:
+        raise RuntimeError('Unable to create/get openid connect provider {}'.format(oidc_error))
+
+    policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_arn+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\"],\"Condition\":{\"StringEquals\":{\"localhost:8080/auth/realms/"+realm+":app_id\":\""+aud+"\"}}}]}"
+    (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
+    assert role_response['Role']['Arn'] == 'arn:aws:iam:::role/'+general_role_name+''
+    role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":[\"*\"]}}"
+
+    (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    s3client_iamcreds = get_s3_client_using_iam_creds()
+    bucket_name_1 = 'test1'
+    s3bucket = s3client_iamcreds.create_bucket(Bucket=bucket_name_1)
+    assert s3bucket['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    resource1 = "arn:aws:s3:::" + bucket_name_1
+    resource2 = "arn:aws:s3:::" + bucket_name_1 + "/*"
+    rolesessionarn = "arn:aws:iam:::assumed-role/" + general_role_name + "/" + role_session_name
+    bucket_policy = json.dumps(
+    {
+        "Version": "2012-10-17",
+        "Statement": [{
+        "Effect": "Deny",
+        "Principal": {"AWS": "{}".format(rolesessionarn)},
+        "Action": ["s3:GetObject","s3:PutObject"],
+        "Resource": [
+            "{}".format(resource1),
+            "{}".format(resource2)
+          ]
+        }]
+    })
+    s3client_iamcreds.put_bucket_policy(Bucket=bucket_name_1, Policy=bucket_policy)
+    session_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":[\"s3:PutObject\"],\"Resource\":[\"arn:aws:s3:::test1\",\"arn:aws:s3:::test1/*\"]}}"
+
+    resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=token,Policy=session_policy)
+    assert resp['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    s3_client = boto3.client('s3',
+                aws_access_key_id = resp['Credentials']['AccessKeyId'],
+                aws_secret_access_key = resp['Credentials']['SecretAccessKey'],
+                aws_session_token = resp['Credentials']['SessionToken'],
+                endpoint_url=default_endpoint,
+                region_name='',
+                )
+    bucket_body = 'this is a test file'
+
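+    # The bucket policy above explicitly denies GetObject/PutObject to the assumed-role session; an explicit
+    # Deny overrides the Allow in the role and session policies, so the PutObject below is expected to fail
+    # with AccessDenied.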
+    try:
+        s3_put_obj = s3_client.put_object(Body=bucket_body, Bucket=bucket_name_1, Key="test-1.txt")
+    except ClientError as e:
+        s3putobj_error = e.response.get("Error", {}).get("Code")
+    assert s3putobj_error == 'AccessDenied'
+
+    oidc_remove=iam_client.delete_open_id_connect_provider(
+    OpenIDConnectProviderArn=oidc_arn
+    )
+
+@pytest.mark.webidentity_test
+@pytest.mark.token_claims_trust_policy_test
+@pytest.mark.fails_on_dbstore
+def test_assume_role_with_web_identity_with_sub():
+    check_webidentity()
+    iam_client=get_iam_client()
+    sts_client=get_sts_client()
+    default_endpoint=get_config_endpoint()
+    role_session_name=get_parameter_name()
+    thumbprint=get_thumbprint()
+    sub=get_sub()
+    token=get_token()
+    realm=get_realm_name()
+
+    oidc_response = iam_client.create_open_id_connect_provider(
+    Url='http://localhost:8080/auth/realms/{}'.format(realm),
+    ThumbprintList=[
+        thumbprint,
+    ],
+    )
+
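+    # Trust policy that allows AssumeRoleWithWebIdentity only when the web token's 'sub' claim equals the
+    # configured subject.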
+    policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_response["OpenIDConnectProviderArn"]+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\"],\"Condition\":{\"StringEquals\":{\"localhost:8080/auth/realms/"+realm+":sub\":\""+sub+"\"}}}]}"
+    (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
+    assert role_response['Role']['Arn'] == 'arn:aws:iam:::role/'+general_role_name+''
+
+    role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":\"arn:aws:s3:::*\"}}"
+    (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=token)
+    assert resp['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    s3_client = boto3.client('s3',
+        aws_access_key_id = resp['Credentials']['AccessKeyId'],
+        aws_secret_access_key = resp['Credentials']['SecretAccessKey'],
+        aws_session_token = resp['Credentials']['SessionToken'],
+        endpoint_url=default_endpoint,
+        region_name='',
+        )
+    bucket_name = get_new_bucket_name()
+    s3bucket = s3_client.create_bucket(Bucket=bucket_name)
+    assert s3bucket['ResponseMetadata']['HTTPStatusCode'] == 200
+    bkt = s3_client.delete_bucket(Bucket=bucket_name)
+    assert bkt['ResponseMetadata']['HTTPStatusCode'] == 204
+
+    oidc_remove=iam_client.delete_open_id_connect_provider(
+    OpenIDConnectProviderArn=oidc_response["OpenIDConnectProviderArn"]
+    )
+
+@pytest.mark.webidentity_test
+@pytest.mark.token_claims_trust_policy_test
+@pytest.mark.fails_on_dbstore
+def test_assume_role_with_web_identity_with_azp():
+    check_webidentity()
+    iam_client=get_iam_client()
+    sts_client=get_sts_client()
+    default_endpoint=get_config_endpoint()
+    role_session_name=get_parameter_name()
+    thumbprint=get_thumbprint()
+    azp=get_azp()
+    token=get_token()
+    realm=get_realm_name()
+
+    oidc_response = iam_client.create_open_id_connect_provider(
+    Url='http://localhost:8080/auth/realms/{}'.format(realm),
+    ThumbprintList=[
+        thumbprint,
+    ],
+    )
+
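+    # Same pattern as the 'sub' test above, but conditioned on the token's 'azp' (authorized party) claim.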
+    policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_response["OpenIDConnectProviderArn"]+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\"],\"Condition\":{\"StringEquals\":{\"localhost:8080/auth/realms/"+realm+":azp\":\""+azp+"\"}}}]}"
+    (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
+    assert role_response['Role']['Arn'] == 'arn:aws:iam:::role/'+general_role_name+''
+
+    role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":\"arn:aws:s3:::*\"}}"
+    (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=token)
+    assert resp['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    s3_client = boto3.client('s3',
+        aws_access_key_id = resp['Credentials']['AccessKeyId'],
+        aws_secret_access_key = resp['Credentials']['SecretAccessKey'],
+        aws_session_token = resp['Credentials']['SessionToken'],
+        endpoint_url=default_endpoint,
+        region_name='',
+        )
+    bucket_name = get_new_bucket_name()
+    s3bucket = s3_client.create_bucket(Bucket=bucket_name)
+    assert s3bucket['ResponseMetadata']['HTTPStatusCode'] == 200
+    bkt = s3_client.delete_bucket(Bucket=bucket_name)
+    assert bkt['ResponseMetadata']['HTTPStatusCode'] == 204
+
+    oidc_remove=iam_client.delete_open_id_connect_provider(
+    OpenIDConnectProviderArn=oidc_response["OpenIDConnectProviderArn"]
+    )
+
+@pytest.mark.webidentity_test
+@pytest.mark.abac_test
+@pytest.mark.token_request_tag_trust_policy_test
+@pytest.mark.fails_on_dbstore
+def test_assume_role_with_web_identity_with_request_tag():
+    check_webidentity()
+    iam_client=get_iam_client()
+    sts_client=get_sts_client()
+    default_endpoint=get_config_endpoint()
+    role_session_name=get_parameter_name()
+    thumbprint=get_thumbprint()
+    user_token=get_user_token()
+    realm=get_realm_name()
+
+    oidc_response = iam_client.create_open_id_connect_provider(
+    Url='http://localhost:8080/auth/realms/{}'.format(realm),
+    ThumbprintList=[
+        thumbprint,
+    ],
+    )
+
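+    # ABAC trust policy: sts:TagSession is required and the session must be tagged Department=Engineering,
+    # which is expected to come from claims in the user's web identity token.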
+    policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_response["OpenIDConnectProviderArn"]+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\",\"sts:TagSession\"],\"Condition\":{\"StringEquals\":{\"aws:RequestTag/Department\":\"Engineering\"}}}]}"
+    (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
+    assert role_response['Role']['Arn'] == 'arn:aws:iam:::role/'+general_role_name+''
+
+    role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":\"arn:aws:s3:::*\"}}"
+    (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=user_token)
+    assert resp['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    s3_client = boto3.client('s3',
+        aws_access_key_id = resp['Credentials']['AccessKeyId'],
+        aws_secret_access_key = resp['Credentials']['SecretAccessKey'],
+        aws_session_token = resp['Credentials']['SessionToken'],
+        endpoint_url=default_endpoint,
+        region_name='',
+        )
+    bucket_name = get_new_bucket_name()
+    s3bucket = s3_client.create_bucket(Bucket=bucket_name)
+    assert s3bucket['ResponseMetadata']['HTTPStatusCode'] == 200
+    bkt = s3_client.delete_bucket(Bucket=bucket_name)
+    assert bkt['ResponseMetadata']['HTTPStatusCode'] == 204
+
+    oidc_remove=iam_client.delete_open_id_connect_provider(
+    OpenIDConnectProviderArn=oidc_response["OpenIDConnectProviderArn"]
+    )
+
+@pytest.mark.webidentity_test
+@pytest.mark.abac_test
+@pytest.mark.token_principal_tag_role_policy_test
+@pytest.mark.fails_on_dbstore
+def test_assume_role_with_web_identity_with_principal_tag():
+    check_webidentity()
+    iam_client=get_iam_client()
+    sts_client=get_sts_client()
+    default_endpoint=get_config_endpoint()
+    role_session_name=get_parameter_name()
+    thumbprint=get_thumbprint()
+    user_token=get_user_token()
+    realm=get_realm_name()
+
+    oidc_response = iam_client.create_open_id_connect_provider(
+    Url='http://localhost:8080/auth/realms/{}'.format(realm),
+    ThumbprintList=[
+        thumbprint,
+    ],
+    )
+
+    policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_response["OpenIDConnectProviderArn"]+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\",\"sts:TagSession\"],\"Condition\":{\"StringEquals\":{\"aws:RequestTag/Department\":\"Engineering\"}}}]}"
+    (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
+    assert role_response['Role']['Arn'] == 'arn:aws:iam:::role/'+general_role_name+''
+
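+    # The role policy below conditions all s3 actions on aws:PrincipalTag/Department, i.e. the session tag
+    # carried in from the token must be 'Engineering'.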
+    role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":\"arn:aws:s3:::*\",\"Condition\":{\"StringEquals\":{\"aws:PrincipalTag/Department\":\"Engineering\"}}}}"
+    (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=user_token)
+    assert resp['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    s3_client = boto3.client('s3',
+        aws_access_key_id = resp['Credentials']['AccessKeyId'],
+        aws_secret_access_key = resp['Credentials']['SecretAccessKey'],
+        aws_session_token = resp['Credentials']['SessionToken'],
+        endpoint_url=default_endpoint,
+        region_name='',
+        )
+    bucket_name = get_new_bucket_name()
+    s3bucket = s3_client.create_bucket(Bucket=bucket_name)
+    assert s3bucket['ResponseMetadata']['HTTPStatusCode'] == 200
+    bkt = s3_client.delete_bucket(Bucket=bucket_name)
+    assert bkt['ResponseMetadata']['HTTPStatusCode'] == 204
+
+    oidc_remove=iam_client.delete_open_id_connect_provider(
+    OpenIDConnectProviderArn=oidc_response["OpenIDConnectProviderArn"]
+    )
+
+@pytest.mark.webidentity_test
+@pytest.mark.abac_test
+@pytest.mark.token_principal_tag_role_policy_test
+@pytest.mark.fails_on_dbstore
+def test_assume_role_with_web_identity_for_all_values():
+    check_webidentity()
+    iam_client=get_iam_client()
+    sts_client=get_sts_client()
+    default_endpoint=get_config_endpoint()
+    role_session_name=get_parameter_name()
+    thumbprint=get_thumbprint()
+    user_token=get_user_token()
+    realm=get_realm_name()
+
+    oidc_response = iam_client.create_open_id_connect_provider(
+    Url='http://localhost:8080/auth/realms/{}'.format(realm),
+    ThumbprintList=[
+        thumbprint,
+    ],
+    )
+
+    policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_response["OpenIDConnectProviderArn"]+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\",\"sts:TagSession\"],\"Condition\":{\"StringEquals\":{\"aws:RequestTag/Department\":\"Engineering\"}}}]}"
+    (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
+    assert role_response['Role']['Arn'] == 'arn:aws:iam:::role/'+general_role_name+''
+
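+    # ForAllValues:StringEquals: every Department tag on the session (presumably 'Engineering' and
+    # 'Marketing') must appear in the policy list, so listing both values keeps the condition satisfied and
+    # the bucket operations below succeed.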
+    role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":\"arn:aws:s3:::*\",\"Condition\":{\"ForAllValues:StringEquals\":{\"aws:PrincipalTag/Department\":[\"Engineering\",\"Marketing\"]}}}}"
+    (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=user_token)
+    assert resp['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    s3_client = boto3.client('s3',
+        aws_access_key_id = resp['Credentials']['AccessKeyId'],
+        aws_secret_access_key = resp['Credentials']['SecretAccessKey'],
+        aws_session_token = resp['Credentials']['SessionToken'],
+        endpoint_url=default_endpoint,
+        region_name='',
+        )
+    bucket_name = get_new_bucket_name()
+    s3bucket = s3_client.create_bucket(Bucket=bucket_name)
+    assert s3bucket['ResponseMetadata']['HTTPStatusCode'] == 200
+    bkt = s3_client.delete_bucket(Bucket=bucket_name)
+    assert bkt['ResponseMetadata']['HTTPStatusCode'] == 204
+
+    oidc_remove=iam_client.delete_open_id_connect_provider(
+    OpenIDConnectProviderArn=oidc_response["OpenIDConnectProviderArn"]
+    )
+
+@pytest.mark.webidentity_test
+@pytest.mark.abac_test
+@pytest.mark.token_principal_tag_role_policy_test
+@pytest.mark.fails_on_dbstore
+def test_assume_role_with_web_identity_for_all_values_deny():
+    check_webidentity()
+    iam_client=get_iam_client()
+    sts_client=get_sts_client()
+    default_endpoint=get_config_endpoint()
+    role_session_name=get_parameter_name()
+    thumbprint=get_thumbprint()
+    user_token=get_user_token()
+    realm=get_realm_name()
+
+    oidc_response = iam_client.create_open_id_connect_provider(
+    Url='http://localhost:8080/auth/realms/{}'.format(realm),
+    ThumbprintList=[
+        thumbprint,
+    ],
+    )
+
+    policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_response["OpenIDConnectProviderArn"]+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\",\"sts:TagSession\"],\"Condition\":{\"StringEquals\":{\"aws:RequestTag/Department\":\"Engineering\"}}}]}"
+    (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
+    assert role_response['Role']['Arn'] == 'arn:aws:iam:::role/'+general_role_name+''
+
+    # ForAllValues: the condition is true only if every value of the key in the request matches at least one
+    # value listed in the policy. The session presumably carries both 'Engineering' and 'Marketing' Department
+    # tags, so restricting the list to ['Engineering'] makes the condition fail and the CreateBucket below is
+    # expected to be denied.
+    role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":\"arn:aws:s3:::*\",\"Condition\":{\"ForAllValues:StringEquals\":{\"aws:PrincipalTag/Department\":[\"Engineering\"]}}}}"
+    (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=user_token)
+    assert resp['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    s3_client = boto3.client('s3',
+        aws_access_key_id = resp['Credentials']['AccessKeyId'],
+        aws_secret_access_key = resp['Credentials']['SecretAccessKey'],
+        aws_session_token = resp['Credentials']['SessionToken'],
+        endpoint_url=default_endpoint,
+        region_name='',
+        )
+    bucket_name = get_new_bucket_name()
+    try:
+        s3bucket = s3_client.create_bucket(Bucket=bucket_name)
+    except ClientError as e:
+        s3bucket_error = e.response.get("Error", {}).get("Code")
+    assert s3bucket_error == 'AccessDenied'
+
+    oidc_remove=iam_client.delete_open_id_connect_provider(
+    OpenIDConnectProviderArn=oidc_response["OpenIDConnectProviderArn"]
+    )
+
+@pytest.mark.webidentity_test
+@pytest.mark.abac_test
+@pytest.mark.token_tag_keys_test
+@pytest.mark.fails_on_dbstore
+def test_assume_role_with_web_identity_tag_keys_trust_policy():
+    check_webidentity()
+    iam_client=get_iam_client()
+    sts_client=get_sts_client()
+    default_endpoint=get_config_endpoint()
+    role_session_name=get_parameter_name()
+    thumbprint=get_thumbprint()
+    user_token=get_user_token()
+    realm=get_realm_name()
+
+    oidc_response = iam_client.create_open_id_connect_provider(
+    Url='http://localhost:8080/auth/realms/{}'.format(realm),
+    ThumbprintList=[
+        thumbprint,
+    ],
+    )
+
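+    # aws:TagKeys in the trust policy constrains which session tag keys may be passed with sts:TagSession;
+    # only 'Department' is permitted here.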
+    policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_response["OpenIDConnectProviderArn"]+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\",\"sts:TagSession\"],\"Condition\":{\"StringEquals\":{\"aws:TagKeys\":\"Department\"}}}]}"
+    (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
+    assert role_response['Role']['Arn'] == 'arn:aws:iam:::role/'+general_role_name+''
+
+    role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":\"arn:aws:s3:::*\",\"Condition\":{\"ForAnyValue:StringEquals\":{\"aws:PrincipalTag/Department\":[\"Engineering\"]}}}}"
+    (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=user_token)
+    assert resp['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    s3_client = boto3.client('s3',
+        aws_access_key_id = resp['Credentials']['AccessKeyId'],
+        aws_secret_access_key = resp['Credentials']['SecretAccessKey'],
+        aws_session_token = resp['Credentials']['SessionToken'],
+        endpoint_url=default_endpoint,
+        region_name='',
+        )
+    bucket_name = get_new_bucket_name()
+    s3bucket = s3_client.create_bucket(Bucket=bucket_name)
+    assert s3bucket['ResponseMetadata']['HTTPStatusCode'] == 200
+    bkt = s3_client.delete_bucket(Bucket=bucket_name)
+    assert bkt['ResponseMetadata']['HTTPStatusCode'] == 204
+
+    oidc_remove=iam_client.delete_open_id_connect_provider(
+    OpenIDConnectProviderArn=oidc_response["OpenIDConnectProviderArn"]
+    )
+
+@pytest.mark.webidentity_test
+@pytest.mark.abac_test
+@pytest.mark.token_tag_keys_test
+@pytest.mark.fails_on_dbstore
+def test_assume_role_with_web_identity_tag_keys_role_policy():
+    check_webidentity()
+    iam_client=get_iam_client()
+    sts_client=get_sts_client()
+    default_endpoint=get_config_endpoint()
+    role_session_name=get_parameter_name()
+    thumbprint=get_thumbprint()
+    user_token=get_user_token()
+    realm=get_realm_name()
+
+    oidc_response = iam_client.create_open_id_connect_provider(
+    Url='http://localhost:8080/auth/realms/{}'.format(realm),
+    ThumbprintList=[
+        thumbprint,
+    ],
+    )
+
+    policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_response["OpenIDConnectProviderArn"]+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\",\"sts:TagSession\"],\"Condition\":{\"StringEquals\":{\"aws:RequestTag/Department\":\"Engineering\"}}}]}"
+    (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
+    assert role_response['Role']['Arn'] == 'arn:aws:iam:::role/'+general_role_name+''
+
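+    # Here aws:TagKeys is evaluated in the role's permission policy instead: s3 actions are allowed only when
+    # the session was tagged with the 'Department' key.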
+    role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":\"arn:aws:s3:::*\",\"Condition\":{\"StringEquals\":{\"aws:TagKeys\":[\"Department\"]}}}}"
+    (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=user_token)
+    assert resp['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    s3_client = boto3.client('s3',
+        aws_access_key_id = resp['Credentials']['AccessKeyId'],
+        aws_secret_access_key = resp['Credentials']['SecretAccessKey'],
+        aws_session_token = resp['Credentials']['SessionToken'],
+        endpoint_url=default_endpoint,
+        region_name='',
+        )
+    bucket_name = get_new_bucket_name()
+    s3bucket = s3_client.create_bucket(Bucket=bucket_name)
+    assert s3bucket['ResponseMetadata']['HTTPStatusCode'] == 200
+    bkt = s3_client.delete_bucket(Bucket=bucket_name)
+    assert bkt['ResponseMetadata']['HTTPStatusCode'] == 204
+
+    oidc_remove=iam_client.delete_open_id_connect_provider(
+    OpenIDConnectProviderArn=oidc_response["OpenIDConnectProviderArn"]
+    )
+
+@pytest.mark.webidentity_test
+@pytest.mark.abac_test
+@pytest.mark.token_resource_tags_test
+@pytest.mark.fails_on_dbstore
+def test_assume_role_with_web_identity_resource_tag():
+    check_webidentity()
+    iam_client=get_iam_client()
+    sts_client=get_sts_client()
+    default_endpoint=get_config_endpoint()
+    role_session_name=get_parameter_name()
+    thumbprint=get_thumbprint()
+    user_token=get_user_token()
+    realm=get_realm_name()
+
+    s3_res_iam_creds = get_s3_resource_using_iam_creds()
+
+    s3_client_iam_creds = s3_res_iam_creds.meta.client
+
+    bucket_name = get_new_bucket_name()
+    s3bucket = s3_client_iam_creds.create_bucket(Bucket=bucket_name)
+    assert s3bucket['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    bucket_tagging = s3_res_iam_creds.BucketTagging(bucket_name)
+    Set_Tag = bucket_tagging.put(Tagging={'TagSet':[{'Key':'Department', 'Value': 'Engineering'},{'Key':'Department', 'Value': 'Marketing'}]})
+
+    oidc_response = iam_client.create_open_id_connect_provider(
+    Url='http://localhost:8080/auth/realms/{}'.format(realm),
+    ThumbprintList=[
+        thumbprint,
+    ],
+    )
+
+    policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_response["OpenIDConnectProviderArn"]+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\",\"sts:TagSession\"],\"Condition\":{\"StringEquals\":{\"aws:RequestTag/Department\":\"Engineering\"}}}]}"
+    (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
+    assert role_response['Role']['Arn'] == 'arn:aws:iam:::role/'+general_role_name+''
+
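+    # s3:ResourceTag/Department must match a tag on the bucket itself; the bucket was tagged 'Engineering'
+    # above, so the PutObject below is expected to succeed.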
+    role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":\"arn:aws:s3:::*\",\"Condition\":{\"StringEquals\":{\"s3:ResourceTag/Department\":[\"Engineering\"]}}}}"
+    (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=user_token)
+    assert resp['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    s3_client = boto3.client('s3',
+        aws_access_key_id = resp['Credentials']['AccessKeyId'],
+        aws_secret_access_key = resp['Credentials']['SecretAccessKey'],
+        aws_session_token = resp['Credentials']['SessionToken'],
+        endpoint_url=default_endpoint,
+        region_name='',
+        )
+
+    bucket_body = 'this is a test file'
+    s3_put_obj = s3_client.put_object(Body=bucket_body, Bucket=bucket_name, Key="test-1.txt")
+    assert s3_put_obj['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    oidc_remove=iam_client.delete_open_id_connect_provider(
+    OpenIDConnectProviderArn=oidc_response["OpenIDConnectProviderArn"]
+    )
+
+@pytest.mark.webidentity_test
+@pytest.mark.abac_test
+@pytest.mark.token_resource_tags_test
+@pytest.mark.fails_on_dbstore
+def test_assume_role_with_web_identity_resource_tag_deny():
+    check_webidentity()
+    iam_client=get_iam_client()
+    sts_client=get_sts_client()
+    default_endpoint=get_config_endpoint()
+    role_session_name=get_parameter_name()
+    thumbprint=get_thumbprint()
+    user_token=get_user_token()
+    realm=get_realm_name()
+
+    s3_res_iam_creds = get_s3_resource_using_iam_creds()
+
+    s3_client_iam_creds = s3_res_iam_creds.meta.client
+
+    bucket_name = get_new_bucket_name()
+    s3bucket = s3_client_iam_creds.create_bucket(Bucket=bucket_name)
+    assert s3bucket['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    oidc_response = iam_client.create_open_id_connect_provider(
+    Url='http://localhost:8080/auth/realms/{}'.format(realm),
+    ThumbprintList=[
+        thumbprint,
+    ],
+    )
+
+    policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_response["OpenIDConnectProviderArn"]+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\",\"sts:TagSession\"],\"Condition\":{\"StringEquals\":{\"aws:RequestTag/Department\":\"Engineering\"}}}]}"
+    (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
+    assert role_response['Role']['Arn'] == 'arn:aws:iam:::role/'+general_role_name+''
+
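+    # The bucket created above carries no tags, so the s3:ResourceTag condition cannot match and the
+    # PutObject below is expected to fail with AccessDenied.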
+    role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":\"arn:aws:s3:::*\",\"Condition\":{\"StringEquals\":{\"s3:ResourceTag/Department\":[\"Engineering\"]}}}}"
+    (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=user_token)
+    assert resp['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    s3_client = boto3.client('s3',
+        aws_access_key_id = resp['Credentials']['AccessKeyId'],
+        aws_secret_access_key = resp['Credentials']['SecretAccessKey'],
+        aws_session_token = resp['Credentials']['SessionToken'],
+        endpoint_url=default_endpoint,
+        region_name='',
+        )
+
+    bucket_body = 'this is a test file'
+    try:
+        s3_put_obj = s3_client.put_object(Body=bucket_body, Bucket=bucket_name, Key="test-1.txt")
+    except ClientError as e:
+        s3_put_obj_error = e.response.get("Error", {}).get("Code")
+    assert s3_put_obj_error == 'AccessDenied'
+
+    oidc_remove=iam_client.delete_open_id_connect_provider(
+    OpenIDConnectProviderArn=oidc_response["OpenIDConnectProviderArn"]
+    )
+
+@pytest.mark.webidentity_test
+@pytest.mark.abac_test
+@pytest.mark.token_resource_tags_test
+@pytest.mark.fails_on_dbstore
+def test_assume_role_with_web_identity_wrong_resource_tag_deny():
+    check_webidentity()
+    iam_client=get_iam_client()
+    sts_client=get_sts_client()
+    default_endpoint=get_config_endpoint()
+    role_session_name=get_parameter_name()
+    thumbprint=get_thumbprint()
+    user_token=get_user_token()
+    realm=get_realm_name()
+
+    s3_res_iam_creds = get_s3_resource_using_iam_creds()
+
+    s3_client_iam_creds = s3_res_iam_creds.meta.client
+
+    bucket_name = get_new_bucket_name()
+    s3bucket = s3_client_iam_creds.create_bucket(Bucket=bucket_name)
+    assert s3bucket['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    bucket_tagging = s3_res_iam_creds.BucketTagging(bucket_name)
+    Set_Tag = bucket_tagging.put(Tagging={'TagSet':[{'Key':'Department', 'Value': 'WrongResourcetag'}]})
+
+    oidc_response = iam_client.create_open_id_connect_provider(
+    Url='http://localhost:8080/auth/realms/{}'.format(realm),
+    ThumbprintList=[
+        thumbprint,
+    ],
+    )
+
+    policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_response["OpenIDConnectProviderArn"]+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\",\"sts:TagSession\"],\"Condition\":{\"StringEquals\":{\"aws:RequestTag/Department\":\"Engineering\"}}}]}"
+    (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
+    assert role_response['Role']['Arn'] == 'arn:aws:iam:::role/'+general_role_name+''
+
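+    # The bucket is tagged Department=WrongResourcetag, which does not equal the required 'Engineering', so
+    # the PutObject below is expected to fail with AccessDenied.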
+    role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":\"arn:aws:s3:::*\",\"Condition\":{\"StringEquals\":{\"s3:ResourceTag/Department\":[\"Engineering\"]}}}}"
+    (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=user_token)
+    assert resp['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    s3_client = boto3.client('s3',
+        aws_access_key_id = resp['Credentials']['AccessKeyId'],
+        aws_secret_access_key = resp['Credentials']['SecretAccessKey'],
+        aws_session_token = resp['Credentials']['SessionToken'],
+        endpoint_url=default_endpoint,
+        region_name='',
+        )
+
+    bucket_body = 'this is a test file'
+    try:
+        s3_put_obj = s3_client.put_object(Body=bucket_body, Bucket=bucket_name, Key="test-1.txt")
+    except ClientError as e:
+        s3_put_obj_error = e.response.get("Error", {}).get("Code")
+    assert s3_put_obj_error == 'AccessDenied'
+
+    oidc_remove=iam_client.delete_open_id_connect_provider(
+    OpenIDConnectProviderArn=oidc_response["OpenIDConnectProviderArn"]
+    )
+
+@pytest.mark.webidentity_test
+@pytest.mark.abac_test
+@pytest.mark.token_resource_tags_test
+@pytest.mark.fails_on_dbstore
+def test_assume_role_with_web_identity_resource_tag_princ_tag():
+    check_webidentity()
+    iam_client=get_iam_client()
+    sts_client=get_sts_client()
+    default_endpoint=get_config_endpoint()
+    role_session_name=get_parameter_name()
+    thumbprint=get_thumbprint()
+    user_token=get_user_token()
+    realm=get_realm_name()
+
+    s3_res_iam_creds = get_s3_resource_using_iam_creds()
+
+    s3_client_iam_creds = s3_res_iam_creds.meta.client
+
+    bucket_name = get_new_bucket_name()
+    s3bucket = s3_client_iam_creds.create_bucket(Bucket=bucket_name)
+    assert s3bucket['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    bucket_tagging = s3_res_iam_creds.BucketTagging(bucket_name)
+    Set_Tag = bucket_tagging.put(Tagging={'TagSet':[{'Key':'Department', 'Value': 'Engineering'}]})
+
+    oidc_response = iam_client.create_open_id_connect_provider(
+    Url='http://localhost:8080/auth/realms/{}'.format(realm),
+    ThumbprintList=[
+        thumbprint,
+    ],
+    )
+
+    policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_response["OpenIDConnectProviderArn"]+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\",\"sts:TagSession\"],\"Condition\":{\"StringEquals\":{\"aws:RequestTag/Department\":\"Engineering\"}}}]}"
+    (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
+    assert role_response['Role']['Arn'] == 'arn:aws:iam:::role/'+general_role_name+''
+
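+    # The policy variable ${aws:PrincipalTag/Department} is substituted with the session's Department tag, so
+    # access is granted only on buckets whose Department tag matches the principal's tag.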
+    role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":\"arn:aws:s3:::*\",\"Condition\":{\"StringEquals\":{\"s3:ResourceTag/Department\":[\"${aws:PrincipalTag/Department}\"]}}}}"
+    (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=user_token)
+    assert resp['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    s3_client = boto3.client('s3',
+        aws_access_key_id = resp['Credentials']['AccessKeyId'],
+        aws_secret_access_key = resp['Credentials']['SecretAccessKey'],
+        aws_session_token = resp['Credentials']['SessionToken'],
+        endpoint_url=default_endpoint,
+        region_name='',
+        )
+
+    bucket_body = 'this is a test file'
+    tags = 'Department=Engineering&Department=Marketing'
+    key = "test-1.txt"
+    s3_put_obj = s3_client.put_object(Body=bucket_body, Bucket=bucket_name, Key=key, Tagging=tags)
+    assert s3_put_obj['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    s3_get_obj = s3_client.get_object(Bucket=bucket_name, Key=key)
+    assert s3_get_obj['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    oidc_remove=iam_client.delete_open_id_connect_provider(
+    OpenIDConnectProviderArn=oidc_response["OpenIDConnectProviderArn"]
+    )
+
+@pytest.mark.webidentity_test
+@pytest.mark.abac_test
+@pytest.mark.token_resource_tags_test
+@pytest.mark.fails_on_dbstore
+def test_assume_role_with_web_identity_resource_tag_copy_obj():
+    check_webidentity()
+    iam_client=get_iam_client()
+    sts_client=get_sts_client()
+    default_endpoint=get_config_endpoint()
+    role_session_name=get_parameter_name()
+    thumbprint=get_thumbprint()
+    user_token=get_user_token()
+    realm=get_realm_name()
+
+    s3_res_iam_creds = get_s3_resource_using_iam_creds()
+
+    s3_client_iam_creds = s3_res_iam_creds.meta.client
+
+    # Create two buckets and attach the same Department tag to both.
+    bucket_name = get_new_bucket_name()
+    s3bucket = s3_client_iam_creds.create_bucket(Bucket=bucket_name)
+    assert s3bucket['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    bucket_tagging = s3_res_iam_creds.BucketTagging(bucket_name)
+    Set_Tag = bucket_tagging.put(Tagging={'TagSet':[{'Key':'Department', 'Value': 'Engineering'}]})
+
+    copy_bucket_name = get_new_bucket_name()
+    s3bucket = s3_client_iam_creds.create_bucket(Bucket=copy_bucket_name)
+    assert s3bucket['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    bucket_tagging = s3_res_iam_creds.BucketTagging(copy_bucket_name)
+    Set_Tag = bucket_tagging.put(Tagging={'TagSet':[{'Key':'Department', 'Value': 'Engineering'}]})
+
+    oidc_response = iam_client.create_open_id_connect_provider(
+    Url='http://localhost:8080/auth/realms/{}'.format(realm),
+    ThumbprintList=[
+        thumbprint,
+    ],
+    )
+
+    policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_response["OpenIDConnectProviderArn"]+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\",\"sts:TagSession\"],\"Condition\":{\"StringEquals\":{\"aws:RequestTag/Department\":\"Engineering\"}}}]}"
+    (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
+    assert role_response['Role']['Arn'] == 'arn:aws:iam:::role/'+general_role_name+''
+
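+    # Both source and destination buckets carry Department=Engineering, matching the principal tag, so the
+    # copies within and across buckets below are expected to succeed.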
+    role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":\"arn:aws:s3:::*\",\"Condition\":{\"StringEquals\":{\"s3:ResourceTag/Department\":[\"${aws:PrincipalTag/Department}\"]}}}}"
+    (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=user_token)
+    assert resp['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    s3_client = boto3.client('s3',
+        aws_access_key_id = resp['Credentials']['AccessKeyId'],
+        aws_secret_access_key = resp['Credentials']['SecretAccessKey'],
+        aws_session_token = resp['Credentials']['SessionToken'],
+        endpoint_url=default_endpoint,
+        region_name='',
+        )
+
+    bucket_body = 'this is a test file'
+    tags = 'Department=Engineering'
+    key = "test-1.txt"
+    s3_put_obj = s3_client.put_object(Body=bucket_body, Bucket=bucket_name, Key=key, Tagging=tags)
+    assert s3_put_obj['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    # Copy within the same bucket.
+    copy_source = {
+    'Bucket': bucket_name,
+    'Key': 'test-1.txt'
+    }
+
+    s3_client.copy(copy_source, bucket_name, "test-2.txt")
+
+    s3_get_obj = s3_client.get_object(Bucket=bucket_name, Key="test-2.txt")
+    assert s3_get_obj['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    # Copy to the other (identically tagged) bucket.
+    copy_source = {
+    'Bucket': bucket_name,
+    'Key': 'test-1.txt'
+    }
+
+    s3_client.copy(copy_source, copy_bucket_name, "test-1.txt")
+
+    s3_get_obj = s3_client.get_object(Bucket=copy_bucket_name, Key="test-1.txt")
+    assert s3_get_obj['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    oidc_remove=iam_client.delete_open_id_connect_provider(
+    OpenIDConnectProviderArn=oidc_response["OpenIDConnectProviderArn"]
+    )
+
+@pytest.mark.webidentity_test
+@pytest.mark.abac_test
+@pytest.mark.token_role_tags_test
+@pytest.mark.fails_on_dbstore
+def test_assume_role_with_web_identity_role_resource_tag():
+    check_webidentity()
+    iam_client=get_iam_client()
+    sts_client=get_sts_client()
+    default_endpoint=get_config_endpoint()
+    role_session_name=get_parameter_name()
+    thumbprint=get_thumbprint()
+    user_token=get_user_token()
+    realm=get_realm_name()
+
+    s3_res_iam_creds = get_s3_resource_using_iam_creds()
+
+    s3_client_iam_creds = s3_res_iam_creds.meta.client
+
+    bucket_name = get_new_bucket_name()
+    s3bucket = s3_client_iam_creds.create_bucket(Bucket=bucket_name)
+    assert s3bucket['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    bucket_tagging = s3_res_iam_creds.BucketTagging(bucket_name)
+    Set_Tag = bucket_tagging.put(Tagging={'TagSet':[{'Key':'Department', 'Value': 'Engineering'},{'Key':'Department', 'Value': 'Marketing'}]})
+
+    oidc_response = iam_client.create_open_id_connect_provider(
+    Url='http://localhost:8080/auth/realms/{}'.format(realm),
+    ThumbprintList=[
+        thumbprint,
+    ],
+    )
+
+    # iam:ResourceTag refers to a tag attached to the role itself, so the role may be assumed only when it
+    # carries a tag that matches the trust policy condition.
+    policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_response["OpenIDConnectProviderArn"]+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\",\"sts:TagSession\"],\"Condition\":{\"StringEquals\":{\"iam:ResourceTag/Department\":\"Engineering\"}}}]}"
+    tags_list = [
+            {'Key':'Department','Value':'Engineering'},
+            {'Key':'Department','Value':'Marketing'}
+        ]
+
+    (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None,tags_list)
+    assert role_response['Role']['Arn'] == 'arn:aws:iam:::role/'+general_role_name+''
+
+    role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":\"arn:aws:s3:::*\",\"Condition\":{\"StringEquals\":{\"s3:ResourceTag/Department\":[\"Engineering\"]}}}}"
+    (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy)
+    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=user_token)
+    assert resp['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    s3_client = boto3.client('s3',
+        aws_access_key_id = resp['Credentials']['AccessKeyId'],
+        aws_secret_access_key = resp['Credentials']['SecretAccessKey'],
+        aws_session_token = resp['Credentials']['SessionToken'],
+        endpoint_url=default_endpoint,
+        region_name='',
+        )
+
+    bucket_body = 'this is a test file'
+    s3_put_obj = s3_client.put_object(Body=bucket_body, Bucket=bucket_name, Key="test-1.txt")
+    assert s3_put_obj['ResponseMetadata']['HTTPStatusCode'] == 200
+
+    oidc_remove=iam_client.delete_open_id_connect_provider(
+    OpenIDConnectProviderArn=oidc_response["OpenIDConnectProviderArn"]
+    )
diff --git a/s3tests/functional/test_utils.py b/s3tests/functional/test_utils.py
new file mode 100644 (file)
index 0000000..c0dd398
--- /dev/null
@@ -0,0 +1,9 @@
+from . import utils
+
+def test_generate():
+    FIVE_MB = 5 * 1024 * 1024
+    assert len(''.join(utils.generate_random(0))) == 0
+    assert len(''.join(utils.generate_random(1))) == 1
+    assert len(''.join(utils.generate_random(FIVE_MB - 1))) == FIVE_MB - 1
+    assert len(''.join(utils.generate_random(FIVE_MB))) == FIVE_MB
+    assert len(''.join(utils.generate_random(FIVE_MB + 1))) == FIVE_MB + 1
diff --git a/s3tests/functional/utils.py b/s3tests/functional/utils.py
new file mode 100644 (file)
index 0000000..ab84c16
--- /dev/null
@@ -0,0 +1,47 @@
+import random
+import requests
+import string
+import time
+
+def assert_raises(excClass, callableObj, *args, **kwargs):
+    """
+    Like unittest.TestCase.assertRaises, but returns the exception.
+    """
+    try:
+        callableObj(*args, **kwargs)
+    except excClass as e:
+        return e
+    else:
+        if hasattr(excClass, '__name__'):
+            excName = excClass.__name__
+        else:
+            excName = str(excClass)
+        raise AssertionError("%s not raised" % excName)
+
+def generate_random(size, part_size=5*1024*1024):
+    """
+    Generate the specified number of bytes of random data.
+    (Each part is actually built by repeating a single random 1 KB chunk.)
+    """
+    chunk = 1024
+    allowed = string.ascii_letters
+    for x in range(0, size, part_size):
+        strpart = ''.join([allowed[random.randint(0, len(allowed) - 1)] for _ in range(chunk)])
+        s = ''
+        left = size - x
+        this_part_size = min(left, part_size)
+        for y in range(this_part_size // chunk):
+            s = s + strpart
+        s = s + strpart[:(this_part_size % chunk)]
+        yield s
+        if (x == size):
+            return
+
+def _get_status(response):
+    status = response['ResponseMetadata']['HTTPStatusCode']
+    return status
+
+def _get_status_and_error_code(response):
+    status = response['ResponseMetadata']['HTTPStatusCode']
+    error_code = response['Error']['Code']
+    return status, error_code
diff --git a/s3tests_boto3/__init__.py b/s3tests_boto3/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/s3tests_boto3/common.py b/s3tests_boto3/common.py
deleted file mode 100644 (file)
index 987ec6b..0000000
+++ /dev/null
@@ -1,301 +0,0 @@
-import boto.s3.connection
-import munch
-import itertools
-import os
-import random
-import string
-import yaml
-import re
-from lxml import etree
-
-from doctest import Example
-from lxml.doctestcompare import LXMLOutputChecker
-
-s3 = munch.Munch()
-config = munch.Munch()
-prefix = ''
-
-bucket_counter = itertools.count(1)
-key_counter = itertools.count(1)
-
-def choose_bucket_prefix(template, max_len=30):
-    """
-    Choose a prefix for our test buckets, so they're easy to identify.
-
-    Use template and feed it more and more random filler, until it's
-    as long as possible but still below max_len.
-    """
-    rand = ''.join(
-        random.choice(string.ascii_lowercase + string.digits)
-        for c in range(255)
-        )
-
-    while rand:
-        s = template.format(random=rand)
-        if len(s) <= max_len:
-            return s
-        rand = rand[:-1]
-
-    raise RuntimeError(
-        'Bucket prefix template is impossible to fulfill: {template!r}'.format(
-            template=template,
-            ),
-        )
-
-def nuke_bucket(bucket):
-    try:
-        bucket.set_canned_acl('private')
-        # TODO: deleted_cnt and the while loop is a work around for rgw
-        # not sending the
-        deleted_cnt = 1
-        while deleted_cnt:
-            deleted_cnt = 0
-            for key in bucket.list():
-                print('Cleaning bucket {bucket} key {key}'.format(
-                    bucket=bucket,
-                    key=key,
-                    ))
-                key.set_canned_acl('private')
-                key.delete()
-                deleted_cnt += 1
-        bucket.delete()
-    except boto.exception.S3ResponseError as e:
-        # TODO workaround for buggy rgw that fails to send
-        # error_code, remove
-        if (e.status == 403
-            and e.error_code is None
-            and e.body == ''):
-            e.error_code = 'AccessDenied'
-        if e.error_code != 'AccessDenied':
-            print('GOT UNWANTED ERROR', e.error_code)
-            raise
-        # seems like we're not the owner of the bucket; ignore
-        pass
-
-def nuke_prefixed_buckets():
-    for name, conn in list(s3.items()):
-        print('Cleaning buckets from connection {name}'.format(name=name))
-        for bucket in conn.get_all_buckets():
-            if bucket.name.startswith(prefix):
-                print('Cleaning bucket {bucket}'.format(bucket=bucket))
-                nuke_bucket(bucket)
-
-    print('Done with cleanup of test buckets.')
-
-def read_config(fp):
-    config = munch.Munch()
-    g = yaml.safe_load_all(fp)
-    for new in g:
-        config.update(munch.Munchify(new))
-    return config
-
-def connect(conf):
-    mapping = dict(
-        port='port',
-        host='host',
-        is_secure='is_secure',
-        access_key='aws_access_key_id',
-        secret_key='aws_secret_access_key',
-        )
-    kwargs = dict((mapping[k],v) for (k,v) in conf.items() if k in mapping)
-    #process calling_format argument
-    calling_formats = dict(
-        ordinary=boto.s3.connection.OrdinaryCallingFormat(),
-        subdomain=boto.s3.connection.SubdomainCallingFormat(),
-        vhost=boto.s3.connection.VHostCallingFormat(),
-        )
-    kwargs['calling_format'] = calling_formats['ordinary']
-    if 'calling_format' in conf:
-        raw_calling_format = conf['calling_format']
-        try:
-            kwargs['calling_format'] = calling_formats[raw_calling_format]
-        except KeyError:
-            raise RuntimeError(
-                'calling_format unknown: %r' % raw_calling_format
-                )
-    # TODO test vhost calling format
-    conn = boto.s3.connection.S3Connection(**kwargs)
-    return conn
-
-def setup():
-    global s3, config, prefix
-    s3.clear()
-    config.clear()
-
-    try:
-        path = os.environ['S3TEST_CONF']
-    except KeyError:
-        raise RuntimeError(
-            'To run tests, point environment '
-            + 'variable S3TEST_CONF to a config file.',
-            )
-    with file(path) as f:
-        config.update(read_config(f))
-
-    # These 3 should always be present.
-    if 's3' not in config:
-        raise RuntimeError('Your config file is missing the s3 section!')
-    if 'defaults' not in config.s3:
-        raise RuntimeError('Your config file is missing the s3.defaults section!')
-    if 'fixtures' not in config:
-        raise RuntimeError('Your config file is missing the fixtures section!')
-
-    template = config.fixtures.get('bucket prefix', 'test-{random}-')
-    prefix = choose_bucket_prefix(template=template)
-    if prefix == '':
-        raise RuntimeError("Empty Prefix! Aborting!")
-
-    defaults = config.s3.defaults
-    for section in list(config.s3.keys()):
-        if section == 'defaults':
-            continue
-
-        conf = {}
-        conf.update(defaults)
-        conf.update(config.s3[section])
-        conn = connect(conf)
-        s3[section] = conn
-
-    # WARNING! we actively delete all buckets we see with the prefix
-    # we've chosen! Choose your prefix with care, and don't reuse
-    # credentials!
-
-    # We also assume nobody else is going to use buckets with that
-    # prefix. This is racy but given enough randomness, should not
-    # really fail.
-    nuke_prefixed_buckets()
-
-def get_new_bucket(connection=None):
-    """
-    Get a bucket that exists and is empty.
-
-    Always recreates a bucket from scratch. This is useful to also
-    reset ACLs and such.
-    """
-    if connection is None:
-        connection = s3.main
-    name = '{prefix}{num}'.format(
-        prefix=prefix,
-        num=next(bucket_counter),
-        )
-    # the only way for this to fail with a pre-existing bucket is if
-    # someone raced us between setup nuke_prefixed_buckets and here;
-    # ignore that as astronomically unlikely
-    bucket = connection.create_bucket(name)
-    return bucket
-
-def teardown():
-    nuke_prefixed_buckets()
-
-def with_setup_kwargs(setup, teardown=None):
-    """Decorator to add setup and/or teardown methods to a test function::
-
-      @with_setup_args(setup, teardown)
-      def test_something():
-          " ... "
-
-    The setup function should return (kwargs) which will be passed to
-    test function, and teardown function.
-
-    Note that `with_setup_kwargs` is useful *only* for test functions, not for test
-    methods or inside of TestCase subclasses.
-    """
-    def decorate(func):
-        kwargs = {}
-
-        def test_wrapped(*args, **kwargs2):
-            k2 = kwargs.copy()
-            k2.update(kwargs2)
-            k2['testname'] = func.__name__
-            func(*args, **k2)
-
-        test_wrapped.__name__ = func.__name__
-
-        def setup_wrapped():
-            k = setup()
-            kwargs.update(k)
-            if hasattr(func, 'setup'):
-                func.setup()
-        test_wrapped.setup = setup_wrapped
-
-        if teardown:
-            def teardown_wrapped():
-                if hasattr(func, 'teardown'):
-                    func.teardown()
-                teardown(**kwargs)
-
-            test_wrapped.teardown = teardown_wrapped
-        else:
-            if hasattr(func, 'teardown'):
-                test_wrapped.teardown = func.teardown()
-        return test_wrapped
-    return decorate
-
-# Demo case for the above, when you run test_gen():
-# _test_gen will run twice,
-# with the following stderr printing
-# setup_func {'b': 2}
-# testcase ('1',) {'b': 2, 'testname': '_test_gen'}
-# teardown_func {'b': 2}
-# setup_func {'b': 2}
-# testcase () {'b': 2, 'testname': '_test_gen'}
-# teardown_func {'b': 2}
-# 
-#def setup_func():
-#    kwargs = {'b': 2}
-#    print("setup_func", kwargs, file=sys.stderr)
-#    return kwargs
-#
-#def teardown_func(**kwargs):
-#    print("teardown_func", kwargs, file=sys.stderr)
-#
-#@with_setup_kwargs(setup=setup_func, teardown=teardown_func)
-#def _test_gen(*args, **kwargs):
-#    print("testcase", args, kwargs, file=sys.stderr)
-#
-#def test_gen():
-#    yield _test_gen, '1'
-#    yield _test_gen
-
-def trim_xml(xml_str):
-    p = etree.XMLParser(remove_blank_text=True)
-    elem = etree.XML(xml_str, parser=p)
-    return etree.tostring(elem)
-
-def normalize_xml(xml, pretty_print=True):
-    if xml is None:
-        return xml
-
-    root = etree.fromstring(xml.encode(encoding='ascii'))
-
-    for element in root.iter('*'):
-        if element.text is not None and not element.text.strip():
-            element.text = None
-        if element.text is not None:
-            element.text = element.text.strip().replace("\n", "").replace("\r", "")
-        if element.tail is not None and not element.tail.strip():
-            element.tail = None
-        if element.tail is not None:
-            element.tail = element.tail.strip().replace("\n", "").replace("\r", "")
-
-    # Sort the elements
-    for parent in root.xpath('//*[./*]'): # Search for parent elements
-        parent[:] = sorted(parent, key=lambda x: x.tag)
-
-    xmlstr = etree.tostring(root, encoding="utf-8", xml_declaration=True, pretty_print=pretty_print).decode('utf-8')
-    # there are two different DTD URIs
-    xmlstr = re.sub(r'xmlns="[^"]+"', 'xmlns="s3"', xmlstr)
-    xmlstr = re.sub(r'xmlns=\'[^\']+\'', 'xmlns="s3"', xmlstr)
-    for uri in ['http://doc.s3.amazonaws.com/doc/2006-03-01/', 'http://s3.amazonaws.com/doc/2006-03-01/']:
-        xmlstr = xmlstr.replace(uri, 'URI-DTD')
-    #xmlstr = re.sub(r'>\s+', '>', xmlstr, count=0, flags=re.MULTILINE)
-    return xmlstr
-
-def assert_xml_equal(got, want):
-    assert want is not None, 'Wanted XML cannot be None'
-    if got is None:
-        raise AssertionError('Got input to validate was None')
-    checker = LXMLOutputChecker()
-    if not checker.check_output(want, got, 0):
-        message = checker.output_difference(Example("", want), got, 0)
-        raise AssertionError(message)
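
A minimal sketch of how the XML comparison helpers above fit together, assuming two logically identical payloads that differ only in insignificant whitespace (the literal XML is illustrative and not taken from the tests)::

    compact = '<Tagging><TagSet><Tag><Key>k</Key><Value>v</Value></Tag></TagSet></Tagging>'
    pretty = '<Tagging>\n  <TagSet>\n    <Tag><Key>k</Key><Value>v</Value></Tag>\n  </TagSet>\n</Tagging>'
    assert_xml_equal(compact, pretty)        # passes: only whitespace differs
    assert_xml_equal(compact, '<Tagging/>')  # raises AssertionError describing the element-level difference
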
diff --git a/s3tests_boto3/functional/__init__.py b/s3tests_boto3/functional/__init__.py
deleted file mode 100644 (file)
index 555cafc..0000000
+++ /dev/null
@@ -1,766 +0,0 @@
-import pytest
-import boto3
-from botocore import UNSIGNED
-from botocore.client import Config
-from botocore.exceptions import ClientError
-from botocore.handlers import disable_signing
-import configparser
-import datetime
-import time
-import os
-import munch
-import random
-import string
-import itertools
-import urllib3
-import re
-
-config = munch.Munch()
-
-# this will be assigned by setup()
-prefix = None
-
-def get_prefix():
-    assert prefix is not None
-    return prefix
-
-def choose_bucket_prefix(template, max_len=30):
-    """
-    Choose a prefix for our test buckets, so they're easy to identify.
-
-    Use template and feed it more and more random filler, until it's
-    as long as possible but still below max_len.
-    """
-    rand = ''.join(
-        random.choice(string.ascii_lowercase + string.digits)
-        for c in range(255)
-        )
-
-    while rand:
-        s = template.format(random=rand)
-        if len(s) <= max_len:
-            return s
-        rand = rand[:-1]
-
-    raise RuntimeError(
-        'Bucket prefix template is impossible to fulfill: {template!r}'.format(
-            template=template,
-            ),
-        )
-
-def get_buckets_list(client=None, prefix=None):
-    if client == None:
-        client = get_client()
-    if prefix == None:
-        prefix = get_prefix()
-    response = client.list_buckets()
-    bucket_dicts = response['Buckets']
-    buckets_list = []
-    for bucket in bucket_dicts:
-        if prefix in bucket['Name']:
-            buckets_list.append(bucket['Name'])
-
-    return buckets_list
-
-def get_objects_list(bucket, client=None, prefix=None):
-    if client == None:
-        client = get_client()
-
-    if prefix == None:
-        response = client.list_objects(Bucket=bucket)
-    else:
-        response = client.list_objects(Bucket=bucket, Prefix=prefix)
-    objects_list = []
-
-    if 'Contents' in response:
-        contents = response['Contents']
-        for obj in contents:
-            objects_list.append(obj['Key'])
-
-    return objects_list
-
-# generator function that returns object listings in batches, where each
-# batch is a list of dicts compatible with delete_objects()
-def list_versions(client, bucket, batch_size):
-    kwargs = {'Bucket': bucket, 'MaxKeys': batch_size}
-    truncated = True
-    while truncated:
-        listing = client.list_object_versions(**kwargs)
-
-        kwargs['KeyMarker'] = listing.get('NextKeyMarker')
-        kwargs['VersionIdMarker'] = listing.get('NextVersionIdMarker')
-        truncated = listing['IsTruncated']
-
-        objs = listing.get('Versions', []) + listing.get('DeleteMarkers', [])
-        if len(objs):
-            yield [{'Key': o['Key'], 'VersionId': o['VersionId']} for o in objs]
-
-def nuke_bucket(client, bucket):
-    batch_size = 128
-    max_retain_date = None
-
-    # list and delete objects in batches
-    for objects in list_versions(client, bucket, batch_size):
-        delete = client.delete_objects(Bucket=bucket,
-                Delete={'Objects': objects, 'Quiet': True},
-                BypassGovernanceRetention=True)
-
-        # check for object locks on 403 AccessDenied errors
-        for err in delete.get('Errors', []):
-            if err.get('Code') != 'AccessDenied':
-                continue
-            try:
-                res = client.get_object_retention(Bucket=bucket,
-                        Key=err['Key'], VersionId=err['VersionId'])
-                retain_date = res['Retention']['RetainUntilDate']
-                if not max_retain_date or max_retain_date < retain_date:
-                    max_retain_date = retain_date
-            except ClientError:
-                pass
-
-    if max_retain_date:
-        # wait out the retention period (up to 60 seconds)
-        now = datetime.datetime.now(max_retain_date.tzinfo)
-        if max_retain_date > now:
-            delta = max_retain_date - now
-            if delta.total_seconds() > 60:
-                raise RuntimeError('bucket {} still has objects \
-locked for {} more seconds, not waiting for \
-bucket cleanup'.format(bucket, delta.total_seconds()))
-            print('nuke_bucket', bucket, 'waiting', delta.total_seconds(),
-                    'seconds for object locks to expire')
-            time.sleep(delta.total_seconds())
-
-        for objects in list_versions(client, bucket, batch_size):
-            client.delete_objects(Bucket=bucket,
-                    Delete={'Objects': objects, 'Quiet': True},
-                    BypassGovernanceRetention=True)
-
-    client.delete_bucket(Bucket=bucket)
-
-def nuke_prefixed_buckets(prefix, client=None):
-    if client == None:
-        client = get_client()
-
-    buckets = get_buckets_list(client, prefix)
-
-    err = None
-    for bucket_name in buckets:
-        try:
-            nuke_bucket(client, bucket_name)
-        except Exception as e:
-            # Don't let one failure abort the cleanup; keep going so the
-            # remaining buckets still get cleared and we don't leak
-            # resources. err is kept so the user is still told that at
-            # least one exception occurred during cleanup.
-            err = e
-            pass
-    if err:
-        raise err
-
-    print('Done with cleanup of buckets in tests.')
-
-def configured_storage_classes():
-    sc = ['STANDARD']
-
-    extra_sc = re.split(r"[\b\W\b]+", config.storage_classes)
-
-    for item in extra_sc:
-        if item != 'STANDARD':
-            sc.append(item)
-
-    sc = [i for i in sc if i]
-    print("storage classes configured: " + str(sc))
-
-    return sc
-
-def setup():
-    cfg = configparser.RawConfigParser()
-    try:
-        path = os.environ['S3TEST_CONF']
-    except KeyError:
-        raise RuntimeError(
-            'To run tests, point environment '
-            + 'variable S3TEST_CONF to a config file.',
-            )
-    cfg.read(path)
-
-    if not cfg.defaults():
-        raise RuntimeError('Your config file is missing the DEFAULT section!')
-    if not cfg.has_section("s3 main"):
-        raise RuntimeError('Your config file is missing the "s3 main" section!')
-    if not cfg.has_section("s3 alt"):
-        raise RuntimeError('Your config file is missing the "s3 alt" section!')
-    if not cfg.has_section("s3 tenant"):
-        raise RuntimeError('Your config file is missing the "s3 tenant" section!')
-
-    global prefix
-
-    defaults = cfg.defaults()
-
-    # vars from the DEFAULT section
-    config.default_host = defaults.get("host")
-    config.default_port = int(defaults.get("port"))
-    config.default_is_secure = cfg.getboolean('DEFAULT', "is_secure")
-
-    proto = 'https' if config.default_is_secure else 'http'
-    config.default_endpoint = "%s://%s:%d" % (proto, config.default_host, config.default_port)
-
-    try:
-        config.default_ssl_verify = cfg.getboolean('DEFAULT', "ssl_verify")
-    except configparser.NoOptionError:
-        config.default_ssl_verify = False
-
-    # Disable InsecureRequestWarning reported by urllib3 when ssl_verify is False
-    if not config.default_ssl_verify:
-        urllib3.disable_warnings()
-
-    # vars from the main section
-    config.main_access_key = cfg.get('s3 main',"access_key")
-    config.main_secret_key = cfg.get('s3 main',"secret_key")
-    config.main_display_name = cfg.get('s3 main',"display_name")
-    config.main_user_id = cfg.get('s3 main',"user_id")
-    config.main_email = cfg.get('s3 main',"email")
-    try:
-        config.main_kms_keyid = cfg.get('s3 main',"kms_keyid")
-    except (configparser.NoSectionError, configparser.NoOptionError):
-        config.main_kms_keyid = 'testkey-1'
-
-    try:
-        config.main_kms_keyid2 = cfg.get('s3 main',"kms_keyid2")
-    except (configparser.NoSectionError, configparser.NoOptionError):
-        config.main_kms_keyid2 = 'testkey-2'
-
-    try:
-        config.main_api_name = cfg.get('s3 main',"api_name")
-    except (configparser.NoSectionError, configparser.NoOptionError):
-        config.main_api_name = ""
-        pass
-
-    try:
-        config.storage_classes = cfg.get('s3 main',"storage_classes")
-    except (configparser.NoSectionError, configparser.NoOptionError):
-        config.storage_classes = ""
-        pass
-
-    try:
-        config.lc_debug_interval = int(cfg.get('s3 main',"lc_debug_interval"))
-    except (configparser.NoSectionError, configparser.NoOptionError):
-        config.lc_debug_interval = 10
-
-    config.alt_access_key = cfg.get('s3 alt',"access_key")
-    config.alt_secret_key = cfg.get('s3 alt',"secret_key")
-    config.alt_display_name = cfg.get('s3 alt',"display_name")
-    config.alt_user_id = cfg.get('s3 alt',"user_id")
-    config.alt_email = cfg.get('s3 alt',"email")
-
-    config.tenant_access_key = cfg.get('s3 tenant',"access_key")
-    config.tenant_secret_key = cfg.get('s3 tenant',"secret_key")
-    config.tenant_display_name = cfg.get('s3 tenant',"display_name")
-    config.tenant_user_id = cfg.get('s3 tenant',"user_id")
-    config.tenant_email = cfg.get('s3 tenant',"email")
-
-    config.iam_access_key = cfg.get('iam',"access_key")
-    config.iam_secret_key = cfg.get('iam',"secret_key")
-    config.iam_display_name = cfg.get('iam',"display_name")
-    config.iam_user_id = cfg.get('iam',"user_id")
-    config.iam_email = cfg.get('iam',"email")
-
-    config.iam_root_access_key = cfg.get('iam root',"access_key")
-    config.iam_root_secret_key = cfg.get('iam root',"secret_key")
-    config.iam_root_user_id = cfg.get('iam root',"user_id")
-    config.iam_root_email = cfg.get('iam root',"email")
-
-    config.iam_alt_root_access_key = cfg.get('iam alt root',"access_key")
-    config.iam_alt_root_secret_key = cfg.get('iam alt root',"secret_key")
-    config.iam_alt_root_user_id = cfg.get('iam alt root',"user_id")
-    config.iam_alt_root_email = cfg.get('iam alt root',"email")
-
-    # vars from the fixtures section
-    template = cfg.get('fixtures', "bucket prefix", fallback='test-{random}-')
-    prefix = choose_bucket_prefix(template=template)
-    template = cfg.get('fixtures', "iam name prefix", fallback="s3-tests-")
-    config.iam_name_prefix = choose_bucket_prefix(template=template)
-    template = cfg.get('fixtures', "iam path prefix", fallback="/s3-tests/")
-    config.iam_path_prefix = choose_bucket_prefix(template=template)
-
-    alt_client = get_alt_client()
-    tenant_client = get_tenant_client()
-    nuke_prefixed_buckets(prefix=prefix)
-    nuke_prefixed_buckets(prefix=prefix, client=alt_client)
-    nuke_prefixed_buckets(prefix=prefix, client=tenant_client)
-
-    if cfg.has_section("s3 cloud"):
-        get_cloud_config(cfg)
-    else:
-        config.cloud_storage_class = None
-
-
-def teardown():
-    alt_client = get_alt_client()
-    tenant_client = get_tenant_client()
-    nuke_prefixed_buckets(prefix=prefix)
-    nuke_prefixed_buckets(prefix=prefix, client=alt_client)
-    nuke_prefixed_buckets(prefix=prefix, client=tenant_client)
-    try:
-        iam_client = get_iam_client()
-        list_roles_resp = iam_client.list_roles()
-        for role in list_roles_resp['Roles']:
-            list_policies_resp = iam_client.list_role_policies(RoleName=role['RoleName'])
-            for policy in list_policies_resp['PolicyNames']:
-                del_policy_resp = iam_client.delete_role_policy(
-                                         RoleName=role['RoleName'],
-                                         PolicyName=policy
-                                        )
-            del_role_resp = iam_client.delete_role(RoleName=role['RoleName'])
-        list_oidc_resp = iam_client.list_open_id_connect_providers()
-        for oidcprovider in list_oidc_resp['OpenIDConnectProviderList']:
-            del_oidc_resp = iam_client.delete_open_id_connect_provider(
-                        OpenIDConnectProviderArn=oidcprovider['Arn']
-                    )
-    except:
-        pass
-
-@pytest.fixture(scope="package")
-def configfile():
-    setup()
-    return config
-
-@pytest.fixture(autouse=True)
-def setup_teardown(configfile):
-    yield
-    teardown()
-
-def check_webidentity():
-    cfg = configparser.RawConfigParser()
-    try:
-        path = os.environ['S3TEST_CONF']
-    except KeyError:
-        raise RuntimeError(
-            'To run tests, point environment '
-            + 'variable S3TEST_CONF to a config file.',
-            )
-    cfg.read(path)
-    if not cfg.has_section("webidentity"):
-        raise RuntimeError('Your config file is missing the "webidentity" section!')
-
-    config.webidentity_thumbprint = cfg.get('webidentity', "thumbprint")
-    config.webidentity_aud = cfg.get('webidentity', "aud")
-    config.webidentity_token = cfg.get('webidentity', "token")
-    config.webidentity_realm = cfg.get('webidentity', "KC_REALM")
-    config.webidentity_sub = cfg.get('webidentity', "sub")
-    config.webidentity_azp = cfg.get('webidentity', "azp")
-    config.webidentity_user_token = cfg.get('webidentity', "user_token")
-
-def get_cloud_config(cfg):
-    config.cloud_host = cfg.get('s3 cloud',"host")
-    config.cloud_port = int(cfg.get('s3 cloud',"port"))
-    config.cloud_is_secure = cfg.getboolean('s3 cloud', "is_secure")
-
-    proto = 'https' if config.cloud_is_secure else 'http'
-    config.cloud_endpoint = "%s://%s:%d" % (proto, config.cloud_host, config.cloud_port)
-
-    config.cloud_access_key = cfg.get('s3 cloud',"access_key")
-    config.cloud_secret_key = cfg.get('s3 cloud',"secret_key")
-
-    try:
-        config.cloud_storage_class = cfg.get('s3 cloud', "cloud_storage_class")
-    except (configparser.NoSectionError, configparser.NoOptionError):
-        config.cloud_storage_class = None
-    
-    try:
-        config.cloud_retain_head_object = cfg.get('s3 cloud',"retain_head_object")
-    except (configparser.NoSectionError, configparser.NoOptionError):
-        config.cloud_retain_head_object = None
-
-    try:
-        config.cloud_target_path = cfg.get('s3 cloud',"target_path")
-    except (configparser.NoSectionError, configparser.NoOptionError):
-        config.cloud_target_path = None
-
-    try:
-        config.cloud_target_storage_class = cfg.get('s3 cloud',"target_storage_class")
-    except (configparser.NoSectionError, configparser.NoOptionError):
-        config.cloud_target_storage_class = 'STANDARD'
-
-    try:
-        config.cloud_regular_storage_class = cfg.get('s3 cloud', "storage_class")
-    except (configparser.NoSectionError, configparser.NoOptionError):
-        config.cloud_regular_storage_class  = None
-
-
-def get_client(client_config=None):
-    if client_config == None:
-        client_config = Config(signature_version='s3v4')
-
-    client = boto3.client(service_name='s3',
-                        aws_access_key_id=config.main_access_key,
-                        aws_secret_access_key=config.main_secret_key,
-                        endpoint_url=config.default_endpoint,
-                        use_ssl=config.default_is_secure,
-                        verify=config.default_ssl_verify,
-                        config=client_config)
-    return client
-
-def get_v2_client():
-    client = boto3.client(service_name='s3',
-                        aws_access_key_id=config.main_access_key,
-                        aws_secret_access_key=config.main_secret_key,
-                        endpoint_url=config.default_endpoint,
-                        use_ssl=config.default_is_secure,
-                        verify=config.default_ssl_verify,
-                        config=Config(signature_version='s3'))
-    return client
-
-def get_sts_client(**kwargs):
-    kwargs.setdefault('aws_access_key_id', config.alt_access_key)
-    kwargs.setdefault('aws_secret_access_key', config.alt_secret_key)
-    kwargs.setdefault('config', Config(signature_version='s3v4'))
-
-    client = boto3.client(service_name='sts',
-                          endpoint_url=config.default_endpoint,
-                          region_name='',
-                          use_ssl=config.default_is_secure,
-                          verify=config.default_ssl_verify,
-                          **kwargs)
-    return client
-
-def get_iam_client(**kwargs):
-    kwargs.setdefault('aws_access_key_id', config.iam_access_key)
-    kwargs.setdefault('aws_secret_access_key', config.iam_secret_key)
-
-    client = boto3.client(service_name='iam',
-                        endpoint_url=config.default_endpoint,
-                        region_name='',
-                        use_ssl=config.default_is_secure,
-                        verify=config.default_ssl_verify,
-                        **kwargs)
-    return client
-
-def get_iam_s3client(**kwargs):
-    kwargs.setdefault('aws_access_key_id', config.iam_access_key)
-    kwargs.setdefault('aws_secret_access_key', config.iam_secret_key)
-    kwargs.setdefault('config', Config(signature_version='s3v4'))
-
-    client = boto3.client(service_name='s3',
-                          endpoint_url=config.default_endpoint,
-                          use_ssl=config.default_is_secure,
-                          verify=config.default_ssl_verify,
-                          **kwargs)
-    return client
-
-def get_iam_root_client(**kwargs):
-    kwargs.setdefault('service_name', 'iam')
-    kwargs.setdefault('aws_access_key_id', config.iam_root_access_key)
-    kwargs.setdefault('aws_secret_access_key', config.iam_root_secret_key)
-
-    return boto3.client(endpoint_url=config.default_endpoint,
-                        region_name='',
-                        use_ssl=config.default_is_secure,
-                        verify=config.default_ssl_verify,
-                        **kwargs)
-
-def get_iam_alt_root_client(**kwargs):
-    kwargs.setdefault('service_name', 'iam')
-    kwargs.setdefault('aws_access_key_id', config.iam_alt_root_access_key)
-    kwargs.setdefault('aws_secret_access_key', config.iam_alt_root_secret_key)
-
-    return boto3.client(endpoint_url=config.default_endpoint,
-                        region_name='',
-                        use_ssl=config.default_is_secure,
-                        verify=config.default_ssl_verify,
-                        **kwargs)
-
-def get_alt_client(client_config=None):
-    if client_config == None:
-        client_config = Config(signature_version='s3v4')
-
-    client = boto3.client(service_name='s3',
-                        aws_access_key_id=config.alt_access_key,
-                        aws_secret_access_key=config.alt_secret_key,
-                        endpoint_url=config.default_endpoint,
-                        use_ssl=config.default_is_secure,
-                        verify=config.default_ssl_verify,
-                        config=client_config)
-    return client
-
-def get_cloud_client(client_config=None):
-    if client_config == None:
-        client_config = Config(signature_version='s3v4')
-
-    client = boto3.client(service_name='s3',
-                        aws_access_key_id=config.cloud_access_key,
-                        aws_secret_access_key=config.cloud_secret_key,
-                        endpoint_url=config.cloud_endpoint,
-                        use_ssl=config.cloud_is_secure,
-                        config=client_config)
-    return client
-
-def get_tenant_client(client_config=None):
-    if client_config == None:
-        client_config = Config(signature_version='s3v4')
-
-    client = boto3.client(service_name='s3',
-                        aws_access_key_id=config.tenant_access_key,
-                        aws_secret_access_key=config.tenant_secret_key,
-                        endpoint_url=config.default_endpoint,
-                        use_ssl=config.default_is_secure,
-                        verify=config.default_ssl_verify,
-                        config=client_config)
-    return client
-
-def get_tenant_iam_client():
-
-    client = boto3.client(service_name='iam',
-                          region_name='us-east-1',
-                          aws_access_key_id=config.tenant_access_key,
-                          aws_secret_access_key=config.tenant_secret_key,
-                          endpoint_url=config.default_endpoint,
-                          verify=config.default_ssl_verify,
-                          use_ssl=config.default_is_secure)
-    return client
-
-def get_alt_iam_client():
-
-    client = boto3.client(service_name='iam',
-                          region_name='',
-                          aws_access_key_id=config.alt_access_key,
-                          aws_secret_access_key=config.alt_secret_key,
-                          endpoint_url=config.default_endpoint,
-                          verify=config.default_ssl_verify,
-                          use_ssl=config.default_is_secure)
-    return client
-
-def get_unauthenticated_client():
-    client = boto3.client(service_name='s3',
-                        aws_access_key_id='',
-                        aws_secret_access_key='',
-                        endpoint_url=config.default_endpoint,
-                        use_ssl=config.default_is_secure,
-                        verify=config.default_ssl_verify,
-                        config=Config(signature_version=UNSIGNED))
-    return client
-
-def get_bad_auth_client(aws_access_key_id='badauth'):
-    client = boto3.client(service_name='s3',
-                        aws_access_key_id=aws_access_key_id,
-                        aws_secret_access_key='roflmao',
-                        endpoint_url=config.default_endpoint,
-                        use_ssl=config.default_is_secure,
-                        verify=config.default_ssl_verify,
-                        config=Config(signature_version='s3v4'))
-    return client
-
-def get_svc_client(client_config=None, svc='s3'):
-    if client_config == None:
-        client_config = Config(signature_version='s3v4')
-
-    client = boto3.client(service_name=svc,
-                        aws_access_key_id=config.main_access_key,
-                        aws_secret_access_key=config.main_secret_key,
-                        endpoint_url=config.default_endpoint,
-                        use_ssl=config.default_is_secure,
-                        verify=config.default_ssl_verify,
-                        config=client_config)
-    return client
-
-bucket_counter = itertools.count(1)
-
-def get_new_bucket_name():
-    """
-    Get a bucket name that probably does not exist.
-
-    We make every attempt to use a unique random prefix, so if a
-    bucket by this name happens to exist, it's ok if tests give
-    false negatives.
-    """
-    name = '{prefix}{num}'.format(
-        prefix=prefix,
-        num=next(bucket_counter),
-        )
-    return name
-
-def get_new_bucket_resource(name=None):
-    """
-    Get a bucket that exists and is empty.
-
-    Always recreates a bucket from scratch. This is useful to also
-    reset ACLs and such.
-    """
-    s3 = boto3.resource('s3',
-                        aws_access_key_id=config.main_access_key,
-                        aws_secret_access_key=config.main_secret_key,
-                        endpoint_url=config.default_endpoint,
-                        use_ssl=config.default_is_secure,
-                        verify=config.default_ssl_verify)
-    if name is None:
-        name = get_new_bucket_name()
-    bucket = s3.Bucket(name)
-    bucket_location = bucket.create()
-    return bucket
-
-def get_new_bucket(client=None, name=None):
-    """
-    Get a bucket that exists and is empty.
-
-    Always recreates a bucket from scratch. This is useful to also
-    reset ACLs and such.
-    """
-    if client is None:
-        client = get_client()
-    if name is None:
-        name = get_new_bucket_name()
-
-    client.create_bucket(Bucket=name)
-    return name
-
-def get_parameter_name():
-    parameter_name=""
-    rand = ''.join(
-        random.choice(string.ascii_lowercase + string.digits)
-        for c in range(255)
-        )
-    while rand:
-        parameter_name = '{random}'.format(random=rand)
-        if len(parameter_name) <= 10:
-            return parameter_name
-        rand = rand[:-1]
-    return parameter_name
-
-def get_sts_user_id():
-    return config.alt_user_id
-
-def get_config_is_secure():
-    return config.default_is_secure
-
-def get_config_host():
-    return config.default_host
-
-def get_config_port():
-    return config.default_port
-
-def get_config_endpoint():
-    return config.default_endpoint
-
-def get_config_ssl_verify():
-    return config.default_ssl_verify
-
-def get_main_aws_access_key():
-    return config.main_access_key
-
-def get_main_aws_secret_key():
-    return config.main_secret_key
-
-def get_main_display_name():
-    return config.main_display_name
-
-def get_main_user_id():
-    return config.main_user_id
-
-def get_main_email():
-    return config.main_email
-
-def get_main_api_name():
-    return config.main_api_name
-
-def get_main_kms_keyid():
-    return config.main_kms_keyid
-
-def get_secondary_kms_keyid():
-    return config.main_kms_keyid2
-
-def get_alt_aws_access_key():
-    return config.alt_access_key
-
-def get_alt_aws_secret_key():
-    return config.alt_secret_key
-
-def get_alt_display_name():
-    return config.alt_display_name
-
-def get_alt_user_id():
-    return config.alt_user_id
-
-def get_alt_email():
-    return config.alt_email
-
-def get_tenant_aws_access_key():
-    return config.tenant_access_key
-
-def get_tenant_aws_secret_key():
-    return config.tenant_secret_key
-
-def get_tenant_display_name():
-    return config.tenant_display_name
-
-def get_tenant_user_id():
-    return config.tenant_user_id
-
-def get_tenant_email():
-    return config.tenant_email
-
-def get_thumbprint():
-    return config.webidentity_thumbprint
-
-def get_aud():
-    return config.webidentity_aud
-
-def get_sub():
-    return config.webidentity_sub
-
-def get_azp():
-    return config.webidentity_azp
-
-def get_token():
-    return config.webidentity_token
-
-def get_realm_name():
-    return config.webidentity_realm
-
-def get_iam_name_prefix():
-    return config.iam_name_prefix
-
-def make_iam_name(name):
-    return config.iam_name_prefix + name
-
-def get_iam_path_prefix():
-    return config.iam_path_prefix
-
-def get_iam_access_key():
-    return config.iam_access_key
-
-def get_iam_secret_key():
-    return config.iam_secret_key
-
-def get_iam_root_user_id():
-    return config.iam_root_user_id
-
-def get_iam_root_email():
-    return config.iam_root_email
-
-def get_iam_alt_root_user_id():
-    return config.iam_alt_root_user_id
-
-def get_iam_alt_root_email():
-    return config.iam_alt_root_email
-
-def get_user_token():
-    return config.webidentity_user_token
-
-def get_cloud_storage_class():
-    return config.cloud_storage_class
-
-def get_cloud_retain_head_object():
-    return config.cloud_retain_head_object
-
-def get_cloud_regular_storage_class():
-    return config.cloud_regular_storage_class
-
-def get_cloud_target_path():
-    return config.cloud_target_path
-
-def get_cloud_target_storage_class():
-    return config.cloud_target_storage_class
-
-def get_lc_debug_interval():
-    return config.lc_debug_interval
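
setup() above expects S3TEST_CONF to point at an ini file supplying the DEFAULT section, an optional fixtures section, and the "s3 main", "s3 alt", "s3 tenant", "iam", "iam root" and "iam alt root" credential sections it reads. A hedged sketch of how a functional test module then consumes these helpers (test_put_then_list is a hypothetical name and the key/body values are arbitrary)::

    from . import (
        configfile,
        setup_teardown,
        get_client,
        get_new_bucket,
        get_objects_list,
    )

    def test_put_then_list():
        client = get_client()
        bucket = get_new_bucket(client)
        client.put_object(Bucket=bucket, Key='foo', Body='bar')
        assert get_objects_list(bucket, client) == ['foo']
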
diff --git a/s3tests_boto3/functional/iam.py b/s3tests_boto3/functional/iam.py
deleted file mode 100644 (file)
index a070e5d..0000000
+++ /dev/null
@@ -1,199 +0,0 @@
-from botocore.exceptions import ClientError
-import pytest
-
-from . import (
-    configfile,
-    get_iam_root_client,
-    get_iam_root_user_id,
-    get_iam_root_email,
-    get_iam_alt_root_client,
-    get_iam_alt_root_user_id,
-    get_iam_alt_root_email,
-    get_iam_path_prefix,
-)
-
-def nuke_user_keys(client, name):
-    p = client.get_paginator('list_access_keys')
-    for response in p.paginate(UserName=name):
-        for key in response['AccessKeyMetadata']:
-            try:
-                client.delete_access_key(UserName=name, AccessKeyId=key['AccessKeyId'])
-            except:
-                pass
-
-def nuke_user_policies(client, name):
-    p = client.get_paginator('list_user_policies')
-    for response in p.paginate(UserName=name):
-        for policy in response['PolicyNames']:
-            try:
-                client.delete_user_policy(UserName=name, PolicyName=policy)
-            except:
-                pass
-
-def nuke_attached_user_policies(client, name):
-    p = client.get_paginator('list_attached_user_policies')
-    for response in p.paginate(UserName=name):
-        for policy in response['AttachedPolicies']:
-            try:
-                client.detach_user_policy(UserName=name, PolicyArn=policy['PolicyArn'])
-            except:
-                pass
-
-def nuke_user(client, name):
-    # delete access keys, user policies, etc
-    try:
-        nuke_user_keys(client, name)
-    except:
-        pass
-    try:
-        nuke_user_policies(client, name)
-    except:
-        pass
-    try:
-        nuke_attached_user_policies(client, name)
-    except:
-        pass
-    client.delete_user(UserName=name)
-
-def nuke_users(client, **kwargs):
-    p = client.get_paginator('list_users')
-    for response in p.paginate(**kwargs):
-        for user in response['Users']:
-            try:
-                nuke_user(client, user['UserName'])
-            except:
-                pass
-
-def nuke_group_policies(client, name):
-    p = client.get_paginator('list_group_policies')
-    for response in p.paginate(GroupName=name):
-        for policy in response['PolicyNames']:
-            try:
-                client.delete_group_policy(GroupName=name, PolicyName=policy)
-            except:
-                pass
-
-def nuke_attached_group_policies(client, name):
-    p = client.get_paginator('list_attached_group_policies')
-    for response in p.paginate(GroupName=name):
-        for policy in response['AttachedPolicies']:
-            try:
-                client.detach_group_policy(GroupName=name, PolicyArn=policy['PolicyArn'])
-            except:
-                pass
-
-def nuke_group_users(client, name):
-    p = client.get_paginator('get_group')
-    for response in p.paginate(GroupName=name):
-        for user in response['Users']:
-            try:
-                client.remove_user_from_group(GroupName=name, UserName=user['UserName'])
-            except:
-                pass
-
-def nuke_group(client, name):
-    # delete group policies and remove all users
-    try:
-        nuke_group_policies(client, name)
-    except:
-        pass
-    try:
-        nuke_attached_group_policies(client, name)
-    except:
-        pass
-    try:
-        nuke_group_users(client, name)
-    except:
-        pass
-    client.delete_group(GroupName=name)
-
-def nuke_groups(client, **kwargs):
-    p = client.get_paginator('list_groups')
-    for response in p.paginate(**kwargs):
-        for user in response['Groups']:
-            try:
-                nuke_group(client, user['GroupName'])
-            except:
-                pass
-
-def nuke_role_policies(client, name):
-    p = client.get_paginator('list_role_policies')
-    for response in p.paginate(RoleName=name):
-        for policy in response['PolicyNames']:
-            try:
-                client.delete_role_policy(RoleName=name, PolicyName=policy)
-            except:
-                pass
-
-def nuke_attached_role_policies(client, name):
-    p = client.get_paginator('list_attached_role_policies')
-    for response in p.paginate(RoleName=name):
-        for policy in response['AttachedPolicies']:
-            try:
-                client.detach_role_policy(RoleName=name, PolicyArn=policy['PolicyArn'])
-            except:
-                pass
-
-def nuke_role(client, name):
-    # delete role policies, etc
-    try:
-        nuke_role_policies(client, name)
-    except:
-        pass
-    try:
-        nuke_attached_role_policies(client, name)
-    except:
-        pass
-    client.delete_role(RoleName=name)
-
-def nuke_roles(client, **kwargs):
-    p = client.get_paginator('list_roles')
-    for response in p.paginate(**kwargs):
-        for role in response['Roles']:
-            try:
-                nuke_role(client, role['RoleName'])
-            except:
-                pass
-
-def nuke_oidc_providers(client, prefix):
-    result = client.list_open_id_connect_providers()
-    for provider in result['OpenIDConnectProviderList']:
-        arn = provider['Arn']
-        if f':oidc-provider{prefix}' in arn:
-            try:
-                client.delete_open_id_connect_provider(OpenIDConnectProviderArn=arn)
-            except:
-                pass
-
-
-# fixture for iam account root user
-@pytest.fixture
-def iam_root(configfile):
-    client = get_iam_root_client()
-    try:
-        arn = client.get_user()['User']['Arn']
-        if not arn.endswith(':root'):
-            pytest.skip('[iam root] user does not have :root arn')
-    except ClientError as e:
-        pytest.skip('[iam root] user does not belong to an account')
-
-    yield client
-    nuke_users(client, PathPrefix=get_iam_path_prefix())
-    nuke_groups(client, PathPrefix=get_iam_path_prefix())
-    nuke_roles(client, PathPrefix=get_iam_path_prefix())
-    nuke_oidc_providers(client, get_iam_path_prefix())
-
-# fixture for iam alt account root user
-@pytest.fixture
-def iam_alt_root(configfile):
-    client = get_iam_alt_root_client()
-    try:
-        arn = client.get_user()['User']['Arn']
-        if not arn.endswith(':root'):
-            pytest.skip('[iam alt root] user does not have :root arn')
-    except ClientError as e:
-        pytest.skip('[iam alt root] user does not belong to an account')
-
-    yield client
-    nuke_users(client, PathPrefix=get_iam_path_prefix())
-    nuke_roles(client, PathPrefix=get_iam_path_prefix())
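
A hedged sketch of how the iam_root fixture above is typically consumed; test_account_user is a hypothetical name, while make_iam_name() and get_iam_path_prefix() come from the package __init__ shown earlier::

    from . import make_iam_name, get_iam_path_prefix
    from .iam import iam_root

    def test_account_user(iam_root):
        name = make_iam_name('User1')
        iam_root.create_user(UserName=name, Path=get_iam_path_prefix())
        # no explicit cleanup: the fixture removes users, groups, roles and
        # OIDC providers under the configured path prefix on teardown
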
diff --git a/s3tests_boto3/functional/policy.py b/s3tests_boto3/functional/policy.py
deleted file mode 100644 (file)
index 123496a..0000000
+++ /dev/null
@@ -1,46 +0,0 @@
-import json
-
-class Statement(object):
-    def __init__(self, action, resource, principal = {"AWS" : "*"}, effect= "Allow", condition = None):
-        self.principal = principal
-        self.action = action
-        self.resource = resource
-        self.condition = condition
-        self.effect = effect
-
-    def to_dict(self):
-        d = { "Action" : self.action,
-              "Principal" : self.principal,
-              "Effect" : self.effect,
-              "Resource" : self.resource
-        }
-
-        if self.condition is not None:
-            d["Condition"] = self.condition
-
-        return d
-
-class Policy(object):
-    def __init__(self):
-        self.statements = []
-
-    def add_statement(self, s):
-        self.statements.append(s)
-        return self
-
-    def to_json(self):
-        policy_dict = {
-            "Version" : "2012-10-17",
-            "Statement":
-            [s.to_dict() for s in self.statements]
-        }
-
-        return json.dumps(policy_dict)
-
-def make_json_policy(action, resource, principal={"AWS": "*"}, effect="Allow", conditions=None):
-    """
-    Helper function to make single statement policies
-    """
-    s = Statement(action, resource, principal, effect=effect, condition=conditions)
-    p = Policy()
-    return p.add_statement(s).to_json()
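
For orientation, a small example of the helper above; the bucket name is a placeholder and get_client() comes from the functional package::

    client = get_client()
    policy = make_json_policy("s3:GetObject", "arn:aws:s3:::example-bucket/*")
    # policy is now a JSON string equivalent to:
    # {"Version": "2012-10-17", "Statement": [{"Action": "s3:GetObject",
    #  "Principal": {"AWS": "*"}, "Effect": "Allow",
    #  "Resource": "arn:aws:s3:::example-bucket/*"}]}
    client.put_bucket_policy(Bucket='example-bucket', Policy=policy)
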
diff --git a/s3tests_boto3/functional/rgw_interactive.py b/s3tests_boto3/functional/rgw_interactive.py
deleted file mode 100644 (file)
index 873a145..0000000
+++ /dev/null
@@ -1,92 +0,0 @@
-#!/usr/bin/python
-import boto3
-import os
-import random
-import string
-import itertools
-
-host = "localhost"
-port = 8000
-
-## AWS access key
-access_key = "0555b35654ad1656d804"
-
-## AWS secret key
-secret_key = "h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q=="
-
-prefix = "YOURNAMEHERE-1234-"
-
-endpoint_url = "http://%s:%d" % (host, port)
-
-client = boto3.client(service_name='s3',
-                    aws_access_key_id=access_key,
-                    aws_secret_access_key=secret_key,
-                    endpoint_url=endpoint_url,
-                    use_ssl=False,
-                    verify=False)
-
-s3 = boto3.resource('s3', 
-                    use_ssl=False,
-                    verify=False,
-                    endpoint_url=endpoint_url, 
-                    aws_access_key_id=access_key,
-                    aws_secret_access_key=secret_key)
-
-def choose_bucket_prefix(template, max_len=30):
-    """
-    Choose a prefix for our test buckets, so they're easy to identify.
-
-    Use template and feed it more and more random filler, until it's
-    as long as possible but still below max_len.
-    """
-    rand = ''.join(
-        random.choice(string.ascii_lowercase + string.digits)
-        for c in range(255)
-        )
-
-    while rand:
-        s = template.format(random=rand)
-        if len(s) <= max_len:
-            return s
-        rand = rand[:-1]
-
-    raise RuntimeError(
-        'Bucket prefix template is impossible to fulfill: {template!r}'.format(
-            template=template,
-            ),
-        )
-
-bucket_counter = itertools.count(1)
-
-def get_new_bucket_name():
-    """
-    Get a bucket name that probably does not exist.
-
-    We make every attempt to use a unique random prefix, so if a
-    bucket by this name happens to exist, it's ok if tests give
-    false negatives.
-    """
-    name = '{prefix}{num}'.format(
-        prefix=prefix,
-        num=next(bucket_counter),
-        )
-    return name
-
-def get_new_bucket(session=boto3, name=None, headers=None):
-    """
-    Get a bucket that exists and is empty.
-
-    Always recreates a bucket from scratch. This is useful to also
-    reset ACLs and such.
-    """
-    s3 = session.resource('s3', 
-                        use_ssl=False,
-                        verify=False,
-                        endpoint_url=endpoint_url, 
-                        aws_access_key_id=access_key,
-                        aws_secret_access_key=secret_key)
-    if name is None:
-        name = get_new_bucket_name()
-    bucket = s3.Bucket(name)
-    bucket_location = bucket.create()
-    return bucket
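
rgw_interactive.py appears intended for ad-hoc experiments against a locally running radosgw; a hedged sketch of such a session, assuming the endpoint and credentials at the top of the file match your gateway::

    $ python -i rgw_interactive.py
    >>> bucket = get_new_bucket()
    >>> resp = client.put_object(Bucket=bucket.name, Key='hello', Body='world')
    >>> [o['Key'] for o in client.list_objects(Bucket=bucket.name)['Contents']]
    ['hello']
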
diff --git a/s3tests_boto3/functional/test_headers.py b/s3tests_boto3/functional/test_headers.py
deleted file mode 100644 (file)
index 66cabe5..0000000
+++ /dev/null
@@ -1,572 +0,0 @@
-import boto3
-import pytest
-from botocore.exceptions import ClientError
-from email.utils import formatdate
-
-from .utils import assert_raises
-from .utils import _get_status_and_error_code
-from .utils import _get_status
-
-from . import (
-    configfile,
-    setup_teardown,
-    get_client,
-    get_v2_client,
-    get_new_bucket,
-    get_new_bucket_name,
-    )
-
-def _add_header_create_object(headers, client=None):
-    """ Create a new bucket, add an object w/header customizations
-    """
-    bucket_name = get_new_bucket()
-    if client == None:
-        client = get_client()
-    key_name = 'foo'
-
-    # pass in custom headers before PutObject call
-    add_headers = (lambda **kwargs: kwargs['params']['headers'].update(headers))
-    client.meta.events.register('before-call.s3.PutObject', add_headers)
-    client.put_object(Bucket=bucket_name, Key=key_name)
-
-    return bucket_name, key_name
-
-
-def _add_header_create_bad_object(headers, client=None):
-    """ Create a new bucket, add an object with a header. This should cause a failure 
-    """
-    bucket_name = get_new_bucket()
-    if client == None:
-        client = get_client()
-    key_name = 'foo'
-
-    # pass in custom headers before PutObject call
-    add_headers = (lambda **kwargs: kwargs['params']['headers'].update(headers))
-    client.meta.events.register('before-call.s3.PutObject', add_headers)
-    e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key=key_name, Body='bar')
-
-    return e
-
-
-def _remove_header_create_object(remove, client=None):
-    """ Create a new bucket, add an object without a header
-    """
-    bucket_name = get_new_bucket()
-    if client == None:
-        client = get_client()
-    key_name = 'foo'
-
-    # remove custom headers before PutObject call
-    def remove_header(**kwargs):
-        if (remove in kwargs['params']['headers']):
-            del kwargs['params']['headers'][remove]
-
-    client.meta.events.register('before-call.s3.PutObject', remove_header)
-    client.put_object(Bucket=bucket_name, Key=key_name)
-
-    return bucket_name, key_name
-
-def _remove_header_create_bad_object(remove, client=None):
-    """ Create a new bucket, add an object without a header. This should cause a failure
-    """
-    bucket_name = get_new_bucket()
-    if client == None:
-        client = get_client()
-    key_name = 'foo'
-
-    # remove custom headers before PutObject call
-    def remove_header(**kwargs):
-        if (remove in kwargs['params']['headers']):
-            del kwargs['params']['headers'][remove]
-
-    client.meta.events.register('before-call.s3.PutObject', remove_header)
-    e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key=key_name, Body='bar')
-
-    return e
-
-
-def _add_header_create_bucket(headers, client=None):
-    """ Create a new bucket, w/header customizations
-    """
-    bucket_name = get_new_bucket_name()
-    if client == None:
-        client = get_client()
-
-    # pass in custom headers before CreateBucket call
-    add_headers = (lambda **kwargs: kwargs['params']['headers'].update(headers))
-    client.meta.events.register('before-call.s3.CreateBucket', add_headers)
-    client.create_bucket(Bucket=bucket_name)
-
-    return bucket_name
-
-
-def _add_header_create_bad_bucket(headers=None, client=None):
-    """ Create a new bucket, w/header customizations that should cause a failure 
-    """
-    bucket_name = get_new_bucket_name()
-    if client == None:
-        client = get_client()
-
-    # pass in custom headers before CreateBucket call
-    add_headers = (lambda **kwargs: kwargs['params']['headers'].update(headers))
-    client.meta.events.register('before-call.s3.CreateBucket', add_headers)
-    e = assert_raises(ClientError, client.create_bucket, Bucket=bucket_name)
-
-    return e
-
-
-def _remove_header_create_bucket(remove, client=None):
-    """ Create a new bucket, without a header
-    """
-    bucket_name = get_new_bucket_name()
-    if client == None:
-        client = get_client()
-
-    # remove custom headers before CreateBucket call
-    def remove_header(**kwargs):
-        if (remove in kwargs['params']['headers']):
-            del kwargs['params']['headers'][remove]
-
-    client.meta.events.register('before-call.s3.CreateBucket', remove_header)
-    client.create_bucket(Bucket=bucket_name)
-
-    return bucket_name
-
-def _remove_header_create_bad_bucket(remove, client=None):
-    """ Create a new bucket, without a header. This should cause a failure
-    """
-    bucket_name = get_new_bucket_name()
-    if client == None:
-        client = get_client()
-
-    # remove custom headers before CreateBucket call
-    def remove_header(**kwargs):
-        if (remove in kwargs['params']['headers']):
-            del kwargs['params']['headers'][remove]
-
-    client.meta.events.register('before-call.s3.CreateBucket', remove_header)
-    e = assert_raises(ClientError, client.create_bucket, Bucket=bucket_name)
-
-    return e
-
-#
-# common tests
-#
-
-@pytest.mark.auth_common
-def test_object_create_bad_md5_invalid_short():
-    e = _add_header_create_bad_object({'Content-MD5':'YWJyYWNhZGFicmE='})
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 400
-    assert error_code == 'InvalidDigest'
-
-@pytest.mark.auth_common
-def test_object_create_bad_md5_bad():
-    e = _add_header_create_bad_object({'Content-MD5':'rL0Y20xC+Fzt72VPzMSk2A=='})
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 400
-    assert error_code == 'BadDigest'
-
-@pytest.mark.auth_common
-def test_object_create_bad_md5_empty():
-    e = _add_header_create_bad_object({'Content-MD5':''})
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 400
-    assert error_code == 'InvalidDigest'
-
-@pytest.mark.auth_common
-def test_object_create_bad_md5_none():
-    bucket_name, key_name = _remove_header_create_object('Content-MD5')
-    client = get_client()
-    client.put_object(Bucket=bucket_name, Key=key_name, Body='bar')
-
-@pytest.mark.auth_common
-def test_object_create_bad_expect_mismatch():
-    bucket_name, key_name = _add_header_create_object({'Expect': 200})
-    client = get_client()
-    client.put_object(Bucket=bucket_name, Key=key_name, Body='bar')
-
-@pytest.mark.auth_common
-def test_object_create_bad_expect_empty():
-    bucket_name, key_name = _add_header_create_object({'Expect': ''})
-    client = get_client()
-    client.put_object(Bucket=bucket_name, Key=key_name, Body='bar')
-
-@pytest.mark.auth_common
-def test_object_create_bad_expect_none():
-    bucket_name, key_name = _remove_header_create_object('Expect')
-    client = get_client()
-    client.put_object(Bucket=bucket_name, Key=key_name, Body='bar')
-
-@pytest.mark.auth_common
-# TODO: remove 'fails_on_rgw' once we have learned how to remove the content-length header
-@pytest.mark.fails_on_rgw
-def test_object_create_bad_contentlength_empty():
-    e = _add_header_create_bad_object({'Content-Length':''})
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 400
-
-@pytest.mark.auth_common
-@pytest.mark.fails_on_mod_proxy_fcgi
-def test_object_create_bad_contentlength_negative():
-    client = get_client()
-    bucket_name = get_new_bucket()
-    key_name = 'foo'
-    e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key=key_name, ContentLength=-1)
-    status = _get_status(e.response)
-    assert status == 400
-
-@pytest.mark.auth_common
-# TODO: remove 'fails_on_rgw' once we have learned how to remove the content-length header
-@pytest.mark.fails_on_rgw
-def test_object_create_bad_contentlength_none():
-    remove = 'Content-Length'
-    e = _remove_header_create_bad_object('Content-Length')
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 411
-    assert error_code == 'MissingContentLength'
-
-@pytest.mark.auth_common
-def test_object_create_bad_contenttype_invalid():
-    bucket_name, key_name = _add_header_create_object({'Content-Type': 'text/plain'})
-    client = get_client()
-    client.put_object(Bucket=bucket_name, Key=key_name, Body='bar')
-
-@pytest.mark.auth_common
-def test_object_create_bad_contenttype_empty():
-    client = get_client()
-    key_name = 'foo'
-    bucket_name = get_new_bucket()
-    client.put_object(Bucket=bucket_name, Key=key_name, Body='bar', ContentType='')
-
-@pytest.mark.auth_common
-def test_object_create_bad_contenttype_none():
-    bucket_name = get_new_bucket()
-    key_name = 'foo'
-    client = get_client()
-    # as long as ContentType isn't specified in put_object it isn't going into the request
-    client.put_object(Bucket=bucket_name, Key=key_name, Body='bar')
-
-
-@pytest.mark.auth_common
-# TODO: remove 'fails_on_rgw' once we have learned how to remove the authorization header
-@pytest.mark.fails_on_rgw
-def test_object_create_bad_authorization_empty():
-    e = _add_header_create_bad_object({'Authorization': ''})
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 403
-
-@pytest.mark.auth_common
-# TODO: remove 'fails_on_rgw' once we have learned how to pass both the 'Date' and 'X-Amz-Date' headers during signing, rather than only 'X-Amz-Date' beforehand
-@pytest.mark.fails_on_rgw
-def test_object_create_date_and_amz_date():
-    date = formatdate(usegmt=True)
-    bucket_name, key_name = _add_header_create_object({'Date': date, 'X-Amz-Date': date})
-    client = get_client()
-    client.put_object(Bucket=bucket_name, Key=key_name, Body='bar')
-
-@pytest.mark.auth_common
-# TODO: remove 'fails_on_rgw' once we have learned how to pass both the 'Date' and 'X-Amz-Date' headers during signing, rather than only 'X-Amz-Date' beforehand
-@pytest.mark.fails_on_rgw
-def test_object_create_amz_date_and_no_date():
-    date = formatdate(usegmt=True)
-    bucket_name, key_name = _add_header_create_object({'Date': '', 'X-Amz-Date': date})
-    client = get_client()
-    client.put_object(Bucket=bucket_name, Key=key_name, Body='bar')
-
-# the teardown is really messed up here. check it out
-@pytest.mark.auth_common
-# TODO: remove 'fails_on_rgw' once we have learned how to remove the authorization header
-@pytest.mark.fails_on_rgw
-def test_object_create_bad_authorization_none():
-    e = _remove_header_create_bad_object('Authorization')
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 403
-
-@pytest.mark.auth_common
-# TODO: remove 'fails_on_rgw' once we have learned how to remove the content-length header
-@pytest.mark.fails_on_rgw
-def test_bucket_create_contentlength_none():
-    remove = 'Content-Length'
-    _remove_header_create_bucket(remove)
-
-@pytest.mark.auth_common
-# TODO: remove 'fails_on_rgw' once we have learned how to remove the content-length header
-@pytest.mark.fails_on_rgw
-def test_object_acl_create_contentlength_none():
-    bucket_name = get_new_bucket()
-    client = get_client()
-    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
-
-    remove = 'Content-Length'
-    def remove_header(**kwargs):
-        if (remove in kwargs['params']['headers']):
-            del kwargs['params']['headers'][remove]
-
-    client.meta.events.register('before-call.s3.PutObjectAcl', remove_header)
-    client.put_object_acl(Bucket=bucket_name, Key='foo', ACL='public-read')
-
-@pytest.mark.auth_common
-def test_bucket_put_bad_canned_acl():
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    headers = {'x-amz-acl': 'public-ready'}
-    add_headers = (lambda **kwargs: kwargs['params']['headers'].update(headers))
-    client.meta.events.register('before-call.s3.PutBucketAcl', add_headers)
-
-    e = assert_raises(ClientError, client.put_bucket_acl, Bucket=bucket_name, ACL='public-read')
-    status = _get_status(e.response)
-    assert status == 400
-
-@pytest.mark.auth_common
-def test_bucket_create_bad_expect_mismatch():
-    bucket_name = get_new_bucket_name()
-    client = get_client()
-
-    headers = {'Expect': 200}
-    add_headers = (lambda **kwargs: kwargs['params']['headers'].update(headers))
-    client.meta.events.register('before-call.s3.CreateBucket', add_headers)
-    client.create_bucket(Bucket=bucket_name)
-
-@pytest.mark.auth_common
-def test_bucket_create_bad_expect_empty():
-    headers = {'Expect': ''}
-    _add_header_create_bucket(headers)
-
-@pytest.mark.auth_common
-# TODO: The request isn't even making it to the RGW past the frontend
-# This test had 'fails_on_rgw' before the move to boto3
-@pytest.mark.fails_on_rgw
-def test_bucket_create_bad_contentlength_empty():
-    headers = {'Content-Length': ''}
-    e = _add_header_create_bad_bucket(headers)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 400
-
-@pytest.mark.auth_common
-@pytest.mark.fails_on_mod_proxy_fcgi
-def test_bucket_create_bad_contentlength_negative():
-    headers = {'Content-Length': '-1'}
-    e = _add_header_create_bad_bucket(headers)
-    status = _get_status(e.response)
-    assert status == 400
-
-@pytest.mark.auth_common
-# TODO: remove 'fails_on_rgw' once we have learned how to remove the content-length header
-@pytest.mark.fails_on_rgw
-def test_bucket_create_bad_contentlength_none():
-    remove = 'Content-Length'
-    _remove_header_create_bucket(remove)
-
-@pytest.mark.auth_common
-# TODO: remove 'fails_on_rgw' once we have learned how to manipulate the authorization header
-@pytest.mark.fails_on_rgw
-def test_bucket_create_bad_authorization_empty():
-    headers = {'Authorization': ''}
-    e = _add_header_create_bad_bucket(headers)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 403
-    assert error_code == 'AccessDenied'
-
-@pytest.mark.auth_common
-# TODO: remove 'fails_on_rgw' once we have learned how to manipulate the authorization header
-@pytest.mark.fails_on_rgw
-def test_bucket_create_bad_authorization_none():
-    e = _remove_header_create_bad_bucket('Authorization')
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 403
-    assert error_code == 'AccessDenied'
-
-@pytest.mark.auth_aws2
-def test_object_create_bad_md5_invalid_garbage_aws2():
-    v2_client = get_v2_client()
-    headers = {'Content-MD5': 'AWS HAHAHA'}
-    e = _add_header_create_bad_object(headers, v2_client)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 400
-    assert error_code == 'InvalidDigest'
-
-@pytest.mark.auth_aws2
-# TODO: remove 'fails_on_rgw' once we have learned how to manipulate the Content-Length header
-@pytest.mark.fails_on_rgw
-def test_object_create_bad_contentlength_mismatch_below_aws2():
-    v2_client = get_v2_client()
-    content = 'bar'
-    length = len(content) - 1
-    headers = {'Content-Length': str(length)}
-    e = _add_header_create_bad_object(headers, v2_client)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 400
-    assert error_code == 'BadDigest'
-
-@pytest.mark.auth_aws2
-# TODO: remove 'fails_on_rgw' once we have learned how to manipulate the authorization header
-@pytest.mark.fails_on_rgw
-def test_object_create_bad_authorization_incorrect_aws2():
-    v2_client = get_v2_client()
-    headers = {'Authorization': 'AWS AKIAIGR7ZNNBHC5BKSUB:FWeDfwojDSdS2Ztmpfeubhd9isU='}
-    e = _add_header_create_bad_object(headers, v2_client)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 403
-    assert error_code == 'InvalidDigest'
-
-@pytest.mark.auth_aws2
-# TODO: remove 'fails_on_rgw' once we have learned how to manipulate the authorization header
-@pytest.mark.fails_on_rgw
-def test_object_create_bad_authorization_invalid_aws2():
-    v2_client = get_v2_client()
-    headers = {'Authorization': 'AWS HAHAHA'}
-    e = _add_header_create_bad_object(headers, v2_client)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 400
-    assert error_code == 'InvalidArgument'
-
-@pytest.mark.auth_aws2
-def test_object_create_bad_ua_empty_aws2():
-    v2_client = get_v2_client()
-    headers = {'User-Agent': ''}
-    bucket_name, key_name = _add_header_create_object(headers, v2_client)
-    v2_client.put_object(Bucket=bucket_name, Key=key_name, Body='bar')
-
-@pytest.mark.auth_aws2
-def test_object_create_bad_ua_none_aws2():
-    v2_client = get_v2_client()
-    remove = 'User-Agent'
-    bucket_name, key_name = _remove_header_create_object(remove, v2_client)
-    v2_client.put_object(Bucket=bucket_name, Key=key_name, Body='bar')
-
-@pytest.mark.auth_aws2
-def test_object_create_bad_date_invalid_aws2():
-    v2_client = get_v2_client()
-    headers = {'x-amz-date': 'Bad Date'}
-    e = _add_header_create_bad_object(headers, v2_client)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 403
-    assert error_code == 'AccessDenied'
-
-@pytest.mark.auth_aws2
-def test_object_create_bad_date_empty_aws2():
-    v2_client = get_v2_client()
-    headers = {'x-amz-date': ''}
-    e = _add_header_create_bad_object(headers, v2_client)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 403
-    assert error_code == 'AccessDenied'
-
-@pytest.mark.auth_aws2
-# TODO: remove 'fails_on_rgw' once we have learned how to remove the date header
-@pytest.mark.fails_on_rgw
-def test_object_create_bad_date_none_aws2():
-    v2_client = get_v2_client()
-    remove = 'x-amz-date'
-    e = _remove_header_create_bad_object(remove, v2_client)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 403
-    assert error_code == 'AccessDenied'
-
-@pytest.mark.auth_aws2
-def test_object_create_bad_date_before_today_aws2():
-    v2_client = get_v2_client()
-    headers = {'x-amz-date': 'Tue, 07 Jul 2010 21:53:04 GMT'}
-    e = _add_header_create_bad_object(headers, v2_client)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 403
-    assert error_code == 'RequestTimeTooSkewed'
-
-@pytest.mark.auth_aws2
-def test_object_create_bad_date_before_epoch_aws2():
-    v2_client = get_v2_client()
-    headers = {'x-amz-date': 'Tue, 07 Jul 1950 21:53:04 GMT'}
-    e = _add_header_create_bad_object(headers, v2_client)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 403
-    assert error_code == 'AccessDenied'
-
-@pytest.mark.auth_aws2
-def test_object_create_bad_date_after_end_aws2():
-    v2_client = get_v2_client()
-    headers = {'x-amz-date': 'Tue, 07 Jul 9999 21:53:04 GMT'}
-    e = _add_header_create_bad_object(headers, v2_client)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 403
-    assert error_code == 'RequestTimeTooSkewed'
-
-@pytest.mark.auth_aws2
-# TODO: remove 'fails_on_rgw' once we have learned how to manipulate the authorization header
-@pytest.mark.fails_on_rgw
-def test_bucket_create_bad_authorization_invalid_aws2():
-    v2_client = get_v2_client()
-    headers = {'Authorization': 'AWS HAHAHA'}
-    e = _add_header_create_bad_bucket(headers, v2_client)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 400
-    assert error_code == 'InvalidArgument'
-
-@pytest.mark.auth_aws2
-def test_bucket_create_bad_ua_empty_aws2():
-    v2_client = get_v2_client()
-    headers = {'User-Agent': ''}
-    _add_header_create_bucket(headers, v2_client)
-
-@pytest.mark.auth_aws2
-def test_bucket_create_bad_ua_none_aws2():
-    v2_client = get_v2_client()
-    remove = 'User-Agent'
-    _remove_header_create_bucket(remove, v2_client)
-
-@pytest.mark.auth_aws2
-def test_bucket_create_bad_date_invalid_aws2():
-    v2_client = get_v2_client()
-    headers = {'x-amz-date': 'Bad Date'}
-    e = _add_header_create_bad_bucket(headers, v2_client)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 403
-    assert error_code == 'AccessDenied'
-
-@pytest.mark.auth_aws2
-def test_bucket_create_bad_date_empty_aws2():
-    v2_client = get_v2_client()
-    headers = {'x-amz-date': ''}
-    e = _add_header_create_bad_bucket(headers, v2_client)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 403
-    assert error_code == 'AccessDenied'
-
-@pytest.mark.auth_aws2
-# TODO: remove 'fails_on_rgw' once we have learned how to remove the date header
-@pytest.mark.fails_on_rgw
-def test_bucket_create_bad_date_none_aws2():
-    v2_client = get_v2_client()
-    remove = 'x-amz-date'
-    e = _remove_header_create_bad_bucket(remove, v2_client)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 403
-    assert error_code == 'AccessDenied'
-
-@pytest.mark.auth_aws2
-def test_bucket_create_bad_date_before_today_aws2():
-    v2_client = get_v2_client()
-    headers = {'x-amz-date': 'Tue, 07 Jul 2010 21:53:04 GMT'}
-    e = _add_header_create_bad_bucket(headers, v2_client)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 403
-    assert error_code == 'RequestTimeTooSkewed'
-
-@pytest.mark.auth_aws2
-def test_bucket_create_bad_date_after_today_aws2():
-    v2_client = get_v2_client()
-    headers = {'x-amz-date': 'Tue, 07 Jul 2030 21:53:04 GMT'}
-    e = _add_header_create_bad_bucket(headers, v2_client)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 403
-    assert error_code == 'RequestTimeTooSkewed'
-
-@pytest.mark.auth_aws2
-def test_bucket_create_bad_date_before_epoch_aws2():
-    v2_client = get_v2_client()
-    headers = {'x-amz-date': 'Tue, 07 Jul 1950 21:53:04 GMT'}
-    e = _add_header_create_bad_bucket(headers, v2_client)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 403
-    assert error_code == 'AccessDenied'
diff --git a/s3tests_boto3/functional/test_iam.py b/s3tests_boto3/functional/test_iam.py
deleted file mode 100644 (file)
index fb288ce..0000000
+++ /dev/null
@@ -1,2803 +0,0 @@
-import json
-import datetime
-import time
-
-from botocore.exceptions import ClientError
-import pytest
-
-from s3tests_boto3.functional.utils import assert_raises
-from s3tests_boto3.functional.test_s3 import _multipart_upload
-from . import (
-    configfile,
-    setup_teardown,
-    get_alt_client,
-    get_iam_client,
-    get_iam_root_client,
-    get_iam_alt_root_client,
-    get_iam_alt_root_user_id,
-    get_iam_alt_root_email,
-    make_iam_name,
-    get_iam_path_prefix,
-    get_new_bucket,
-    get_new_bucket_name,
-    get_iam_s3client,
-    get_alt_iam_client,
-    get_alt_user_id,
-    get_sts_client,
-)
-from .utils import _get_status, _get_status_and_error_code
-from .iam import iam_root, iam_alt_root
-
-
-@pytest.mark.user_policy
-@pytest.mark.iam_tenant
-def test_put_user_policy():
-    client = get_iam_client()
-
-    policy_document = json.dumps(
-        {"Version": "2012-10-17",
-         "Statement": {
-             "Effect": "Allow",
-             "Action": "*",
-             "Resource": "*"}}
-    )
-    response = client.put_user_policy(PolicyDocument=policy_document, PolicyName='AllAccessPolicy',
-                                      UserName=get_alt_user_id())
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-    response = client.delete_user_policy(PolicyName='AllAccessPolicy',
-                                         UserName=get_alt_user_id())
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-
-@pytest.mark.user_policy
-@pytest.mark.iam_tenant
-def test_put_user_policy_invalid_user():
-    client = get_iam_client()
-
-    policy_document = json.dumps(
-        {"Version": "2012-10-17",
-         "Statement": {
-             "Effect": "Allow",
-             "Action": "*",
-             "Resource": "*"}}
-    )
-    e = assert_raises(ClientError, client.put_user_policy, PolicyDocument=policy_document,
-                      PolicyName='AllAccessPolicy', UserName="some-non-existing-user-id")
-    status = _get_status(e.response)
-    assert status == 404
-
-
-@pytest.mark.user_policy
-@pytest.mark.iam_tenant
-def test_put_user_policy_parameter_limit():
-    client = get_iam_client()
-
-    policy_document = json.dumps(
-        {"Version": "2012-10-17",
-         "Statement": [{
-             "Effect": "Allow",
-             "Action": "*",
-             "Resource": "*"}] * 1000
-         }
-    )
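-    # both the 1000-statement document and the oversized policy name exceed
-    # IAM's length limits, so the request is expected to be rejected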
-    e = assert_raises(ClientError, client.put_user_policy, PolicyDocument=policy_document,
-                      PolicyName='AllAccessPolicy' * 10, UserName=get_alt_user_id())
-    status = _get_status(e.response)
-    assert status == 400
-
-
-@pytest.mark.user_policy
-@pytest.mark.iam_tenant
-@pytest.mark.fails_on_rgw
-def test_put_user_policy_invalid_element():
-    client = get_iam_client()
-
-    # With Version other than 2012-10-17
-    policy_document = json.dumps(
-        {"Version": "2010-10-17",
-         "Statement": [{
-             "Effect": "Allow",
-             "Action": "*",
-             "Resource": "*"}]
-         }
-    )
-    e = assert_raises(ClientError, client.put_user_policy, PolicyDocument=policy_document,
-                      PolicyName='AllAccessPolicy', UserName=get_alt_user_id())
-    status = _get_status(e.response)
-    assert status == 400
-
-    # With no Statement
-    policy_document = json.dumps(
-        {
-            "Version": "2012-10-17",
-        }
-    )
-    e = assert_raises(ClientError, client.put_user_policy, PolicyDocument=policy_document,
-                      PolicyName='AllAccessPolicy', UserName=get_alt_user_id())
-    status = _get_status(e.response)
-    assert status == 400
-
-    # with same Sid for 2 statements
-    policy_document = json.dumps(
-        {"Version": "2012-10-17",
-         "Statement": [
-             {"Sid": "98AB54CF",
-              "Effect": "Allow",
-              "Action": "*",
-              "Resource": "*"},
-             {"Sid": "98AB54CF",
-              "Effect": "Allow",
-              "Action": "*",
-              "Resource": "*"}]
-         }
-    )
-    e = assert_raises(ClientError, client.put_user_policy, PolicyDocument=policy_document,
-                      PolicyName='AllAccessPolicy', UserName=get_alt_user_id())
-    status = _get_status(e.response)
-    assert status == 400
-
-    # with Principal
-    policy_document = json.dumps(
-        {"Version": "2012-10-17",
-         "Statement": [{
-             "Effect": "Allow",
-             "Action": "*",
-             "Resource": "*",
-             "Principal": "arn:aws:iam:::username"}]
-         }
-    )
-    e = assert_raises(ClientError, client.put_user_policy, PolicyDocument=policy_document,
-                      PolicyName='AllAccessPolicy', UserName=get_alt_user_id())
-    status = _get_status(e.response)
-    assert status == 400
-
-
-@pytest.mark.user_policy
-@pytest.mark.iam_tenant
-def test_put_existing_user_policy():
-    client = get_iam_client()
-
-    policy_document = json.dumps(
-        {"Version": "2012-10-17",
-         "Statement": {
-             "Effect": "Allow",
-             "Action": "*",
-             "Resource": "*"}
-         }
-    )
-    response = client.put_user_policy(PolicyDocument=policy_document, PolicyName='AllAccessPolicy',
-                                      UserName=get_alt_user_id())
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-    client.put_user_policy(PolicyDocument=policy_document, PolicyName='AllAccessPolicy',
-                           UserName=get_alt_user_id())
-    client.delete_user_policy(PolicyName='AllAccessPolicy', UserName=get_alt_user_id())
-
-
-@pytest.mark.user_policy
-@pytest.mark.iam_tenant
-def test_list_user_policy():
-    client = get_iam_client()
-
-    policy_document = json.dumps(
-        {"Version": "2012-10-17",
-         "Statement": {
-             "Effect": "Allow",
-             "Action": "*",
-             "Resource": "*"}
-         }
-    )
-    response = client.put_user_policy(PolicyDocument=policy_document, PolicyName='AllAccessPolicy',
-                                      UserName=get_alt_user_id())
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-    response = client.list_user_policies(UserName=get_alt_user_id())
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-    client.delete_user_policy(PolicyName='AllAccessPolicy', UserName=get_alt_user_id())
-
-
-@pytest.mark.user_policy
-@pytest.mark.iam_tenant
-def test_list_user_policy_invalid_user():
-    client = get_iam_client()
-    e = assert_raises(ClientError, client.list_user_policies, UserName="some-non-existing-user-id")
-    status = _get_status(e.response)
-    assert status == 404
-
-
-@pytest.mark.user_policy
-@pytest.mark.iam_tenant
-def test_get_user_policy():
-    client = get_iam_client()
-
-    policy_document = json.dumps(
-        {"Version": "2012-10-17",
-         "Statement": {
-             "Effect": "Allow",
-             "Action": "*",
-             "Resource": "*"}}
-    )
-    response = client.put_user_policy(PolicyDocument=policy_document, PolicyName='AllAccessPolicy',
-                                      UserName=get_alt_user_id())
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-    response = client.get_user_policy(PolicyName='AllAccessPolicy', UserName=get_alt_user_id())
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    response = client.delete_user_policy(PolicyName='AllAccessPolicy',
-                                         UserName=get_alt_user_id())
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-
-@pytest.mark.user_policy
-@pytest.mark.iam_tenant
-def test_get_user_policy_invalid_user():
-    client = get_iam_client()
-
-    policy_document = json.dumps(
-        {"Version": "2012-10-17",
-         "Statement": {
-             "Effect": "Allow",
-             "Action": "*",
-             "Resource": "*"}}
-    )
-    response = client.put_user_policy(PolicyDocument=policy_document, PolicyName='AllAccessPolicy',
-                                      UserName=get_alt_user_id())
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-    e = assert_raises(ClientError, client.get_user_policy, PolicyName='AllAccessPolicy',
-                      UserName="some-non-existing-user-id")
-    status = _get_status(e.response)
-    assert status == 404
-    client.delete_user_policy(PolicyName='AllAccessPolicy', UserName=get_alt_user_id())
-
-
-@pytest.mark.user_policy
-@pytest.mark.iam_tenant
-@pytest.mark.fails_on_rgw
-def test_get_user_policy_invalid_policy_name():
-    client = get_iam_client()
-
-    policy_document = json.dumps(
-        {"Version": "2012-10-17",
-         "Statement": {
-             "Effect": "Allow",
-             "Action": "*",
-             "Resource": "*"}}
-    )
-    client.put_user_policy(PolicyDocument=policy_document, PolicyName='AllAccessPolicy',
-                           UserName=get_alt_user_id())
-    e = assert_raises(ClientError, client.get_user_policy, PolicyName='non-existing-policy-name',
-                      UserName=get_alt_user_id())
-    status = _get_status(e.response)
-    assert status == 404
-    client.delete_user_policy(PolicyName='AllAccessPolicy', UserName=get_alt_user_id())
-
-
-@pytest.mark.user_policy
-@pytest.mark.iam_tenant
-@pytest.mark.fails_on_rgw
-def test_get_deleted_user_policy():
-    client = get_iam_client()
-
-    policy_document = json.dumps(
-        {"Version": "2012-10-17",
-         "Statement": {
-             "Effect": "Allow",
-             "Action": "*",
-             "Resource": "*"}}
-    )
-    client.put_user_policy(PolicyDocument=policy_document, PolicyName='AllAccessPolicy',
-                           UserName=get_alt_user_id())
-    client.delete_user_policy(PolicyName='AllAccessPolicy', UserName=get_alt_user_id())
-    e = assert_raises(ClientError, client.get_user_policy, PolicyName='AllAccessPolicy',
-                      UserName=get_alt_user_id())
-    status = _get_status(e.response)
-    assert status == 404
-
-
-@pytest.mark.user_policy
-@pytest.mark.iam_tenant
-def test_get_user_policy_from_multiple_policies():
-    client = get_iam_client()
-
-    policy_document_allow = json.dumps(
-        {"Version": "2012-10-17",
-         "Statement": {
-             "Effect": "Allow",
-             "Action": "*",
-             "Resource": "*"}}
-    )
-
-    response = client.put_user_policy(PolicyDocument=policy_document_allow,
-                                      PolicyName='AllowAccessPolicy1',
-                                      UserName=get_alt_user_id())
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-    response = client.put_user_policy(PolicyDocument=policy_document_allow,
-                                      PolicyName='AllowAccessPolicy2',
-                                      UserName=get_alt_user_id())
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-    response = client.get_user_policy(PolicyName='AllowAccessPolicy2',
-                                      UserName=get_alt_user_id())
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    response = client.delete_user_policy(PolicyName='AllowAccessPolicy1',
-                                         UserName=get_alt_user_id())
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-    response = client.delete_user_policy(PolicyName='AllowAccessPolicy2',
-                                         UserName=get_alt_user_id())
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-
-@pytest.mark.user_policy
-@pytest.mark.iam_tenant
-def test_delete_user_policy():
-    client = get_iam_client()
-
-    policy_document_allow = json.dumps(
-        {"Version": "2012-10-17",
-         "Statement": {
-             "Effect": "Allow",
-             "Action": "*",
-             "Resource": "*"}}
-    )
-
-    response = client.put_user_policy(PolicyDocument=policy_document_allow,
-                                      PolicyName='AllowAccessPolicy',
-                                      UserName=get_alt_user_id())
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-    response = client.delete_user_policy(PolicyName='AllowAccessPolicy',
-                                         UserName=get_alt_user_id())
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-
-@pytest.mark.user_policy
-@pytest.mark.iam_tenant
-def test_delete_user_policy_invalid_user():
-    client = get_iam_client()
-
-    policy_document_allow = json.dumps(
-        {"Version": "2012-10-17",
-         "Statement": {
-             "Effect": "Allow",
-             "Action": "*",
-             "Resource": "*"}}
-    )
-
-    response = client.put_user_policy(PolicyDocument=policy_document_allow,
-                                      PolicyName='AllowAccessPolicy',
-                                      UserName=get_alt_user_id())
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-    e = assert_raises(ClientError, client.delete_user_policy, PolicyName='AllAccessPolicy',
-                      UserName="some-non-existing-user-id")
-    status = _get_status(e.response)
-    assert status == 404
-    response = client.delete_user_policy(PolicyName='AllowAccessPolicy',
-                                         UserName=get_alt_user_id())
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-
-@pytest.mark.user_policy
-@pytest.mark.iam_tenant
-def test_delete_user_policy_invalid_policy_name():
-    client = get_iam_client()
-
-    policy_document_allow = json.dumps(
-        {"Version": "2012-10-17",
-         "Statement": {
-             "Effect": "Allow",
-             "Action": "*",
-             "Resource": "*"}}
-    )
-
-    response = client.put_user_policy(PolicyDocument=policy_document_allow,
-                                      PolicyName='AllowAccessPolicy',
-                                      UserName=get_alt_user_id())
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-    e = assert_raises(ClientError, client.delete_user_policy, PolicyName='non-existing-policy-name',
-                      UserName=get_alt_user_id())
-    status = _get_status(e.response)
-    assert status == 404
-    response = client.delete_user_policy(PolicyName='AllowAccessPolicy',
-                                         UserName=get_alt_user_id())
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-
-@pytest.mark.user_policy
-@pytest.mark.iam_tenant
-def test_delete_user_policy_from_multiple_policies():
-    client = get_iam_client()
-
-    policy_document_allow = json.dumps(
-        {"Version": "2012-10-17",
-         "Statement": {
-             "Effect": "Allow",
-             "Action": "*",
-             "Resource": "*"}}
-    )
-
-    response = client.put_user_policy(PolicyDocument=policy_document_allow,
-                                      PolicyName='AllowAccessPolicy1',
-                                      UserName=get_alt_user_id())
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-    response = client.put_user_policy(PolicyDocument=policy_document_allow,
-                                      PolicyName='AllowAccessPolicy2',
-                                      UserName=get_alt_user_id())
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-    response = client.put_user_policy(PolicyDocument=policy_document_allow,
-                                      PolicyName='AllowAccessPolicy3',
-                                      UserName=get_alt_user_id())
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-    response = client.delete_user_policy(PolicyName='AllowAccessPolicy1',
-                                         UserName=get_alt_user_id())
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-    response = client.delete_user_policy(PolicyName='AllowAccessPolicy2',
-                                         UserName=get_alt_user_id())
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-    response = client.get_user_policy(PolicyName='AllowAccessPolicy3',
-                                      UserName=get_alt_user_id())
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    response = client.delete_user_policy(PolicyName='AllowAccessPolicy3',
-                                         UserName=get_alt_user_id())
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-
-@pytest.mark.user_policy
-@pytest.mark.iam_tenant
-def test_allow_bucket_actions_in_user_policy():
-    client = get_iam_client()
-    s3_client_alt = get_alt_client()
-
-    s3_client_iam = get_iam_s3client()
-    bucket = get_new_bucket(client=s3_client_iam)
-    s3_client_iam.put_object(Bucket=bucket, Key='foo', Body='bar')
-
-    policy_document_allow = json.dumps(
-        {"Version": "2012-10-17",
-         "Statement": {
-             "Effect": "Allow",
-             "Action": ["s3:ListBucket", "s3:DeleteBucket"],
-             "Resource": f"arn:aws:s3:::{bucket}"}}
-    )
-
-    response = client.put_user_policy(PolicyDocument=policy_document_allow,
-                                      PolicyName='AllowAccessPolicy', UserName=get_alt_user_id())
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    response = s3_client_alt.list_objects(Bucket=bucket)
-    object_found = False
-    for object_received in response['Contents']:
-        if "foo" == object_received['Key']:
-            object_found = True
-            break
-    if not object_found:
-        raise AssertionError("Object is not listed")
-
-    response = s3_client_iam.delete_object(Bucket=bucket, Key='foo')
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 204
-
-    response = s3_client_alt.delete_bucket(Bucket=bucket)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 204
-
-    response = s3_client_iam.list_buckets()
-    for listed_bucket in response['Buckets']:
-        if listed_bucket['Name'] == bucket:
-            raise AssertionError("deleted bucket is getting listed")
-
-    response = client.delete_user_policy(PolicyName='AllowAccessPolicy',
-                                         UserName=get_alt_user_id())
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-
-@pytest.mark.user_policy
-@pytest.mark.iam_tenant
-@pytest.mark.fails_on_dbstore
-def test_deny_bucket_actions_in_user_policy():
-    client = get_iam_client()
-    s3_client = get_alt_client()
-    bucket = get_new_bucket(client=s3_client)
-
-    policy_document_deny = json.dumps(
-        {"Version": "2012-10-17",
-         "Statement": {
-             "Effect": "Deny",
-             "Action": ["s3:ListAllMyBuckets", "s3:DeleteBucket"],
-             "Resource": "arn:aws:s3:::*"}}
-    )
-
-    response = client.put_user_policy(PolicyDocument=policy_document_deny,
-                                      PolicyName='DenyAccessPolicy',
-                                      UserName=get_alt_user_id())
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    e = assert_raises(ClientError, s3_client.list_buckets)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 403
-    assert error_code == 'AccessDenied'
-    e = assert_raises(ClientError, s3_client.delete_bucket, Bucket=bucket)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 403
-    assert error_code == 'AccessDenied'
-    response = client.delete_user_policy(PolicyName='DenyAccessPolicy',
-                                         UserName=get_alt_user_id())
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-    response = s3_client.delete_bucket(Bucket=bucket)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 204
-
-
-@pytest.mark.user_policy
-@pytest.mark.iam_tenant
-def test_allow_object_actions_in_user_policy():
-    client = get_iam_client()
-    s3_client_alt = get_alt_client()
-    s3_client_iam = get_iam_s3client()
-    bucket = get_new_bucket(client=s3_client_iam)
-
-    policy_document_allow = json.dumps(
-        {"Version": "2012-10-17",
-         "Statement": {
-             "Effect": "Allow",
-             "Action": ["s3:PutObject", "s3:GetObject", "s3:DeleteObject"],
-             "Resource": f"arn:aws:s3:::{bucket}/*"}}
-    )
-    response = client.put_user_policy(PolicyDocument=policy_document_allow,
-                                      PolicyName='AllowAccessPolicy', UserName=get_alt_user_id())
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    s3_client_alt.put_object(Bucket=bucket, Key='foo', Body='bar')
-    response = s3_client_alt.get_object(Bucket=bucket, Key='foo')
-    body = response['Body'].read()
-    if type(body) is bytes:
-        body = body.decode()
-    assert body == "bar"
-    response = s3_client_alt.delete_object(Bucket=bucket, Key='foo')
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 204
-
-    e = assert_raises(ClientError, s3_client_iam.get_object, Bucket=bucket, Key='foo')
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 404
-    assert error_code == 'NoSuchKey'
-    response = s3_client_iam.delete_bucket(Bucket=bucket)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 204
-    response = client.delete_user_policy(PolicyName='AllowAccessPolicy',
-                                         UserName=get_alt_user_id())
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-
-@pytest.mark.user_policy
-@pytest.mark.iam_tenant
-@pytest.mark.fails_on_dbstore
-def test_deny_object_actions_in_user_policy():
-    client = get_iam_client()
-    s3_client_alt = get_alt_client()
-    bucket = get_new_bucket(client=s3_client_alt)
-    s3_client_alt.put_object(Bucket=bucket, Key='foo', Body='bar')
-
-    policy_document_deny = json.dumps(
-        {"Version": "2012-10-17",
-         "Statement": [{
-             "Effect": "Deny",
-             "Action": ["s3:PutObject", "s3:GetObject", "s3:DeleteObject"],
-             "Resource": f"arn:aws:s3:::{bucket}/*"}, {
-             "Effect": "Allow",
-             "Action": ["s3:DeleteBucket"],
-             "Resource": f"arn:aws:s3:::{bucket}"}]}
-    )
-    client.put_user_policy(PolicyDocument=policy_document_deny, PolicyName='DenyAccessPolicy',
-                           UserName=get_alt_user_id())
-
-    e = assert_raises(ClientError, s3_client_alt.put_object, Bucket=bucket, Key='foo')
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 403
-    assert error_code == 'AccessDenied'
-    e = assert_raises(ClientError, s3_client_alt.get_object, Bucket=bucket, Key='foo')
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 403
-    assert error_code == 'AccessDenied'
-    e = assert_raises(ClientError, s3_client_alt.delete_object, Bucket=bucket, Key='foo')
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 403
-    assert error_code == 'AccessDenied'
-
-    response = client.delete_user_policy(PolicyName='DenyAccessPolicy',
-                                         UserName=get_alt_user_id())
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-
-@pytest.mark.user_policy
-@pytest.mark.iam_tenant
-def test_allow_multipart_actions_in_user_policy():
-    client = get_iam_client()
-    s3_client_alt = get_alt_client()
-    s3_client_iam = get_iam_s3client()
-    bucket = get_new_bucket(client=s3_client_iam)
-
-    policy_document_allow = json.dumps(
-        {"Version": "2012-10-17",
-         "Statement": {
-             "Effect": "Allow",
-             "Action": ["s3:ListBucketMultipartUploads", "s3:AbortMultipartUpload"],
-             "Resource": "arn:aws:s3:::*"}}
-    )
-    response = client.put_user_policy(PolicyDocument=policy_document_allow,
-                                      PolicyName='AllowAccessPolicy', UserName=get_alt_user_id())
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-    key = "mymultipart"
-    mb = 1024 * 1024
-
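-    # start a 5MiB multipart upload as the bucket owner; only the upload id is
-    # needed for the alt user's list/abort calls below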
-    (upload_id, _, _) = _multipart_upload(client=s3_client_iam, bucket_name=bucket, key=key,
-                                          size=5 * mb)
-    response = s3_client_alt.list_multipart_uploads(Bucket=bucket)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-    response = s3_client_alt.abort_multipart_upload(Bucket=bucket, Key=key, UploadId=upload_id)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 204
-
-    response = s3_client_iam.delete_bucket(Bucket=bucket)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 204
-    response = client.delete_user_policy(PolicyName='AllowAccessPolicy',
-                                         UserName=get_alt_user_id())
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-
-@pytest.mark.user_policy
-@pytest.mark.iam_tenant
-@pytest.mark.fails_on_dbstore
-def test_deny_multipart_actions_in_user_policy():
-    client = get_iam_client()
-    s3_client = get_alt_client()
-    bucket = get_new_bucket(client=s3_client)
-
-    policy_document_deny = json.dumps(
-        {"Version": "2012-10-17",
-         "Statement": {
-             "Effect": "Deny",
-             "Action": ["s3:ListBucketMultipartUploads", "s3:AbortMultipartUpload"],
-             "Resource": "arn:aws:s3:::*"}}
-    )
-    response = client.put_user_policy(PolicyDocument=policy_document_deny,
-                                      PolicyName='DenyAccessPolicy',
-                                      UserName=get_alt_user_id())
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-    key = "mymultipart"
-    mb = 1024 * 1024
-
-    (upload_id, _, _) = _multipart_upload(client=s3_client, bucket_name=bucket, key=key,
-                                          size=5 * mb)
-
-    e = assert_raises(ClientError, s3_client.list_multipart_uploads, Bucket=bucket)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 403
-    assert error_code == 'AccessDenied'
-
-    e = assert_raises(ClientError, s3_client.abort_multipart_upload, Bucket=bucket,
-                      Key=key, UploadId=upload_id)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 403
-    assert error_code == 'AccessDenied'
-
-    response = s3_client.delete_bucket(Bucket=bucket)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 204
-    response = client.delete_user_policy(PolicyName='DenyAccessPolicy',
-                                         UserName=get_alt_user_id())
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-
-@pytest.mark.user_policy
-@pytest.mark.iam_tenant
-@pytest.mark.fails_on_dbstore
-def test_allow_tagging_actions_in_user_policy():
-    client = get_iam_client()
-    s3_client_alt = get_alt_client()
-    s3_client_iam = get_iam_s3client()
-    bucket = get_new_bucket(client=s3_client_iam)
-
-    policy_document_allow = json.dumps(
-        {"Version": "2012-10-17",
-         "Statement": {
-             "Effect": "Allow",
-             "Action": ["s3:PutBucketTagging", "s3:GetBucketTagging",
-                        "s3:PutObjectTagging", "s3:GetObjectTagging"],
-             "Resource": f"arn:aws:s3:::*"}}
-    )
-    client.put_user_policy(PolicyDocument=policy_document_allow, PolicyName='AllowAccessPolicy',
-                           UserName=get_alt_user_id())
-    tags = {'TagSet': [{'Key': 'Hello', 'Value': 'World'}, ]}
-
-    response = s3_client_alt.put_bucket_tagging(Bucket=bucket, Tagging=tags)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-    response = s3_client_alt.get_bucket_tagging(Bucket=bucket)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-    assert response['TagSet'][0]['Key'] == 'Hello'
-    assert response['TagSet'][0]['Value'] == 'World'
-
-    obj_key = 'obj'
-    response = s3_client_iam.put_object(Bucket=bucket, Key=obj_key, Body='obj_body')
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-    response = s3_client_alt.put_object_tagging(Bucket=bucket, Key=obj_key, Tagging=tags)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-    response = s3_client_alt.get_object_tagging(Bucket=bucket, Key=obj_key)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-    assert response['TagSet'] == tags['TagSet']
-
-    response = s3_client_iam.delete_object(Bucket=bucket, Key=obj_key)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 204
-    response = s3_client_iam.delete_bucket(Bucket=bucket)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 204
-    response = client.delete_user_policy(PolicyName='AllowAccessPolicy',
-                                         UserName=get_alt_user_id())
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-
-@pytest.mark.user_policy
-@pytest.mark.iam_tenant
-@pytest.mark.fails_on_dbstore
-def test_deny_tagging_actions_in_user_policy():
-    client = get_iam_client()
-    s3_client = get_alt_client()
-    bucket = get_new_bucket(client=s3_client)
-
-    policy_document_deny = json.dumps(
-        {"Version": "2012-10-17",
-         "Statement": {
-             "Effect": "Deny",
-             "Action": ["s3:PutBucketTagging", "s3:GetBucketTagging",
-                        "s3:PutObjectTagging", "s3:DeleteObjectTagging"],
-             "Resource": "arn:aws:s3:::*"}}
-    )
-    client.put_user_policy(PolicyDocument=policy_document_deny, PolicyName='DenyAccessPolicy',
-                           UserName=get_alt_user_id())
-    tags = {'TagSet': [{'Key': 'Hello', 'Value': 'World'}, ]}
-
-    e = assert_raises(ClientError, s3_client.put_bucket_tagging, Bucket=bucket, Tagging=tags)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 403
-    assert error_code == 'AccessDenied'
-    e = assert_raises(ClientError, s3_client.get_bucket_tagging, Bucket=bucket)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 403
-    assert error_code == 'AccessDenied'
-
-    obj_key = 'obj'
-    response = s3_client.put_object(Bucket=bucket, Key=obj_key, Body='obj_body')
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    e = assert_raises(ClientError, s3_client.put_object_tagging, Bucket=bucket, Key=obj_key,
-                      Tagging=tags)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 403
-    assert error_code == 'AccessDenied'
-    e = assert_raises(ClientError, s3_client.delete_object_tagging, Bucket=bucket, Key=obj_key)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 403
-    assert error_code == 'AccessDenied'
-
-    response = s3_client.delete_object(Bucket=bucket, Key=obj_key)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 204
-    response = s3_client.delete_bucket(Bucket=bucket)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 204
-    response = client.delete_user_policy(PolicyName='DenyAccessPolicy',
-                                         UserName=get_alt_user_id())
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-
-@pytest.mark.user_policy
-@pytest.mark.iam_tenant
-@pytest.mark.fails_on_dbstore
-def test_verify_conflicting_user_policy_statements():
-    s3client = get_alt_client()
-    bucket = get_new_bucket(client=s3client)
-    policy_document = json.dumps(
-        {"Version": "2012-10-17",
-         "Statement": [
-             {"Sid": "98AB54CG",
-              "Effect": "Allow",
-              "Action": "s3:ListBucket",
-              "Resource": f"arn:aws:s3:::{bucket}"},
-             {"Sid": "98AB54CA",
-              "Effect": "Deny",
-              "Action": "s3:ListBucket",
-              "Resource": f"arn:aws:s3:::{bucket}"}
-         ]}
-    )
-    client = get_iam_client()
-    response = client.put_user_policy(PolicyDocument=policy_document, PolicyName='DenyAccessPolicy',
-                                      UserName=get_alt_user_id())
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
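-    # an explicit Deny overrides the Allow statement, so listing must be refused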
-    e = assert_raises(ClientError, s3client.list_objects, Bucket=bucket)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 403
-    assert error_code == 'AccessDenied'
-    response = client.delete_user_policy(PolicyName='DenyAccessPolicy',
-                                         UserName=get_alt_user_id())
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-
-@pytest.mark.user_policy
-@pytest.mark.iam_tenant
-@pytest.mark.fails_on_dbstore
-def test_verify_conflicting_user_policies():
-    s3client = get_alt_client()
-    bucket = get_new_bucket(client=s3client)
-    policy_allow = json.dumps(
-        {"Version": "2012-10-17",
-         "Statement": {"Sid": "98AB54CG",
-                       "Effect": "Allow",
-                       "Action": "s3:ListBucket",
-                       "Resource": f"arn:aws:s3:::{bucket}"}}
-    )
-    policy_deny = json.dumps(
-        {"Version": "2012-10-17",
-         "Statement": {"Sid": "98AB54CGZ",
-                       "Effect": "Deny",
-                       "Action": "s3:ListBucket",
-                       "Resource": f"arn:aws:s3:::{bucket}"}}
-    )
-    client = get_iam_client()
-    response = client.put_user_policy(PolicyDocument=policy_allow, PolicyName='AllowAccessPolicy',
-                                      UserName=get_alt_user_id())
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-    response = client.put_user_policy(PolicyDocument=policy_deny, PolicyName='DenyAccessPolicy',
-                                      UserName=get_alt_user_id())
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-    e = assert_raises(ClientError, s3client.list_objects, Bucket=bucket)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 403
-    assert error_code == 'AccessDenied'
-    response = client.delete_user_policy(PolicyName='AllowAccessPolicy',
-                                         UserName=get_alt_user_id())
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-    response = client.delete_user_policy(PolicyName='DenyAccessPolicy',
-                                         UserName=get_alt_user_id())
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-
-@pytest.mark.user_policy
-@pytest.mark.iam_tenant
-def test_verify_allow_iam_actions():
-    policy1 = json.dumps(
-        {"Version": "2012-10-17",
-         "Statement": {"Sid": "98AB54CGA",
-                       "Effect": "Allow",
-                       "Action": ["iam:PutUserPolicy", "iam:GetUserPolicy",
-                                  "iam:ListUserPolicies", "iam:DeleteUserPolicy"],
-                       "Resource": f"arn:aws:iam:::user/{get_alt_user_id()}"}}
-    )
-    client1 = get_iam_client()
-    iam_client_alt = get_alt_iam_client()
-
-    response = client1.put_user_policy(PolicyDocument=policy1, PolicyName='AllowAccessPolicy',
-                                       UserName=get_alt_user_id())
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-    response = iam_client_alt.get_user_policy(PolicyName='AllowAccessPolicy',
-                                       UserName=get_alt_user_id())
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-    response = iam_client_alt.list_user_policies(UserName=get_alt_user_id())
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-    response = iam_client_alt.delete_user_policy(PolicyName='AllowAccessPolicy',
-                                          UserName=get_alt_user_id())
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-
-# IAM User apis
-@pytest.mark.iam_account
-@pytest.mark.iam_user
-def test_account_user_create(iam_root):
-    path = get_iam_path_prefix()
-    name1 = make_iam_name('U1')
-    response = iam_root.create_user(UserName=name1, Path=path)
-    user = response['User']
-    assert user['Path'] == path
-    assert user['UserName'] == name1
-    assert len(user['UserId'])
-    assert user['Arn'].startswith('arn:aws:iam:')
-    assert user['Arn'].endswith(f':user{path}{name1}')
-    assert user['CreateDate'] > datetime.datetime(1970, 1, 1, tzinfo=datetime.timezone.utc)
-
-    path2 = get_iam_path_prefix() + 'foo/'
-    with pytest.raises(iam_root.exceptions.EntityAlreadyExistsException):
-        iam_root.create_user(UserName=name1, Path=path2)
-
-    name2 = make_iam_name('U2')
-    response = iam_root.create_user(UserName=name2, Path=path2)
-    user = response['User']
-    assert user['Path'] == path2
-    assert user['UserName'] == name2
-
-@pytest.mark.iam_account
-@pytest.mark.iam_user
-def test_account_user_case_insensitive_name(iam_root):
-    path = get_iam_path_prefix()
-    name_upper = make_iam_name('U1')
-    name_lower = make_iam_name('u1')
-    response = iam_root.create_user(UserName=name_upper, Path=path)
-    user = response['User']
-
-    # name is case-insensitive, so 'u1' should also conflict
-    with pytest.raises(iam_root.exceptions.EntityAlreadyExistsException):
-        iam_root.create_user(UserName=name_lower)
-
-    # search for 'u1' should return the same 'U1' user
-    response = iam_root.get_user(UserName=name_lower)
-    assert user == response['User']
-
-    # delete for 'u1' should delete the same 'U1' user
-    iam_root.delete_user(UserName=name_lower)
-
-    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
-        iam_root.get_user(UserName=name_lower)
-
-@pytest.mark.iam_account
-@pytest.mark.iam_user
-def test_account_user_delete(iam_root):
-    path = get_iam_path_prefix()
-    name = make_iam_name('U1')
-    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
-        iam_root.delete_user(UserName=name)
-
-    response = iam_root.create_user(UserName=name, Path=path)
-    uid = response['User']['UserId']
-    create_date = response['User']['CreateDate']
-
-    iam_root.delete_user(UserName=name)
-
-    response = iam_root.create_user(UserName=name, Path=path)
-    assert uid != response['User']['UserId']
-    assert create_date <= response['User']['CreateDate']
-
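-# helper: paginate list_users and collect the UserName of every returned user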
-def user_list_names(client, **kwargs):
-    p = client.get_paginator('list_users')
-    usernames = []
-    for response in p.paginate(**kwargs):
-        usernames += [u['UserName'] for u in response['Users']]
-    return usernames
-
-@pytest.mark.iam_account
-@pytest.mark.iam_user
-def test_account_user_list(iam_root):
-    path = get_iam_path_prefix()
-    response = iam_root.list_users(PathPrefix=path)
-    assert len(response['Users']) == 0
-    assert response['IsTruncated'] == False
-
-    name1 = make_iam_name('aa')
-    name2 = make_iam_name('Ab')
-    name3 = make_iam_name('ac')
-    name4 = make_iam_name('Ad')
-
-    # sort order is independent of CreateDate, Path, and UserName capitalization
-    iam_root.create_user(UserName=name4, Path=path+'w/')
-    iam_root.create_user(UserName=name3, Path=path+'x/')
-    iam_root.create_user(UserName=name2, Path=path+'y/')
-    iam_root.create_user(UserName=name1, Path=path+'z/')
-
-    assert [name1, name2, name3, name4] == \
-            user_list_names(iam_root, PathPrefix=path)
-    assert [name1, name2, name3, name4] == \
-            user_list_names(iam_root, PathPrefix=path, PaginationConfig={'PageSize': 1})
-
-@pytest.mark.iam_account
-@pytest.mark.iam_user
-def test_account_user_list_path_prefix(iam_root):
-    path = get_iam_path_prefix()
-    response = iam_root.list_users(PathPrefix=path)
-    assert len(response['Users']) == 0
-    assert response['IsTruncated'] == False
-
-    name1 = make_iam_name('a')
-    name2 = make_iam_name('b')
-    name3 = make_iam_name('c')
-    name4 = make_iam_name('d')
-
-    iam_root.create_user(UserName=name1, Path=path)
-    iam_root.create_user(UserName=name2, Path=path)
-    iam_root.create_user(UserName=name3, Path=path+'a/')
-    iam_root.create_user(UserName=name4, Path=path+'a/x/')
-
-    assert [name1, name2, name3, name4] == \
-            user_list_names(iam_root, PathPrefix=path)
-    assert [name1, name2, name3, name4] == \
-            user_list_names(iam_root, PathPrefix=path,
-                            PaginationConfig={'PageSize': 1})
-    assert [name3, name4] == \
-            user_list_names(iam_root, PathPrefix=path+'a')
-    assert [name3, name4] == \
-            user_list_names(iam_root, PathPrefix=path+'a',
-                            PaginationConfig={'PageSize': 1})
-    assert [name4] == \
-            user_list_names(iam_root, PathPrefix=path+'a/x')
-    assert [name4] == \
-            user_list_names(iam_root, PathPrefix=path+'a/x',
-                            PaginationConfig={'PageSize': 1})
-    assert [] == user_list_names(iam_root, PathPrefix=path+'a/x/d')
-
-@pytest.mark.iam_account
-@pytest.mark.iam_user
-def test_account_user_update_name(iam_root):
-    path = get_iam_path_prefix()
-    name1 = make_iam_name('a')
-    new_name1 = make_iam_name('z')
-    name2 = make_iam_name('b')
-    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
-        iam_root.update_user(UserName=name1, NewUserName=new_name1)
-
-    iam_root.create_user(UserName=name1, Path=path)
-    iam_root.create_user(UserName=name2, Path=path+'m/')
-    assert [name1, name2] == user_list_names(iam_root, PathPrefix=path)
-
-    response = iam_root.get_user(UserName=name1)
-    assert name1 == response['User']['UserName']
-    uid = response['User']['UserId']
-
-    iam_root.update_user(UserName=name1, NewUserName=new_name1)
-
-    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
-        iam_root.get_user(UserName=name1)
-
-    response = iam_root.get_user(UserName=new_name1)
-    assert new_name1 == response['User']['UserName']
-    assert uid == response['User']['UserId']
-    assert response['User']['Arn'].endswith(f':user{path}{new_name1}')
-
-    assert [name2, new_name1] == user_list_names(iam_root, PathPrefix=path)
-
-@pytest.mark.iam_account
-@pytest.mark.iam_user
-def test_account_user_update_path(iam_root):
-    path = get_iam_path_prefix()
-    name1 = make_iam_name('a')
-    name2 = make_iam_name('b')
-    iam_root.create_user(UserName=name1, Path=path)
-    iam_root.create_user(UserName=name2, Path=path+'m/')
-    assert [name1, name2] == user_list_names(iam_root, PathPrefix=path)
-
-    response = iam_root.get_user(UserName=name1)
-    assert name1 == response['User']['UserName']
-    assert path == response['User']['Path']
-    uid = response['User']['UserId']
-
-    iam_root.update_user(UserName=name1, NewPath=path+'z/')
-
-    response = iam_root.get_user(UserName=name1)
-    assert name1 == response['User']['UserName']
-    assert f'{path}z/' == response['User']['Path']
-    assert uid == response['User']['UserId']
-    assert response['User']['Arn'].endswith(f':user{path}z/{name1}')
-
-    assert [name1, name2] == user_list_names(iam_root, PathPrefix=path)
-
-
-# IAM AccessKey apis
-@pytest.mark.iam_account
-@pytest.mark.iam_user
-def test_account_user_access_key_create(iam_root):
-    path = get_iam_path_prefix()
-    name = make_iam_name('a')
-    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
-        iam_root.create_access_key(UserName=name)
-
-    iam_root.create_user(UserName=name, Path=path)
-
-    response = iam_root.create_access_key(UserName=name)
-    key = response['AccessKey']
-    assert name == key['UserName']
-    assert len(key['AccessKeyId'])
-    assert len(key['SecretAccessKey'])
-    assert 'Active' == key['Status']
-    assert key['CreateDate'] > datetime.datetime(1970, 1, 1, tzinfo=datetime.timezone.utc)
-
-@pytest.mark.iam_account
-@pytest.mark.iam_user
-def test_account_current_user_access_key_create(iam_root):
-    # omit the UserName argument to operate on the current authenticated
-    # user (assumed to be an account root user)
-
-    response = iam_root.create_access_key()
-    key = response['AccessKey']
-    keyid = key['AccessKeyId']
-    assert len(keyid)
-    try:
-        assert len(key['SecretAccessKey'])
-        assert 'Active' == key['Status']
-        assert key['CreateDate'] > datetime.datetime(1970, 1, 1, tzinfo=datetime.timezone.utc)
-    finally:
-        # iam_root doesn't see the account root user, so clean up
-        # this key manually
-        iam_root.delete_access_key(AccessKeyId=keyid)
-
-@pytest.mark.iam_account
-@pytest.mark.iam_user
-def test_account_user_access_key_update(iam_root):
-    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
-        iam_root.update_access_key(UserName='nosuchuser', AccessKeyId='abcdefghijklmnopqrstu', Status='Active')
-
-    path = get_iam_path_prefix()
-    name = make_iam_name('a')
-    iam_root.create_user(UserName=name, Path=path)
-
-    response = iam_root.create_access_key(UserName=name)
-    key = response['AccessKey']
-    keyid = key['AccessKeyId']
-    create_date = key['CreateDate']
-    assert create_date > datetime.datetime(1970, 1, 1, tzinfo=datetime.timezone.utc)
-
-    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
-        iam_root.update_access_key(UserName=name, AccessKeyId='abcdefghijklmnopqrstu', Status='Active')
-
-    iam_root.update_access_key(UserName=name, AccessKeyId=keyid, Status='Active')
-    iam_root.update_access_key(UserName=name, AccessKeyId=keyid, Status='Inactive')
-
-    response = iam_root.list_access_keys(UserName=name)
-    keys = response['AccessKeyMetadata']
-    assert 1 == len(keys)
-    key = keys[0]
-    assert name == key['UserName']
-    assert keyid == key['AccessKeyId']
-    assert 'Inactive' == key['Status']
-    assert create_date == key['CreateDate'] # CreateDate unchanged by update_access_key()
-
-@pytest.mark.iam_account
-@pytest.mark.iam_user
-def test_account_current_user_access_key_update(iam_root):
-    # omit the UserName argument to operate on the current authenticated
-    # user (assumed to be an account root user)
-
-    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
-        iam_root.update_access_key(AccessKeyId='abcdefghijklmnopqrstu', Status='Active')
-
-    response = iam_root.create_access_key()
-    key = response['AccessKey']
-    keyid = key['AccessKeyId']
-    assert len(keyid)
-    try:
-        iam_root.update_access_key(AccessKeyId=keyid, Status='Active')
-        iam_root.update_access_key(AccessKeyId=keyid, Status='Inactive')
-
-        # find the access key id we created
-        p = iam_root.get_paginator('list_access_keys')
-        for response in p.paginate():
-            for key in response['AccessKeyMetadata']:
-                if keyid == key['AccessKeyId']:
-                    assert 'Inactive' == key['Status']
-                    return
-        assert False, f'AccessKeyId={keyid} not found in list_access_keys()'
-
-    finally:
-        # iam_root doesn't see the account root user, so clean up
-        # this key manually
-        iam_root.delete_access_key(AccessKeyId=keyid)
-
-@pytest.mark.iam_account
-@pytest.mark.iam_user
-def test_account_user_access_key_delete(iam_root):
-    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
-        iam_root.delete_access_key(UserName='nosuchuser', AccessKeyId='abcdefghijklmnopqrstu')
-
-    path = get_iam_path_prefix()
-    name = make_iam_name('a')
-    iam_root.create_user(UserName=name, Path=path)
-
-    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
-        iam_root.delete_access_key(UserName=name, AccessKeyId='abcdefghijklmnopqrstu')
-
-    response = iam_root.create_access_key(UserName=name)
-    keyid = response['AccessKey']['AccessKeyId']
-
-    iam_root.delete_access_key(UserName=name, AccessKeyId=keyid)
-
-    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
-        iam_root.delete_access_key(UserName=name, AccessKeyId=keyid)
-
-    response = iam_root.list_access_keys(UserName=name)
-    keys = response['AccessKeyMetadata']
-    assert 0 == len(keys)
-
-@pytest.mark.iam_account
-@pytest.mark.iam_user
-def test_account_current_user_access_key_delete(iam_root):
-    # omit the UserName argument to operate on the current authenticated
-    # user (assumed to be an account root user)
-
-    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
-        iam_root.delete_access_key(AccessKeyId='abcdefghijklmnopqrstu')
-
-    response = iam_root.create_access_key()
-    keyid = response['AccessKey']['AccessKeyId']
-
-    iam_root.delete_access_key(AccessKeyId=keyid)
-
-    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
-        iam_root.delete_access_key(AccessKeyId=keyid)
-
-    # make sure list_access_keys() doesn't return the access key id we deleted
-    p = iam_root.get_paginator('list_access_keys')
-    for response in p.paginate():
-        for key in response['AccessKeyMetadata']:
-            assert keyid != key['AccessKeyId']
-
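-# helper: paginate list_access_keys and collect every AccessKeyId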
-def user_list_key_ids(client, **kwargs):
-    p = client.get_paginator('list_access_keys')
-    ids = []
-    for response in p.paginate(**kwargs):
-        ids += [k['AccessKeyId'] for k in response['AccessKeyMetadata']]
-    return ids
-
-@pytest.mark.iam_account
-@pytest.mark.iam_user
-def test_account_user_access_key_list(iam_root):
-    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
-        iam_root.list_access_keys(UserName='nosuchuser')
-
-    path = get_iam_path_prefix()
-    name = make_iam_name('a')
-    iam_root.create_user(UserName=name, Path=path)
-
-    assert [] == user_list_key_ids(iam_root, UserName=name)
-    assert [] == user_list_key_ids(iam_root, UserName=name, PaginationConfig={'PageSize': 1})
-
-    id1 = iam_root.create_access_key(UserName=name)['AccessKey']['AccessKeyId']
-
-    assert [id1] == user_list_key_ids(iam_root, UserName=name)
-    assert [id1] == user_list_key_ids(iam_root, UserName=name, PaginationConfig={'PageSize': 1})
-
-    id2 = iam_root.create_access_key(UserName=name)['AccessKey']['AccessKeyId']
-    # AccessKeysPerUser=2 is the default quota in aws
-
-    keys = sorted([id1, id2])
-    assert keys == sorted(user_list_key_ids(iam_root, UserName=name))
-    assert keys == sorted(user_list_key_ids(iam_root, UserName=name, PaginationConfig={'PageSize': 1}))
-
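-# helper: call func(*args, **kwargs), retrying up to 'tries' times with an
-# increasing delay while the request keeps failing with the given error code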
-def retry_on(code, tries, func, *args, **kwargs):
-    for i in range(tries):
-        try:
-            return func(*args, **kwargs)
-        except ClientError as e:
-            err = e.response['Error']['Code']
-            if i + 1 < tries and err in code:
-                print(f'Got {err}, retrying in {i}s..')
-                time.sleep(i)
-                continue
-            raise
-
-
-@pytest.mark.iam_account
-@pytest.mark.iam_user
-def test_account_user_bucket_policy_allow(iam_root):
-    path = get_iam_path_prefix()
-    name = make_iam_name('name')
-    response = iam_root.create_user(UserName=name, Path=path)
-    user_arn = response['User']['Arn']
-    assert user_arn.startswith('arn:aws:iam:')
-    assert user_arn.endswith(f':user{path}{name}')
-
-    key = iam_root.create_access_key(UserName=name)['AccessKey']
-    client = get_iam_s3client(aws_access_key_id=key['AccessKeyId'],
-                              aws_secret_access_key=key['SecretAccessKey'])
-
-    # create a bucket with the root user
-    roots3 = get_iam_root_client(service_name='s3')
-    bucket = get_new_bucket(roots3)
-    try:
-        # the access key may take a bit to start working. retry until it returns
-        # something other than InvalidAccessKeyId
-        e = assert_raises(ClientError, retry_on, 'InvalidAccessKeyId', 10, client.list_objects, Bucket=bucket)
-        # expect AccessDenied because no identity policy allows s3 actions
-        status, error_code = _get_status_and_error_code(e.response)
-        assert status == 403
-        assert error_code == 'AccessDenied'
-
-        # add a bucket policy that allows s3:ListBucket for the iam user's arn
-        policy = json.dumps({
-            'Version': '2012-10-17',
-            'Statement': [{
-                'Effect': 'Allow',
-                'Principal': {'AWS': user_arn},
-                'Action': 's3:ListBucket',
-                'Resource': f'arn:aws:s3:::{bucket}'
-                }]
-            })
-        roots3.put_bucket_policy(Bucket=bucket, Policy=policy)
-
-        # verify that the iam user can eventually access it
-        retry_on('AccessDenied', 10, client.list_objects, Bucket=bucket)
-    finally:
-        roots3.delete_bucket(Bucket=bucket)
-
-
-# IAM UserPolicy apis
-@pytest.mark.user_policy
-@pytest.mark.iam_account
-def test_account_user_policy(iam_root):
-    path = get_iam_path_prefix()
-    name = make_iam_name('name')
-    policy_name = 'List'
-    bucket_name = get_new_bucket_name()
-    policy1 = json.dumps({'Version': '2012-10-17', 'Statement': [
-        {'Effect': 'Deny',
-         'Action': 's3:ListBucket',
-         'Resource': f'arn:aws:s3:::{bucket_name}'}]})
-    policy2 = json.dumps({'Version': '2012-10-17', 'Statement': [
-        {'Effect': 'Allow',
-         'Action': 's3:ListBucket',
-         'Resource': f'arn:aws:s3:::{bucket_name}'}]})
-
-    # Get/Put/Delete fail on nonexistent UserName
-    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
-        iam_root.get_user_policy(UserName=name, PolicyName=policy_name)
-    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
-        iam_root.delete_user_policy(UserName=name, PolicyName=policy_name)
-    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
-        iam_root.put_user_policy(UserName=name, PolicyName=policy_name, PolicyDocument=policy1)
-
-    iam_root.create_user(UserName=name, Path=path)
-
-    # Get/Delete fail on nonexistent PolicyName
-    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
-        iam_root.get_user_policy(UserName=name, PolicyName=policy_name)
-    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
-        iam_root.delete_user_policy(UserName=name, PolicyName=policy_name)
-
-    iam_root.put_user_policy(UserName=name, PolicyName=policy_name, PolicyDocument=policy1)
-
-    response = iam_root.get_user_policy(UserName=name, PolicyName=policy_name)
-    assert policy1 == json.dumps(response['PolicyDocument'])
-    response = iam_root.list_user_policies(UserName=name)
-    assert [policy_name] == response['PolicyNames']
-
-    iam_root.put_user_policy(UserName=name, PolicyName=policy_name, PolicyDocument=policy2)
-
-    response = iam_root.get_user_policy(UserName=name, PolicyName=policy_name)
-    assert policy2 == json.dumps(response['PolicyDocument'])
-    response = iam_root.list_user_policies(UserName=name)
-    assert [policy_name] == response['PolicyNames']
-
-    iam_root.delete_user_policy(UserName=name, PolicyName=policy_name)
-
-    # Get/Delete fail after Delete
-    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
-        iam_root.get_user_policy(UserName=name, PolicyName=policy_name)
-    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
-        iam_root.delete_user_policy(UserName=name, PolicyName=policy_name)
-
-    response = iam_root.list_user_policies(UserName=name)
-    assert [] == response['PolicyNames']
-
-@pytest.mark.user_policy
-@pytest.mark.iam_account
-def test_account_user_policy_managed(iam_root):
-    path = get_iam_path_prefix()
-    name = make_iam_name('name')
-    policy1 = 'arn:aws:iam::aws:policy/AmazonS3FullAccess'
-    policy2 = 'arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess'
-
-    # Attach/Detach/List fail on nonexistent UserName
-    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
-        iam_root.attach_user_policy(UserName=name, PolicyArn=policy1)
-    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
-        iam_root.detach_user_policy(UserName=name, PolicyArn=policy1)
-    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
-        iam_root.list_attached_user_policies(UserName=name)
-
-    iam_root.create_user(UserName=name, Path=path)
-
-    # Detach fails on unattached PolicyArn
-    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
-        iam_root.detach_user_policy(UserName=name, PolicyArn=policy1)
-
-    iam_root.attach_user_policy(UserName=name, PolicyArn=policy1)
-    iam_root.attach_user_policy(UserName=name, PolicyArn=policy1)
-
-    response = iam_root.list_attached_user_policies(UserName=name)
-    assert len(response['AttachedPolicies']) == 1
-    assert 'AmazonS3FullAccess' == response['AttachedPolicies'][0]['PolicyName']
-    assert policy1 == response['AttachedPolicies'][0]['PolicyArn']
-
-    iam_root.attach_user_policy(UserName=name, PolicyArn=policy2)
-
-    response = iam_root.list_attached_user_policies(UserName=name)
-    policies = response['AttachedPolicies']
-    assert len(policies) == 2
-    names = [p['PolicyName'] for p in policies]
-    arns = [p['PolicyArn'] for p in policies]
-    assert 'AmazonS3FullAccess' in names
-    assert policy1 in arns
-    assert 'AmazonS3ReadOnlyAccess' in names
-    assert policy2 in arns
-
-    iam_root.detach_user_policy(UserName=name, PolicyArn=policy2)
-
-    # Detach fails after Detach
-    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
-        iam_root.detach_user_policy(UserName=name, PolicyArn=policy2)
-
-    response = iam_root.list_attached_user_policies(UserName=name)
-    assert len(response['AttachedPolicies']) == 1
-    assert 'AmazonS3FullAccess' == response['AttachedPolicies'][0]['PolicyName']
-    assert policy1 == response['AttachedPolicies'][0]['PolicyArn']
-
-    # DeleteUser fails while policies are still attached
-    with pytest.raises(iam_root.exceptions.DeleteConflictException):
-        iam_root.delete_user(UserName=name)
-
-@pytest.mark.user_policy
-@pytest.mark.iam_account
-def test_account_user_policy_allow(iam_root):
-    path = get_iam_path_prefix()
-    name = make_iam_name('name')
-    bucket_name = get_new_bucket_name()
-    iam_root.create_user(UserName=name, Path=path)
-
-    key = iam_root.create_access_key(UserName=name)['AccessKey']
-    client = get_iam_s3client(aws_access_key_id=key['AccessKeyId'],
-                              aws_secret_access_key=key['SecretAccessKey'])
-
-    # the access key may take a bit to start working. retry until it returns
-    # something other than InvalidAccessKeyId
-    e = assert_raises(ClientError, retry_on, 'InvalidAccessKeyId', 10, client.list_buckets)
-    # expect AccessDenied because no identity policy allows s3 actions
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 403
-    assert error_code == 'AccessDenied'
-
-    # add a user policy that allows s3 actions
-    policy = json.dumps({
-        'Version': '2012-10-17',
-        'Statement': [{
-            'Effect': 'Allow',
-            'Action': 's3:*',
-            'Resource': '*'
-            }]
-        })
-    policy_name = 'AllowStar'
-    iam_root.put_user_policy(UserName=name, PolicyName=policy_name, PolicyDocument=policy)
-
-    # the policy may take a bit to start working. retry until it returns
-    # something other than AccessDenied
-    retry_on('AccessDenied', 10, client.list_buckets)
-
-
-def group_list_names(client, **kwargs):
-    p = client.get_paginator('list_groups')
-    names = []
-    for response in p.paginate(**kwargs):
-        names += [u['GroupName'] for u in response['Groups']]
-    return names
-
-# IAM Group apis
-@pytest.mark.group
-@pytest.mark.iam_account
-def test_account_group_create(iam_root):
-    path = get_iam_path_prefix()
-    name = make_iam_name('G1')
-
-    assert [] == group_list_names(iam_root, PathPrefix=path)
-
-    response = iam_root.create_group(GroupName=name, Path=path)
-    group = response['Group']
-    assert path == group['Path']
-    assert name == group['GroupName']
-    assert len(group['GroupId'])
-    arn = group['Arn']
-    assert arn.startswith('arn:aws:iam:')
-    assert arn.endswith(f':group{path}{name}')
-
-    with pytest.raises(iam_root.exceptions.EntityAlreadyExistsException):
-        iam_root.create_group(GroupName=name)
-
-    response = iam_root.get_group(GroupName=name)
-    assert group == response['Group']
-
-    assert [name] == group_list_names(iam_root, PathPrefix=path)
-
-    iam_root.delete_group(GroupName=name)
-
-    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
-        iam_root.get_group(GroupName=name)
-
-    assert [] == group_list_names(iam_root, PathPrefix=path)
-
-@pytest.mark.iam_account
-@pytest.mark.group
-def test_account_group_case_insensitive_name(iam_root):
-    path = get_iam_path_prefix()
-    name_upper = make_iam_name('G1')
-    name_lower = make_iam_name('g1')
-    response = iam_root.create_group(GroupName=name_upper, Path=path)
-    group = response['Group']
-
-    with pytest.raises(iam_root.exceptions.EntityAlreadyExistsException):
-        iam_root.create_group(GroupName=name_lower)
-
-    response = iam_root.get_group(GroupName=name_lower)
-    assert group == response['Group']
-
-    iam_root.delete_group(GroupName=name_lower)
-
-    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
-        iam_root.delete_group(GroupName=name_upper)
-
-@pytest.mark.iam_account
-@pytest.mark.group
-def test_account_group_list(iam_root):
-    path = get_iam_path_prefix()
-    response = iam_root.list_groups(PathPrefix=path)
-    assert len(response['Groups']) == 0
-    assert response['IsTruncated'] == False
-
-    name1 = make_iam_name('aa')
-    name2 = make_iam_name('Ab')
-    name3 = make_iam_name('ac')
-    name4 = make_iam_name('Ad')
-
-    # sort order is independent of CreateDate, Path, and GroupName capitalization
-    iam_root.create_group(GroupName=name4, Path=path+'w/')
-    iam_root.create_group(GroupName=name3, Path=path+'x/')
-    iam_root.create_group(GroupName=name2, Path=path+'y/')
-    iam_root.create_group(GroupName=name1, Path=path+'z/')
-
-    assert [name1, name2, name3, name4] == \
-            group_list_names(iam_root, PathPrefix=path)
-    assert [name1, name2, name3, name4] == \
-            group_list_names(iam_root, PathPrefix=path, PaginationConfig={'PageSize': 1})
-
-@pytest.mark.group
-@pytest.mark.iam_account
-def test_account_group_update(iam_root):
-    path = get_iam_path_prefix()
-    name = make_iam_name('G1')
-    response = iam_root.create_group(GroupName=name, Path=path)
-    group_id = response['Group']['GroupId']
-
-    username = make_iam_name('U1')
-    iam_root.create_user(UserName=username, Path=path)
-
-    iam_root.add_user_to_group(GroupName=name, UserName=username)
-
-    response = iam_root.list_groups_for_user(UserName=username)
-    groups = response['Groups']
-    assert len(groups) == 1
-    assert path == groups[0]['Path']
-    assert name == groups[0]['GroupName']
-    assert group_id == groups[0]['GroupId']
-
-    new_path = path + 'new/'
-    new_name = make_iam_name('NG1')
-    iam_root.update_group(GroupName=name, NewPath=new_path, NewGroupName=new_name)
-
-    response = iam_root.get_group(GroupName=new_name)
-    group = response['Group']
-    assert new_path == group['Path']
-    assert new_name == group['GroupName']
-    assert group_id == group['GroupId']
-    arn = group['Arn']
-    assert arn.startswith('arn:aws:iam:')
-    assert arn.endswith(f':group{new_path}{new_name}')
-    users = response['Users']
-    assert len(users) == 1
-    assert username == users[0]['UserName']
-
-    response = iam_root.list_groups_for_user(UserName=username)
-    groups = response['Groups']
-    assert len(groups) == 1
-    assert new_path == groups[0]['Path']
-    assert new_name == groups[0]['GroupName']
-    assert group_id == groups[0]['GroupId']
-
-
-# IAM GroupPolicy apis
-@pytest.mark.group_policy
-@pytest.mark.iam_account
-def test_account_inline_group_policy(iam_root):
-    path = get_iam_path_prefix()
-    name = make_iam_name('name')
-    policy_name = 'List'
-    bucket_name = get_new_bucket_name()
-    policy1 = json.dumps({'Version': '2012-10-17', 'Statement': [
-        {'Effect': 'Deny',
-         'Action': 's3:ListBucket',
-         'Resource': f'arn:aws:s3:::{bucket_name}'}]})
-    policy2 = json.dumps({'Version': '2012-10-17', 'Statement': [
-        {'Effect': 'Allow',
-         'Action': 's3:ListBucket',
-         'Resource': f'arn:aws:s3:::{bucket_name}'}]})
-
-    # Get/Put/Delete fail on nonexistent GroupName
-    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
-        iam_root.get_group_policy(GroupName=name, PolicyName=policy_name)
-    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
-        iam_root.delete_group_policy(GroupName=name, PolicyName=policy_name)
-    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
-        iam_root.put_group_policy(GroupName=name, PolicyName=policy_name, PolicyDocument=policy1)
-
-    iam_root.create_group(GroupName=name, Path=path)
-
-    # Get/Delete fail on nonexistent PolicyName
-    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
-        iam_root.get_group_policy(GroupName=name, PolicyName=policy_name)
-    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
-        iam_root.delete_group_policy(GroupName=name, PolicyName=policy_name)
-
-    iam_root.put_group_policy(GroupName=name, PolicyName=policy_name, PolicyDocument=policy1)
-
-    response = iam_root.get_group_policy(GroupName=name, PolicyName=policy_name)
-    assert policy1 == json.dumps(response['PolicyDocument'])
-    response = iam_root.list_group_policies(GroupName=name)
-    assert [policy_name] == response['PolicyNames']
-
-    iam_root.put_group_policy(GroupName=name, PolicyName=policy_name, PolicyDocument=policy2)
-
-    response = iam_root.get_group_policy(GroupName=name, PolicyName=policy_name)
-    assert policy2 == json.dumps(response['PolicyDocument'])
-    response = iam_root.list_group_policies(GroupName=name)
-    assert [policy_name] == response['PolicyNames']
-
-    # DeleteGroup fails while policies are still attached
-    with pytest.raises(iam_root.exceptions.DeleteConflictException):
-        iam_root.delete_group(GroupName=name)
-
-    iam_root.delete_group_policy(GroupName=name, PolicyName=policy_name)
-
-    # Get/Delete fail after Delete
-    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
-        iam_root.get_group_policy(GroupName=name, PolicyName=policy_name)
-    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
-        iam_root.delete_group_policy(GroupName=name, PolicyName=policy_name)
-
-    response = iam_root.list_group_policies(GroupName=name)
-    assert [] == response['PolicyNames']
-
-@pytest.mark.group_policy
-@pytest.mark.iam_account
-def test_account_managed_group_policy(iam_root):
-    path = get_iam_path_prefix()
-    name = make_iam_name('name')
-    policy1 = 'arn:aws:iam::aws:policy/AmazonS3FullAccess'
-    policy2 = 'arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess'
-
-    # Attach/Detach/List fail on nonexistent GroupName
-    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
-        iam_root.attach_group_policy(GroupName=name, PolicyArn=policy1)
-    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
-        iam_root.detach_group_policy(GroupName=name, PolicyArn=policy1)
-    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
-        iam_root.list_attached_group_policies(GroupName=name)
-
-    iam_root.create_group(GroupName=name, Path=path)
-
-    # Detach fails on unattached PolicyArn
-    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
-        iam_root.detach_group_policy(GroupName=name, PolicyArn=policy1)
-
-    iam_root.attach_group_policy(GroupName=name, PolicyArn=policy1)
-    iam_root.attach_group_policy(GroupName=name, PolicyArn=policy1)
-
-    response = iam_root.list_attached_group_policies(GroupName=name)
-    assert len(response['AttachedPolicies']) == 1
-    assert 'AmazonS3FullAccess' == response['AttachedPolicies'][0]['PolicyName']
-    assert policy1 == response['AttachedPolicies'][0]['PolicyArn']
-
-    iam_root.attach_group_policy(GroupName=name, PolicyArn=policy2)
-
-    response = iam_root.list_attached_group_policies(GroupName=name)
-    policies = response['AttachedPolicies']
-    assert len(policies) == 2
-    names = [p['PolicyName'] for p in policies]
-    arns = [p['PolicyArn'] for p in policies]
-    assert 'AmazonS3FullAccess' in names
-    assert policy1 in arns
-    assert 'AmazonS3ReadOnlyAccess' in names
-    assert policy2 in arns
-
-    iam_root.detach_group_policy(GroupName=name, PolicyArn=policy2)
-
-    # Detach fails after Detach
-    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
-        iam_root.detach_group_policy(GroupName=name, PolicyArn=policy2)
-
-    response = iam_root.list_attached_group_policies(GroupName=name)
-    assert len(response['AttachedPolicies']) == 1
-    assert 'AmazonS3FullAccess' == response['AttachedPolicies'][0]['PolicyName']
-    assert policy1 == response['AttachedPolicies'][0]['PolicyArn']
-
-    # DeleteGroup fails while policies are still attached
-    with pytest.raises(iam_root.exceptions.DeleteConflictException):
-        iam_root.delete_group(GroupName=name)
-
-@pytest.mark.group_policy
-@pytest.mark.iam_account
-def test_account_inline_group_policy_allow(iam_root):
-    path = get_iam_path_prefix()
-    username = make_iam_name('User')
-    groupname = make_iam_name('Group')
-    bucket_name = get_new_bucket_name()
-
-    iam_root.create_user(UserName=username, Path=path)
-
-    key = iam_root.create_access_key(UserName=username)['AccessKey']
-    client = get_iam_s3client(aws_access_key_id=key['AccessKeyId'],
-                              aws_secret_access_key=key['SecretAccessKey'])
-
-    iam_root.create_group(GroupName=groupname, Path=path)
-    iam_root.add_user_to_group(GroupName=groupname, UserName=username)
-
-    # the access key may take a bit to start working. retry until it returns
-    # something other than InvalidAccessKeyId
-    e = assert_raises(ClientError, retry_on, 'InvalidAccessKeyId', 10, client.list_buckets)
-    # expect AccessDenied because no identity policy allows s3 actions
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 403
-    assert error_code == 'AccessDenied'
-
-    # add a group policy that allows s3 actions
-    policy = json.dumps({
-        'Version': '2012-10-17',
-        'Statement': [{
-            'Effect': 'Allow',
-            'Action': 's3:*',
-            'Resource': '*'
-            }]
-        })
-    policy_name = 'AllowStar'
-    iam_root.put_group_policy(GroupName=groupname, PolicyName=policy_name, PolicyDocument=policy)
-
-    # the policy may take a bit to start working. retry until it returns
-    # something other than AccessDenied
-    retry_on('AccessDenied', 10, client.list_buckets)
-
-@pytest.mark.group_policy
-@pytest.mark.iam_account
-def test_account_managed_group_policy_allow(iam_root):
-    path = get_iam_path_prefix()
-    username = make_iam_name('User')
-    groupname = make_iam_name('Group')
-    bucket_name = get_new_bucket_name()
-
-    iam_root.create_user(UserName=username, Path=path)
-
-    key = iam_root.create_access_key(UserName=username)['AccessKey']
-    client = get_iam_s3client(aws_access_key_id=key['AccessKeyId'],
-                              aws_secret_access_key=key['SecretAccessKey'])
-
-    iam_root.create_group(GroupName=groupname, Path=path)
-    iam_root.add_user_to_group(GroupName=groupname, UserName=username)
-
-    # the access key may take a bit to start working. retry until it returns
-    # something other than InvalidAccessKeyId
-    e = assert_raises(ClientError, retry_on, 'InvalidAccessKeyId', 10, client.list_buckets)
-    # expect AccessDenied because no identity policy allows s3 actions
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 403
-    assert error_code == 'AccessDenied'
-
-    # add a group policy that allows s3 read actions
-    policy_arn = 'arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess'
-    iam_root.attach_group_policy(GroupName=groupname, PolicyArn=policy_arn)
-
-    # the policy may take a bit to start working. retry until it returns
-    # something other than AccessDenied
-    retry_on('AccessDenied', 10, client.list_buckets)
-
-
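-# trust policy shared by the role tests below. Principal 'AWS': '*' lets any
-# authenticated principal attempt sts:AssumeRole on roles created with it; the
-# cross-account tests further down build narrower trust policies that name a
-# specific user arn instead.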
-assume_role_policy = json.dumps({
-    'Version': '2012-10-17',
-    'Statement': [{
-        'Effect': 'Allow',
-        'Action': 'sts:AssumeRole',
-        'Principal': {'AWS': '*'}
-        }]
-    })
-
-# IAM Role apis
-@pytest.mark.iam_account
-@pytest.mark.iam_role
-def test_account_role_create(iam_root):
-    path = get_iam_path_prefix()
-    name1 = make_iam_name('R1')
-    desc = 'my role description'
-    max_duration = 43200
-    response = iam_root.create_role(RoleName=name1, Path=path, AssumeRolePolicyDocument=assume_role_policy, Description=desc, MaxSessionDuration=max_duration)
-    role = response['Role']
-    assert role['Path'] == path
-    assert role['RoleName'] == name1
-    assert assume_role_policy == json.dumps(role['AssumeRolePolicyDocument'])
-    assert len(role['RoleId'])
-    arn = role['Arn']
-    assert arn.startswith('arn:aws:iam:')
-    assert arn.endswith(f':role{path}{name1}')
-    assert role['CreateDate'] > datetime.datetime(1970, 1, 1, tzinfo=datetime.timezone.utc)
-    # AWS doesn't include these for CreateRole, only GetRole
-    #assert desc == role['Description']
-    #assert max_duration == role['MaxSessionDuration']
-
-    response = iam_root.get_role(RoleName=name1)
-    role = response['Role']
-    assert arn == role['Arn']
-    assert desc == role['Description']
-    assert max_duration == role['MaxSessionDuration']
-
-    path2 = get_iam_path_prefix() + 'foo/'
-    with pytest.raises(iam_root.exceptions.EntityAlreadyExistsException):
-        iam_root.create_role(RoleName=name1, Path=path2, AssumeRolePolicyDocument=assume_role_policy)
-
-    name2 = make_iam_name('R2')
-    response = iam_root.create_role(RoleName=name2, Path=path2, AssumeRolePolicyDocument=assume_role_policy)
-    role = response['Role']
-    assert role['Path'] == path2
-    assert role['RoleName'] == name2
-
-@pytest.mark.iam_account
-@pytest.mark.iam_role
-def test_account_role_case_insensitive_name(iam_root):
-    path = get_iam_path_prefix()
-    name_upper = make_iam_name('R1')
-    name_lower = make_iam_name('r1')
-    response = iam_root.create_role(RoleName=name_upper, Path=path, AssumeRolePolicyDocument=assume_role_policy)
-    rid = response['Role']['RoleId']
-
-    # name is case-insensitive, so 'r1' should also conflict
-    with pytest.raises(iam_root.exceptions.EntityAlreadyExistsException):
-        iam_root.create_role(RoleName=name_lower, AssumeRolePolicyDocument=assume_role_policy)
-
-    # search for 'r1' should return the same 'R1' role
-    response = iam_root.get_role(RoleName=name_lower)
-    assert rid == response['Role']['RoleId']
-
-    # delete for 'r1' should delete the same 'R1' role
-    iam_root.delete_role(RoleName=name_lower)
-
-    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
-        iam_root.get_role(RoleName=name_lower)
-
-@pytest.mark.iam_account
-@pytest.mark.iam_role
-def test_account_role_delete(iam_root):
-    path = get_iam_path_prefix()
-    name = make_iam_name('U1')
-    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
-        iam_root.delete_role(RoleName=name)
-
-    response = iam_root.create_role(RoleName=name, Path=path, AssumeRolePolicyDocument=assume_role_policy)
-    uid = response['Role']['RoleId']
-    create_date = response['Role']['CreateDate']
-
-    iam_root.delete_role(RoleName=name)
-
-    response = iam_root.create_role(RoleName=name, Path=path, AssumeRolePolicyDocument=assume_role_policy)
-    assert uid != response['Role']['RoleId']
-    assert create_date <= response['Role']['CreateDate']
-
-def role_list_names(client, **kwargs):
-    p = client.get_paginator('list_roles')
-    rolenames = []
-    for response in p.paginate(**kwargs):
-        rolenames += [u['RoleName'] for u in response['Roles']]
-    return rolenames
-
-@pytest.mark.iam_account
-@pytest.mark.iam_role
-def test_account_role_list(iam_root):
-    path = get_iam_path_prefix()
-    response = iam_root.list_roles(PathPrefix=path)
-    assert len(response['Roles']) == 0
-    assert response['IsTruncated'] == False
-
-    name1 = make_iam_name('aa')
-    name2 = make_iam_name('Ab')
-    name3 = make_iam_name('ac')
-    name4 = make_iam_name('Ad')
-
-    # sort order is independent of CreateDate, Path, and RoleName capitalization
-    iam_root.create_role(RoleName=name4, Path=path+'w/', AssumeRolePolicyDocument=assume_role_policy)
-    iam_root.create_role(RoleName=name3, Path=path+'x/', AssumeRolePolicyDocument=assume_role_policy)
-    iam_root.create_role(RoleName=name2, Path=path+'y/', AssumeRolePolicyDocument=assume_role_policy)
-    iam_root.create_role(RoleName=name1, Path=path+'z/', AssumeRolePolicyDocument=assume_role_policy)
-
-    assert [name1, name2, name3, name4] == \
-            role_list_names(iam_root, PathPrefix=path)
-    assert [name1, name2, name3, name4] == \
-            role_list_names(iam_root, PathPrefix=path, PaginationConfig={'PageSize': 1})
-
-@pytest.mark.iam_account
-@pytest.mark.iam_role
-def test_account_role_list_path_prefix(iam_root):
-    path = get_iam_path_prefix()
-    response = iam_root.list_roles(PathPrefix=path)
-    assert len(response['Roles']) == 0
-    assert response['IsTruncated'] == False
-
-    name1 = make_iam_name('a')
-    name2 = make_iam_name('b')
-    name3 = make_iam_name('c')
-    name4 = make_iam_name('d')
-
-    iam_root.create_role(RoleName=name1, Path=path, AssumeRolePolicyDocument=assume_role_policy)
-    iam_root.create_role(RoleName=name2, Path=path, AssumeRolePolicyDocument=assume_role_policy)
-    iam_root.create_role(RoleName=name3, Path=path+'a/', AssumeRolePolicyDocument=assume_role_policy)
-    iam_root.create_role(RoleName=name4, Path=path+'a/x/', AssumeRolePolicyDocument=assume_role_policy)
-
-    assert [name1, name2, name3, name4] == \
-            role_list_names(iam_root, PathPrefix=path)
-    assert [name1, name2, name3, name4] == \
-            role_list_names(iam_root, PathPrefix=path,
-                            PaginationConfig={'PageSize': 1})
-    assert [name3, name4] == \
-            role_list_names(iam_root, PathPrefix=path+'a')
-    assert [name3, name4] == \
-            role_list_names(iam_root, PathPrefix=path+'a',
-                            PaginationConfig={'PageSize': 1})
-    assert [name4] == \
-            role_list_names(iam_root, PathPrefix=path+'a/x')
-    assert [name4] == \
-            role_list_names(iam_root, PathPrefix=path+'a/x',
-                            PaginationConfig={'PageSize': 1})
-    assert [] == role_list_names(iam_root, PathPrefix=path+'a/x/d')
-
-@pytest.mark.iam_account
-@pytest.mark.iam_role
-def test_account_role_update(iam_root):
-    path = get_iam_path_prefix()
-    name = make_iam_name('a')
-    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
-        iam_root.update_role(RoleName=name)
-
-    iam_root.create_role(RoleName=name, Path=path, AssumeRolePolicyDocument=assume_role_policy)
-
-    response = iam_root.get_role(RoleName=name)
-    assert name == response['Role']['RoleName']
-    arn = response['Role']['Arn']
-    rid = response['Role']['RoleId']
-
-    desc = 'my role description'
-    iam_root.update_role(RoleName=name, Description=desc, MaxSessionDuration=43200)
-
-    response = iam_root.get_role(RoleName=name)
-    assert rid == response['Role']['RoleId']
-    assert arn == response['Role']['Arn']
-    assert desc == response['Role']['Description']
-    assert 43200 == response['Role']['MaxSessionDuration']
-
-
-role_policy = json.dumps({
-    'Version': '2012-10-17',
-    'Statement': [{
-        'Effect': 'Allow',
-        'Action': 's3:*',
-        'Resource': '*'
-        }]
-    })
-
-# IAM RolePolicy apis
-@pytest.mark.iam_account
-@pytest.mark.iam_role
-@pytest.mark.role_policy
-def test_account_role_policy(iam_root):
-    path = get_iam_path_prefix()
-    role_name = make_iam_name('r')
-    policy_name = 'MyPolicy'
-    policy2_name = 'AnotherPolicy'
-
-    # Get/Put/Delete fail on nonexistent RoleName
-    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
-        iam_root.get_role_policy(RoleName=role_name, PolicyName=policy_name)
-    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
-        iam_root.delete_role_policy(RoleName=role_name, PolicyName=policy_name)
-    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
-        iam_root.put_role_policy(RoleName=role_name, PolicyName=policy_name, PolicyDocument=role_policy)
-
-    iam_root.create_role(RoleName=role_name, Path=path, AssumeRolePolicyDocument=assume_role_policy)
-
-    # Get/Delete fail on nonexistent PolicyName
-    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
-        iam_root.get_role_policy(RoleName=role_name, PolicyName=policy_name)
-    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
-        iam_root.delete_role_policy(RoleName=role_name, PolicyName=policy_name)
-
-    iam_root.put_role_policy(RoleName=role_name, PolicyName=policy_name, PolicyDocument=role_policy)
-
-    response = iam_root.get_role_policy(RoleName=role_name, PolicyName=policy_name)
-    assert role_name == response['RoleName']
-    assert policy_name == response['PolicyName']
-    assert role_policy == json.dumps(response['PolicyDocument'])
-
-    response = iam_root.list_role_policies(RoleName=role_name)
-    assert [policy_name] == response['PolicyNames']
-
-    iam_root.put_role_policy(RoleName=role_name, PolicyName=policy2_name, PolicyDocument=role_policy)
-
-    response = iam_root.list_role_policies(RoleName=role_name)
-    assert [policy2_name, policy_name] == response['PolicyNames']
-
-    iam_root.delete_role_policy(RoleName=role_name, PolicyName=policy_name)
-    iam_root.delete_role_policy(RoleName=role_name, PolicyName=policy2_name)
-
-    # Get/Delete fail after Delete
-    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
-        iam_root.get_role_policy(RoleName=role_name, PolicyName=policy_name)
-    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
-        iam_root.delete_role_policy(RoleName=role_name, PolicyName=policy_name)
-
-@pytest.mark.role_policy
-@pytest.mark.iam_account
-def test_account_role_policy_managed(iam_root):
-    path = get_iam_path_prefix()
-    name = make_iam_name('name')
-    policy1 = 'arn:aws:iam::aws:policy/AmazonS3FullAccess'
-    policy2 = 'arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess'
-
-    # Attach/Detach/List fail on nonexistent RoleName
-    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
-        iam_root.attach_role_policy(RoleName=name, PolicyArn=policy1)
-    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
-        iam_root.detach_role_policy(RoleName=name, PolicyArn=policy1)
-    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
-        iam_root.list_attached_role_policies(RoleName=name)
-
-    iam_root.create_role(RoleName=name, Path=path, AssumeRolePolicyDocument=assume_role_policy)
-
-    # Detach fails on unattached PolicyArn
-    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
-        iam_root.detach_role_policy(RoleName=name, PolicyArn=policy1)
-
-    iam_root.attach_role_policy(RoleName=name, PolicyArn=policy1)
-    iam_root.attach_role_policy(RoleName=name, PolicyArn=policy1)
-
-    response = iam_root.list_attached_role_policies(RoleName=name)
-    assert len(response['AttachedPolicies']) == 1
-    assert 'AmazonS3FullAccess' == response['AttachedPolicies'][0]['PolicyName']
-    assert policy1 == response['AttachedPolicies'][0]['PolicyArn']
-
-    iam_root.attach_role_policy(RoleName=name, PolicyArn=policy2)
-
-    response = iam_root.list_attached_role_policies(RoleName=name)
-    policies = response['AttachedPolicies']
-    assert len(policies) == 2
-    names = [p['PolicyName'] for p in policies]
-    arns = [p['PolicyArn'] for p in policies]
-    assert 'AmazonS3FullAccess' in names
-    assert policy1 in arns
-    assert 'AmazonS3ReadOnlyAccess' in names
-    assert policy2 in arns
-
-    iam_root.detach_role_policy(RoleName=name, PolicyArn=policy2)
-
-    # Detach fails after Detach
-    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
-        iam_root.detach_role_policy(RoleName=name, PolicyArn=policy2)
-
-    response = iam_root.list_attached_role_policies(RoleName=name)
-    assert len(response['AttachedPolicies']) == 1
-    assert 'AmazonS3FullAccess' == response['AttachedPolicies'][0]['PolicyName']
-    assert policy1 == response['AttachedPolicies'][0]['PolicyArn']
-
-    # DeleteRole fails while policies are still attached
-    with pytest.raises(iam_root.exceptions.DeleteConflictException):
-        iam_root.delete_role(RoleName=name)
-
-@pytest.mark.iam_account
-@pytest.mark.iam_role
-@pytest.mark.role_policy
-def test_account_role_policy_allow(iam_root):
-    path = get_iam_path_prefix()
-    user_name = make_iam_name('MyUser')
-    role_name = make_iam_name('MyRole')
-    session_name = 'MySession'
-
-    user = iam_root.create_user(UserName=user_name, Path=path)['User']
-    user_arn = user['Arn']
-
-    trust_policy = json.dumps({
-        'Version': '2012-10-17',
-        'Statement': [{
-            'Effect': 'Allow',
-            'Action': 'sts:AssumeRole',
-            'Principal': {'AWS': user_arn}
-            }]
-        })
-    # returns MalformedPolicyDocument until the user arn starts working
-    role = retry_on('MalformedPolicyDocument', 10, iam_root.create_role,
-                    RoleName=role_name, Path=path, AssumeRolePolicyDocument=trust_policy)['Role']
-    role_arn = role['Arn']
-
-    key = iam_root.create_access_key(UserName=user_name)['AccessKey']
-    sts = get_sts_client(aws_access_key_id=key['AccessKeyId'],
-                         aws_secret_access_key=key['SecretAccessKey'])
-
-    # returns InvalidClientTokenId or AccessDenied until the access key starts working
-    response = retry_on(('InvalidClientTokenId', 'AccessDenied'), 10, sts.assume_role,
-                        RoleArn=role_arn, RoleSessionName=session_name)
-    creds = response['Credentials']
-
-    s3 = get_iam_s3client(aws_access_key_id = creds['AccessKeyId'],
-                          aws_secret_access_key = creds['SecretAccessKey'],
-                          aws_session_token = creds['SessionToken'])
-
-    # expect AccessDenied because no identity policy allows s3 actions
-    e = assert_raises(ClientError, s3.list_buckets)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 403
-    assert error_code == 'AccessDenied'
-
-    policy_name = 'AllowListAllMyBuckets'
-    policy = json.dumps({
-        'Version': '2012-10-17',
-        'Statement': [{
-            'Effect': 'Allow',
-            'Action': 's3:ListAllMyBuckets',
-            'Resource': '*'
-            }]
-        })
-    iam_root.put_role_policy(RoleName=role_name, PolicyName=policy_name, PolicyDocument=policy)
-
-    # the policy may take a bit to start working. retry until it returns
-    # something other than AccessDenied
-    retry_on('AccessDenied', 10, s3.list_buckets)
-
-# alt account user assumes main account role to access main account bucket
-@pytest.mark.iam_account
-@pytest.mark.iam_cross_account
-@pytest.mark.iam_role
-@pytest.mark.role_policy
-def test_same_account_role_policy_allow(iam_root, iam_alt_root):
-    path = get_iam_path_prefix()
-    user_name = make_iam_name('AltUser')
-    role_name = make_iam_name('MyRole')
-    session_name = 'MySession'
-    bucket_name = get_new_bucket_name()
-
-    user = iam_alt_root.create_user(UserName=user_name, Path=path)['User']
-    user_arn = user['Arn']
-    key = iam_alt_root.create_access_key(UserName=user_name)['AccessKey']
-
-    s3_main = get_iam_root_client(service_name='s3')
-    s3_main.create_bucket(Bucket=bucket_name)
-
-    trust_policy = json.dumps({
-        'Version': '2012-10-17',
-        'Statement': [{
-            'Effect': 'Allow',
-            'Action': 'sts:AssumeRole',
-            'Principal': {'AWS': user_arn}
-            }]
-        })
-    # returns MalformedPolicyDocument until the user arn starts working
-    role = retry_on('MalformedPolicyDocument', 10, iam_root.create_role,
-                    RoleName=role_name, Path=path, AssumeRolePolicyDocument=trust_policy)['Role']
-    role_arn = role['Arn']
-
-    sts = get_sts_client(aws_access_key_id=key['AccessKeyId'],
-                         aws_secret_access_key=key['SecretAccessKey'])
-
-    # returns InvalidClientTokenId or AccessDenied until the access key starts working
-    response = retry_on(('InvalidClientTokenId', 'AccessDenied'), 10, sts.assume_role,
-                        RoleArn=role_arn, RoleSessionName=session_name)
-    creds = response['Credentials']
-
-    s3 = get_iam_s3client(aws_access_key_id = creds['AccessKeyId'],
-                          aws_secret_access_key = creds['SecretAccessKey'],
-                          aws_session_token = creds['SessionToken'])
-
-    # expect AccessDenied because no identity policy allows s3 actions
-    e = assert_raises(ClientError, s3.list_objects, Bucket=bucket_name)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 403
-    assert error_code == 'AccessDenied'
-
-    policy_name = 'AllowListBucket'
-    policy = json.dumps({
-        'Version': '2012-10-17',
-        'Statement': [{
-            'Effect': 'Allow',
-            'Action': 's3:ListBucket',
-            'Resource': '*'
-            }]
-        })
-    iam_root.put_role_policy(RoleName=role_name, PolicyName=policy_name, PolicyDocument=policy)
-
-    # the policy may take a bit to start working. retry until it returns
-    # something other than AccessDenied
-    retry_on('AccessDenied', 10, s3.list_objects, Bucket=bucket_name)
-
-# alt account user assumes main account role to access alt account bucket
-@pytest.mark.iam_account
-@pytest.mark.iam_cross_account
-@pytest.mark.iam_role
-@pytest.mark.role_policy
-def test_cross_account_role_policy_allow(iam_root, iam_alt_root):
-    path = get_iam_path_prefix()
-    user_name = make_iam_name('AltUser')
-    role_name = make_iam_name('MyRole')
-    session_name = 'MySession'
-    bucket_name = get_new_bucket_name()
-
-    user = iam_alt_root.create_user(UserName=user_name, Path=path)['User']
-    user_arn = user['Arn']
-    key = iam_alt_root.create_access_key(UserName=user_name)['AccessKey']
-
-    s3_alt = get_iam_alt_root_client(service_name='s3')
-    s3_alt.create_bucket(Bucket=bucket_name)
-
-    trust_policy = json.dumps({
-        'Version': '2012-10-17',
-        'Statement': [{
-            'Effect': 'Allow',
-            'Action': 'sts:AssumeRole',
-            'Principal': {'AWS': user_arn}
-            }]
-        })
-    # returns MalformedPolicyDocument until the user arn starts working
-    role = retry_on('MalformedPolicyDocument', 10, iam_root.create_role,
-                    RoleName=role_name, Path=path, AssumeRolePolicyDocument=trust_policy)['Role']
-    role_arn = role['Arn']
-
-    sts = get_sts_client(aws_access_key_id=key['AccessKeyId'],
-                         aws_secret_access_key=key['SecretAccessKey'])
-
-    # returns InvalidClientTokenId or AccessDenied until the access key starts working
-    response = retry_on(('InvalidClientTokenId', 'AccessDenied'), 10, sts.assume_role,
-                        RoleArn=role_arn, RoleSessionName=session_name)
-    creds = response['Credentials']
-
-    s3 = get_iam_s3client(aws_access_key_id = creds['AccessKeyId'],
-                          aws_secret_access_key = creds['SecretAccessKey'],
-                          aws_session_token = creds['SessionToken'])
-
-    # expect AccessDenied because no identity policy allows s3 actions
-    e = assert_raises(ClientError, s3.list_objects, Bucket=bucket_name)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 403
-    assert error_code == 'AccessDenied'
-
-    policy_name = 'AllowListBucket'
-    policy = json.dumps({
-        'Version': '2012-10-17',
-        'Statement': [{
-            'Effect': 'Allow',
-            'Action': 's3:ListBucket',
-            'Resource': '*'
-            }]
-        })
-    iam_root.put_role_policy(RoleName=role_name, PolicyName=policy_name, PolicyDocument=policy)
-
-    # expect AccessDenied because no resource policy allows the main account
-    e = assert_raises(ClientError, s3.list_objects, Bucket=bucket_name)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 403
-    assert error_code == 'AccessDenied'
-
-    # add a bucket policy that allows s3:ListBucket for the main account's arn
-    main_arn = iam_root.get_user()['User']['Arn']
-    s3_alt.put_bucket_policy(Bucket=bucket_name, Policy=json.dumps({
-        'Version': '2012-10-17',
-        'Statement': [{
-            'Effect': 'Allow',
-            'Principal': {'AWS': main_arn},
-            'Action': 's3:ListBucket',
-            'Resource': f'arn:aws:s3:::{bucket_name}'
-            }]
-        }))
-
-    # the policy may take a bit to start working. retry until it returns
-    # something other than AccessDenied
-    retry_on('AccessDenied', 10, s3.list_objects, Bucket=bucket_name)
-
-# alt account user assumes main account role to create a bucket
-@pytest.mark.iam_account
-@pytest.mark.iam_cross_account
-@pytest.mark.iam_role
-@pytest.mark.role_policy
-def test_account_role_policy_allow_create_bucket(iam_root, iam_alt_root):
-    path = get_iam_path_prefix()
-    user_name = make_iam_name('AltUser')
-    role_name = make_iam_name('MyRole')
-    session_name = 'MySession'
-    bucket_name = get_new_bucket_name()
-
-    user = iam_alt_root.create_user(UserName=user_name, Path=path)['User']
-    user_arn = user['Arn']
-    key = iam_alt_root.create_access_key(UserName=user_name)['AccessKey']
-
-    trust_policy = json.dumps({
-        'Version': '2012-10-17',
-        'Statement': [{
-            'Effect': 'Allow',
-            'Action': 'sts:AssumeRole',
-            'Principal': {'AWS': user_arn}
-            }]
-        })
-    # returns MalformedPolicyDocument until the user arn starts working
-    role = retry_on('MalformedPolicyDocument', 10, iam_root.create_role,
-                    RoleName=role_name, Path=path, AssumeRolePolicyDocument=trust_policy)['Role']
-    role_arn = role['Arn']
-
-    sts = get_sts_client(aws_access_key_id=key['AccessKeyId'],
-                         aws_secret_access_key=key['SecretAccessKey'])
-
-    # returns InvalidClientTokenId or AccessDenied until the access key starts working
-    response = retry_on(('InvalidClientTokenId', 'AccessDenied'), 10, sts.assume_role,
-                        RoleArn=role_arn, RoleSessionName=session_name)
-    creds = response['Credentials']
-
-    s3 = get_iam_s3client(aws_access_key_id = creds['AccessKeyId'],
-                          aws_secret_access_key = creds['SecretAccessKey'],
-                          aws_session_token = creds['SessionToken'])
-
-    # expect AccessDenied because no identity policy allows s3 actions
-    e = assert_raises(ClientError, s3.create_bucket, Bucket=bucket_name, ObjectOwnership='ObjectWriter', ACL='private')
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 403
-    assert error_code == 'AccessDenied'
-
-    policy_name = 'AllowCreateBucket'
-    policy = json.dumps({
-        'Version': '2012-10-17',
-        'Statement': [{
-            'Effect': 'Allow',
-            'Action': ['s3:CreateBucket', 's3:PutBucketAcl'],
-            'Resource': '*'
-            }]
-        })
-    iam_root.put_role_policy(RoleName=role_name, PolicyName=policy_name, PolicyDocument=policy)
-
-    # the policy may take a bit to start working. retry until it returns
-    # something other than AccessDenied
-    retry_on('AccessDenied', 10, s3.create_bucket, Bucket=bucket_name, ObjectOwnership='ObjectWriter', ACL='private')
-
-    # verify that the bucket is owned by the role's account
-    s3_main = get_iam_root_client(service_name='s3')
-    response = s3_main.get_bucket_acl(Bucket=bucket_name)
-
-    main_arn = iam_root.get_user()['User']['Arn']
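-    # a root user arn has the form 'arn:aws:iam::123456789012:root' (example account
-    # id), so stripping the prefix and the ':root' suffix leaves just the account id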
-    account_id = main_arn.removeprefix('arn:aws:iam::').removesuffix(':root')
-    assert response['Owner']['ID'] == account_id
-    assert response['Grants'][0]['Grantee']['ID'] == account_id
-
-# alt account user assumes main account role to read the role info
-@pytest.mark.iam_account
-@pytest.mark.iam_cross_account
-@pytest.mark.iam_role
-@pytest.mark.role_policy
-def test_account_role_policy_allow_get_role(iam_root, iam_alt_root):
-    path = get_iam_path_prefix()
-    user_name = make_iam_name('AltUser')
-    role_name = make_iam_name('MyRole')
-    session_name = 'MySession'
-    bucket_name = get_new_bucket_name()
-
-    user = iam_alt_root.create_user(UserName=user_name, Path=path)['User']
-    user_arn = user['Arn']
-    key = iam_alt_root.create_access_key(UserName=user_name)['AccessKey']
-
-    trust_policy = json.dumps({
-        'Version': '2012-10-17',
-        'Statement': [{
-            'Effect': 'Allow',
-            'Action': 'sts:AssumeRole',
-            'Principal': {'AWS': user_arn}
-            }]
-        })
-    # returns MalformedPolicyDocument until the user arn starts working
-    role = retry_on('MalformedPolicyDocument', 10, iam_root.create_role,
-                    RoleName=role_name, Path=path, AssumeRolePolicyDocument=trust_policy)['Role']
-    role_arn = role['Arn']
-
-    sts = get_sts_client(aws_access_key_id=key['AccessKeyId'],
-                         aws_secret_access_key=key['SecretAccessKey'])
-
-    # returns InvalidClientTokenId or AccessDenied until the access key starts working
-    response = retry_on(('InvalidClientTokenId', 'AccessDenied'), 10, sts.assume_role,
-                        RoleArn=role_arn, RoleSessionName=session_name)
-    creds = response['Credentials']
-
-    iam = get_iam_root_client(service_name='iam',
-                              aws_access_key_id = creds['AccessKeyId'],
-                              aws_secret_access_key = creds['SecretAccessKey'],
-                              aws_session_token = creds['SessionToken'])
-
-    # expect AccessDenied because no identity policy allows iam actions
-    e = assert_raises(ClientError, iam.get_role, RoleName=role_name)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 403
-    assert error_code == 'AccessDenied'
-
-    policy_name = 'AllowGetRole'
-    policy = json.dumps({
-        'Version': '2012-10-17',
-        'Statement': [{
-            'Effect': 'Allow',
-            'Action': 'iam:GetRole',
-            'Resource': '*'
-            }]
-        })
-    iam_root.put_role_policy(RoleName=role_name, PolicyName=policy_name, PolicyDocument=policy)
-
-    # the policy may take a bit to start working. retry until it returns
-    # something other than AccessDenied
-    retry_on('AccessDenied', 10, iam.get_role, RoleName=role_name)
-
-
-# IAM OpenIDConnectProvider apis
-@pytest.mark.iam_account
-def test_account_oidc_provider(iam_root):
-    url_host = get_iam_path_prefix()[1:] + 'example.com'
-    url = 'http://' + url_host
-
-    response = iam_root.create_open_id_connect_provider(
-            ClientIDList=['my-application-id'],
-            ThumbprintList=['3768084dfb3d2b68b7897bf5f565da8efEXAMPLE'],
-            Url=url)
-    arn = response['OpenIDConnectProviderArn']
-    assert arn.endswith(f':oidc-provider/{url_host}')
-
-    response = iam_root.list_open_id_connect_providers()
-    arns = [p['Arn'] for p in response['OpenIDConnectProviderList']]
-    assert arn in arns
-
-    response = iam_root.get_open_id_connect_provider(OpenIDConnectProviderArn=arn)
-    assert url == response['Url']
-    assert ['my-application-id'] == response['ClientIDList']
-    assert ['3768084dfb3d2b68b7897bf5f565da8efEXAMPLE'] == response['ThumbprintList']
-
-    iam_root.delete_open_id_connect_provider(OpenIDConnectProviderArn=arn)
-
-    response = iam_root.list_open_id_connect_providers()
-    arns = [p['Arn'] for p in response['OpenIDConnectProviderList']]
-    assert arn not in arns
-
-    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
-        iam_root.get_open_id_connect_provider(OpenIDConnectProviderArn=arn)
-    with pytest.raises(iam_root.exceptions.NoSuchEntityException):
-        iam_root.delete_open_id_connect_provider(OpenIDConnectProviderArn=arn)
-
-
-# test cross-account access, adding user policy before the bucket policy
-def _test_cross_account_user_bucket_policy(roots3, alt_root, alt_name, alt_arn):
-    # add a user policy that allows s3 actions
-    alt_root.put_user_policy(UserName=alt_name, PolicyName='AllowStar', PolicyDocument=json.dumps({
-        'Version': '2012-10-17',
-        'Statement': [{
-            'Effect': 'Allow',
-            'Action': 's3:*',
-            'Resource': '*'
-            }]
-        }))
-
-    key = alt_root.create_access_key(UserName=alt_name)['AccessKey']
-    alts3 = get_iam_s3client(aws_access_key_id=key['AccessKeyId'],
-                             aws_secret_access_key=key['SecretAccessKey'])
-
-    # create a bucket with the root user
-    bucket = get_new_bucket(roots3)
-    try:
-        # the access key may take a bit to start working. retry until it returns
-        # something other than InvalidAccessKeyId
-        e = assert_raises(ClientError, retry_on, 'InvalidAccessKeyId', 10, alts3.list_objects, Bucket=bucket)
-        status, error_code = _get_status_and_error_code(e.response)
-        assert status == 403
-        assert error_code == 'AccessDenied'
-
-        # add a bucket policy that allows s3:ListBucket for the iam user's arn
-        roots3.put_bucket_policy(Bucket=bucket, Policy=json.dumps({
-            'Version': '2012-10-17',
-            'Statement': [{
-                'Effect': 'Allow',
-                'Principal': {'AWS': alt_arn},
-                'Action': 's3:ListBucket',
-                'Resource': f'arn:aws:s3:::{bucket}'
-                }]
-            }))
-
-        # verify that the iam user can eventually access it
-        retry_on('AccessDenied', 10, alts3.list_objects, Bucket=bucket)
-    finally:
-        roots3.delete_bucket(Bucket=bucket)
-
-# test cross-account access, adding bucket policy before the user policy
-def _test_cross_account_bucket_user_policy(roots3, alt_root, alt_name, alt_arn):
-    key = alt_root.create_access_key(UserName=alt_name)['AccessKey']
-    alts3 = get_iam_s3client(aws_access_key_id=key['AccessKeyId'],
-                             aws_secret_access_key=key['SecretAccessKey'])
-
-    # create a bucket with the root user
-    bucket = get_new_bucket(roots3)
-    try:
-        # add a bucket policy that allows s3:ListBucket for the iam user's arn
-        roots3.put_bucket_policy(Bucket=bucket, Policy=json.dumps({
-            'Version': '2012-10-17',
-            'Statement': [{
-                'Effect': 'Allow',
-                'Principal': {'AWS': alt_arn},
-                'Action': 's3:ListBucket',
-                'Resource': f'arn:aws:s3:::{bucket}'
-                }]
-            }))
-
-        # the access key may take a bit to start working. retry until it returns
-        # something other than InvalidAccessKeyId
-        e = assert_raises(ClientError, retry_on, 'InvalidAccessKeyId', 10, alts3.list_objects, Bucket=bucket)
-        status, error_code = _get_status_and_error_code(e.response)
-        assert status == 403
-        assert error_code == 'AccessDenied'
-
-        # add a user policy that allows s3 actions
-        alt_root.put_user_policy(UserName=alt_name, PolicyName='AllowStar', PolicyDocument=json.dumps({
-            'Version': '2012-10-17',
-            'Statement': [{
-                'Effect': 'Allow',
-                'Action': 's3:*',
-                'Resource': '*'
-                }]
-            }))
-
-        # verify that the iam user can eventually access it
-        retry_on('AccessDenied', 10, alts3.list_objects, Bucket=bucket)
-    finally:
-        roots3.delete_bucket(Bucket=bucket)
-
-@pytest.mark.iam_account
-@pytest.mark.iam_cross_account
-def test_cross_account_bucket_user_policy_allow_user_arn(iam_root, iam_alt_root):
-    roots3 = get_iam_root_client(service_name='s3')
-    path = get_iam_path_prefix()
-    user_name = make_iam_name('AltUser')
-    response = iam_alt_root.create_user(UserName=user_name, Path=path)
-    user_arn = response['User']['Arn']
-    _test_cross_account_bucket_user_policy(roots3, iam_alt_root, user_name, user_arn)
-
-@pytest.mark.iam_account
-@pytest.mark.iam_cross_account
-def test_cross_account_user_bucket_policy_allow_user_arn(iam_root, iam_alt_root):
-    roots3 = get_iam_root_client(service_name='s3')
-    path = get_iam_path_prefix()
-    user_name = make_iam_name('AltUser')
-    response = iam_alt_root.create_user(UserName=user_name, Path=path)
-    user_arn = response['User']['Arn']
-    _test_cross_account_user_bucket_policy(roots3, iam_alt_root, user_name, user_arn)
-
-@pytest.mark.iam_account
-@pytest.mark.iam_cross_account
-def test_cross_account_user_bucket_policy_allow_account_arn(iam_root, iam_alt_root):
-    roots3 = get_iam_root_client(service_name='s3')
-    path = get_iam_path_prefix()
-    user_name = make_iam_name('AltUser')
-    response = iam_alt_root.create_user(UserName=user_name, Path=path)
-    user_arn = response['User']['Arn']
-    account_arn = user_arn.replace(f':user{path}{user_name}', ':root')
-    _test_cross_account_user_bucket_policy(roots3, iam_alt_root, user_name, account_arn)
-
-@pytest.mark.iam_account
-@pytest.mark.iam_cross_account
-def test_cross_account_bucket_user_policy_allow_account_arn(iam_root, iam_alt_root):
-    roots3 = get_iam_root_client(service_name='s3')
-    path = get_iam_path_prefix()
-    user_name = make_iam_name('AltUser')
-    response = iam_alt_root.create_user(UserName=user_name, Path=path)
-    user_arn = response['User']['Arn']
-    account_arn = user_arn.replace(f':user{path}{user_name}', ':root')
-    _test_cross_account_bucket_user_policy(roots3, iam_alt_root, user_name, account_arn)
-
-@pytest.mark.iam_account
-@pytest.mark.iam_cross_account
-def test_cross_account_user_bucket_policy_allow_account_id(iam_root, iam_alt_root):
-    roots3 = get_iam_root_client(service_name='s3')
-    path = get_iam_path_prefix()
-    user_name = make_iam_name('AltUser')
-    response = iam_alt_root.create_user(UserName=user_name, Path=path)
-    user_arn = response['User']['Arn']
-    account_id = user_arn.removeprefix('arn:aws:iam::').removesuffix(f':user{path}{user_name}')
-    _test_cross_account_user_bucket_policy(roots3, iam_alt_root, user_name, account_id)
-
-@pytest.mark.iam_account
-@pytest.mark.iam_cross_account
-def test_cross_account_bucket_user_policy_allow_account_id(iam_root, iam_alt_root):
-    roots3 = get_iam_root_client(service_name='s3')
-    path = get_iam_path_prefix()
-    user_name = make_iam_name('AltUser')
-    response = iam_alt_root.create_user(UserName=user_name, Path=path)
-    user_arn = response['User']['Arn']
-    account_id = user_arn.removeprefix('arn:aws:iam::').removesuffix(f':user{path}{user_name}')
-    _test_cross_account_bucket_user_policy(roots3, iam_alt_root, user_name, account_id)
-
-
-# test cross-account access, adding user policy before the bucket acl
-def _test_cross_account_user_policy_bucket_acl(roots3, alt_root, alt_name, grantee):
-    # add a user policy that allows s3 actions
-    alt_root.put_user_policy(UserName=alt_name, PolicyName='AllowStar', PolicyDocument=json.dumps({
-        'Version': '2012-10-17',
-        'Statement': [{
-            'Effect': 'Allow',
-            'Action': 's3:*',
-            'Resource': '*'
-            }]
-        }))
-
-    key = alt_root.create_access_key(UserName=alt_name)['AccessKey']
-    alts3 = get_iam_s3client(aws_access_key_id=key['AccessKeyId'],
-                             aws_secret_access_key=key['SecretAccessKey'])
-
-    # create a bucket with the root user
-    bucket = get_new_bucket(roots3)
-    try:
-        # the access key may take a bit to start working. retry until it returns
-        # something other than InvalidAccessKeyId
-        e = assert_raises(ClientError, retry_on, 'InvalidAccessKeyId', 10, alts3.list_objects, Bucket=bucket)
-        status, error_code = _get_status_and_error_code(e.response)
-        assert status == 403
-        assert error_code == 'AccessDenied'
-
-        # add a bucket acl that grants READ access
-        roots3.put_bucket_acl(Bucket=bucket, GrantRead=grantee)
-
-        # verify that the iam user can eventually access it
-        retry_on('AccessDenied', 10, alts3.list_objects, Bucket=bucket)
-    finally:
-        roots3.delete_bucket(Bucket=bucket)
-
-# test cross-account access, adding bucket acl before the user policy
-def _test_cross_account_bucket_acl_user_policy(roots3, alt_root, alt_name, grantee):
-    key = alt_root.create_access_key(UserName=alt_name)['AccessKey']
-    alts3 = get_iam_s3client(aws_access_key_id=key['AccessKeyId'],
-                             aws_secret_access_key=key['SecretAccessKey'])
-
-    # create a bucket with the root user
-    bucket = get_new_bucket(roots3)
-    try:
-        # add a bucket acl that grants READ access
-        roots3.put_bucket_acl(Bucket=bucket, GrantRead=grantee)
-
-        # the access key may take a bit to start working. retry until it returns
-        # something other than InvalidAccessKeyId
-        e = assert_raises(ClientError, retry_on, 'InvalidAccessKeyId', 10, alts3.list_objects, Bucket=bucket)
-        status, error_code = _get_status_and_error_code(e.response)
-        assert status == 403
-        assert error_code == 'AccessDenied'
-
-        # add a user policy that allows s3 actions
-        alt_root.put_user_policy(UserName=alt_name, PolicyName='AllowStar', PolicyDocument=json.dumps({
-            'Version': '2012-10-17',
-            'Statement': [{
-                'Effect': 'Allow',
-                'Action': 's3:*',
-                'Resource': '*'
-                }]
-            }))
-
-        # verify that the iam user can eventually access it
-        retry_on('AccessDenied', 10, alts3.list_objects, Bucket=bucket)
-    finally:
-        roots3.delete_bucket(Bucket=bucket)
-
-@pytest.mark.iam_account
-@pytest.mark.iam_cross_account
-@pytest.mark.fails_on_aws # can't grant to individual users
-def test_cross_account_bucket_acl_user_policy_grant_user_id(iam_root, iam_alt_root):
-    roots3 = get_iam_root_client(service_name='s3')
-    path = get_iam_path_prefix()
-    user_name = make_iam_name('AltUser')
-    response = iam_alt_root.create_user(UserName=user_name, Path=path)
-    grantee = 'id=' + response['User']['UserId']
-    _test_cross_account_bucket_acl_user_policy(roots3, iam_alt_root, user_name, grantee)
-
-@pytest.mark.iam_account
-@pytest.mark.iam_cross_account
-@pytest.mark.fails_on_aws # can't grant to individual users
-def test_cross_account_user_policy_bucket_acl_grant_user_id(iam_root, iam_alt_root):
-    roots3 = get_iam_root_client(service_name='s3')
-    path = get_iam_path_prefix()
-    user_name = make_iam_name('AltUser')
-    response = iam_alt_root.create_user(UserName=user_name, Path=path)
-    grantee = 'id=' + response['User']['UserId']
-    _test_cross_account_user_policy_bucket_acl(roots3, iam_alt_root, user_name, grantee)
-
-@pytest.mark.iam_account
-@pytest.mark.iam_cross_account
-def test_cross_account_bucket_acl_user_policy_grant_canonical_id(iam_root, iam_alt_root):
-    roots3 = get_iam_root_client(service_name='s3')
-    path = get_iam_path_prefix()
-    user_name = make_iam_name('AltUser')
-    iam_alt_root.create_user(UserName=user_name, Path=path)
-    grantee = 'id=' + get_iam_alt_root_user_id()
-    _test_cross_account_bucket_acl_user_policy(roots3, iam_alt_root, user_name, grantee)
-
-@pytest.mark.iam_account
-@pytest.mark.iam_cross_account
-def test_cross_account_user_policy_bucket_acl_grant_canonical_id(iam_root, iam_alt_root):
-    roots3 = get_iam_root_client(service_name='s3')
-    path = get_iam_path_prefix()
-    user_name = make_iam_name('AltUser')
-    iam_alt_root.create_user(UserName=user_name, Path=path)
-    grantee = 'id=' + get_iam_alt_root_user_id()
-    _test_cross_account_user_policy_bucket_acl(roots3, iam_alt_root, user_name, grantee)
-
-@pytest.mark.iam_account
-@pytest.mark.iam_cross_account
-def test_cross_account_bucket_acl_user_policy_grant_account_email(iam_root, iam_alt_root):
-    roots3 = get_iam_root_client(service_name='s3')
-    path = get_iam_path_prefix()
-    user_name = make_iam_name('AltUser')
-    iam_alt_root.create_user(UserName=user_name, Path=path)
-    grantee = 'emailAddress=' + get_iam_alt_root_email()
-    _test_cross_account_bucket_acl_user_policy(roots3, iam_alt_root, user_name, grantee)
-
-@pytest.mark.iam_account
-@pytest.mark.iam_cross_account
-def test_cross_account_user_policy_bucket_acl_grant_account_email(iam_root, iam_alt_root):
-    roots3 = get_iam_root_client(service_name='s3')
-    path = get_iam_path_prefix()
-    user_name = make_iam_name('AltUser')
-    iam_alt_root.create_user(UserName=user_name, Path=path)
-    grantee = 'emailAddress=' + get_iam_alt_root_email()
-    _test_cross_account_user_policy_bucket_acl(roots3, iam_alt_root, user_name, grantee)
-
-
-# test root cross-account access with bucket policy
-def _test_cross_account_root_bucket_policy(roots3, alts3, alt_arn):
-    # create a bucket with the root user
-    bucket = get_new_bucket(roots3)
-    try:
-        e = assert_raises(ClientError, alts3.list_objects, Bucket=bucket)
-        status, error_code = _get_status_and_error_code(e.response)
-        assert status == 403
-        assert error_code == 'AccessDenied'
-
-        # add a bucket policy that allows s3:ListBucket for the alt account principal
-        roots3.put_bucket_policy(Bucket=bucket, Policy=json.dumps({
-            'Version': '2012-10-17',
-            'Statement': [{
-                'Effect': 'Allow',
-                'Principal': {'AWS': alt_arn},
-                'Action': 's3:ListBucket',
-                'Resource': f'arn:aws:s3:::{bucket}'
-                }]
-            }))
-
-        # verify that the alt account's root user can eventually access it
-        retry_on('AccessDenied', 10, alts3.list_objects, Bucket=bucket)
-    finally:
-        roots3.delete_bucket(Bucket=bucket)
-
-@pytest.mark.iam_account
-@pytest.mark.iam_cross_account
-def test_cross_account_root_bucket_policy_allow_account_arn(iam_root, iam_alt_root):
-    roots3 = get_iam_root_client(service_name='s3')
-    alts3 = get_iam_alt_root_client(service_name='s3')
-    alt_arn = iam_alt_root.get_user()['User']['Arn']
-    _test_cross_account_root_bucket_policy(roots3, alts3, alt_arn)
-
-@pytest.mark.iam_account
-@pytest.mark.iam_cross_account
-def test_cross_account_root_bucket_policy_allow_account_id(iam_root, iam_alt_root):
-    roots3 = get_iam_root_client(service_name='s3')
-    alts3 = get_iam_alt_root_client(service_name='s3')
-    alt_arn = iam_alt_root.get_user()['User']['Arn']
-    account_id = alt_arn.removeprefix('arn:aws:iam::').removesuffix(':root')
-    _test_cross_account_root_bucket_policy(roots3, alts3, account_id)
-
-# test root cross-account access with bucket acls
-def _test_cross_account_root_bucket_acl(roots3, alts3, grantee):
-    # create a bucket with the root user
-    bucket = get_new_bucket(roots3)
-    try:
-        e = assert_raises(ClientError, alts3.list_objects, Bucket=bucket)
-        status, error_code = _get_status_and_error_code(e.response)
-        assert status == 403
-        assert error_code == 'AccessDenied'
-
-        # add a bucket acl that grants READ
-        roots3.put_bucket_acl(Bucket=bucket, GrantRead=grantee)
-
-        # verify that the alt account's root user can eventually access it
-        retry_on('AccessDenied', 10, alts3.list_objects, Bucket=bucket)
-    finally:
-        roots3.delete_bucket(Bucket=bucket)
-
-@pytest.mark.iam_account
-@pytest.mark.iam_cross_account
-def test_cross_account_root_bucket_acl_grant_canonical_id(iam_root, iam_alt_root):
-    roots3 = get_iam_root_client(service_name='s3')
-    alts3 = get_iam_alt_root_client(service_name='s3')
-    grantee = 'id=' + get_iam_alt_root_user_id()
-    _test_cross_account_root_bucket_acl(roots3, alts3, grantee)
-
-@pytest.mark.iam_account
-@pytest.mark.iam_cross_account
-def test_cross_account_root_bucket_acl_grant_account_email(iam_root, iam_alt_root):
-    roots3 = get_iam_root_client(service_name='s3')
-    alts3 = get_iam_alt_root_client(service_name='s3')
-    grantee = 'emailAddress=' + get_iam_alt_root_email()
-    _test_cross_account_root_bucket_acl(roots3, alts3, grantee)
diff --git a/s3tests_boto3/functional/test_s3.py b/s3tests_boto3/functional/test_s3.py
deleted file mode 100644 (file)
index a0c3fac..0000000
+++ /dev/null
@@ -1,13553 +0,0 @@
-import boto3
-import botocore.session
-from botocore.exceptions import ClientError
-from botocore.exceptions import ParamValidationError
-import isodate
-import email.utils
-import datetime
-import threading
-import re
-import pytz
-from collections import OrderedDict
-import requests
-import json
-import base64
-import hmac
-import hashlib
-import xml.etree.ElementTree as ET
-import time
-import operator
-import pytest
-import os
-import string
-import random
-import socket
-import dateutil.parser
-import ssl
-from collections import namedtuple
-from collections import defaultdict
-from io import StringIO
-from io import BytesIO
-
-from email.header import decode_header
-
-from .utils import assert_raises
-from .utils import generate_random
-from .utils import _get_status_and_error_code
-from .utils import _get_status
-
-from .policy import Policy, Statement, make_json_policy
-
-from . import (
-    configfile,
-    setup_teardown,
-    get_client,
-    get_prefix,
-    get_unauthenticated_client,
-    get_bad_auth_client,
-    get_v2_client,
-    get_new_bucket,
-    get_new_bucket_name,
-    get_new_bucket_resource,
-    get_config_is_secure,
-    get_config_host,
-    get_config_port,
-    get_config_endpoint,
-    get_config_ssl_verify,
-    get_main_aws_access_key,
-    get_main_aws_secret_key,
-    get_main_display_name,
-    get_main_user_id,
-    get_main_email,
-    get_main_api_name,
-    get_alt_aws_access_key,
-    get_alt_aws_secret_key,
-    get_alt_display_name,
-    get_alt_user_id,
-    get_alt_email,
-    get_alt_client,
-    get_tenant_client,
-    get_tenant_iam_client,
-    get_tenant_user_id,
-    get_buckets_list,
-    get_objects_list,
-    get_main_kms_keyid,
-    get_secondary_kms_keyid,
-    get_svc_client,
-    get_cloud_storage_class,
-    get_cloud_retain_head_object,
-    get_cloud_regular_storage_class,
-    get_cloud_target_path,
-    get_cloud_target_storage_class,
-    get_cloud_client,
-    nuke_prefixed_buckets,
-    configured_storage_classes,
-    get_lc_debug_interval,
-    )
-
-
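-# helper: return True if the given bucket resource contains no objects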
-def _bucket_is_empty(bucket):
-    is_empty = True
-    for obj in bucket.objects.all():
-        is_empty = False
-        break
-    return is_empty
-
-def test_bucket_list_empty():
-    bucket = get_new_bucket_resource()
-    is_empty = _bucket_is_empty(bucket)
-    assert is_empty == True
-
-@pytest.mark.list_objects_v2
-def test_bucket_list_distinct():
-    bucket1 = get_new_bucket_resource()
-    bucket2 = get_new_bucket_resource()
-    obj = bucket1.put_object(Body='str', Key='asdf')
-    is_empty = _bucket_is_empty(bucket2)
-    assert is_empty == True
-
-def _create_objects(bucket=None, bucket_name=None, keys=[]):
-    """
-    Populate a (specified or new) bucket with objects with
-    specified names (and contents identical to their names).
-    """
-    if bucket_name is None:
-        bucket_name = get_new_bucket_name()
-    if bucket is None:
-        bucket = get_new_bucket_resource(name=bucket_name)
-
-    for key in keys:
-        obj = bucket.put_object(Body=key, Key=key)
-
-    return bucket_name
-
-def _get_keys(response):
-    """
-    Return a list of the key names from a client.list_objects() response.
-    """
-    keys = []
-    if 'Contents' in response:
-        objects_list = response['Contents']
-        keys = [obj['Key'] for obj in objects_list]
-    return keys
-
-def _get_prefixes(response):
-    """
-    Return a list of the common prefixes from a client.list_objects() response.
-    """
-    prefixes = []
-    if 'CommonPrefixes' in response:
-        prefix_list = response['CommonPrefixes']
-        prefixes = [prefix['Prefix'] for prefix in prefix_list]
-    return prefixes
-
-@pytest.mark.fails_on_dbstore
-def test_bucket_list_many():
-    bucket_name = _create_objects(keys=['foo', 'bar', 'baz'])
-    client = get_client()
-
-    response = client.list_objects(Bucket=bucket_name, MaxKeys=2)
-    keys = _get_keys(response)
-    assert len(keys) == 2
-    assert keys == ['bar', 'baz']
-    assert response['IsTruncated'] == True
-
-    response = client.list_objects(Bucket=bucket_name, Marker='baz',MaxKeys=2)
-    keys = _get_keys(response)
-    assert len(keys) == 1
-    assert response['IsTruncated'] == False
-    assert keys == ['foo']
-
-@pytest.mark.list_objects_v2
-@pytest.mark.fails_on_dbstore
-def test_bucket_listv2_many():
-    bucket_name = _create_objects(keys=['foo', 'bar', 'baz'])
-    client = get_client()
-
-    response = client.list_objects_v2(Bucket=bucket_name, MaxKeys=2)
-    keys = _get_keys(response)
-    assert len(keys) == 2
-    assert keys == ['bar', 'baz']
-    assert response['IsTruncated'] == True
-
-    response = client.list_objects_v2(Bucket=bucket_name, StartAfter='baz',MaxKeys=2)
-    keys = _get_keys(response)
-    assert len(keys) == 1
-    assert response['IsTruncated'] == False
-    assert keys == ['foo']
-
-@pytest.mark.list_objects_v2
-def test_basic_key_count():
-    client = get_client()
-    bucket_name = get_new_bucket_name()
-    client.create_bucket(Bucket=bucket_name)
-    for j in range(5):
-        client.put_object(Bucket=bucket_name, Key=str(j))
-    response1 = client.list_objects_v2(Bucket=bucket_name)
-    assert response1['KeyCount'] == 5
-
-def test_bucket_list_delimiter_basic():
-    bucket_name = _create_objects(keys=['foo/bar', 'foo/bar/xyzzy', 'quux/thud', 'asdf'])
-    client = get_client()
-
-    response = client.list_objects(Bucket=bucket_name, Delimiter='/')
-    assert response['Delimiter'] == '/'
-    keys = _get_keys(response)
-    assert keys == ['asdf']
-
-    prefixes = _get_prefixes(response)
-    assert len(prefixes) == 2
-    assert prefixes == ['foo/', 'quux/']
-
-@pytest.mark.list_objects_v2
-def test_bucket_listv2_delimiter_basic():
-    bucket_name = _create_objects(keys=['foo/bar', 'foo/bar/xyzzy', 'quux/thud', 'asdf'])
-    client = get_client()
-
-    response = client.list_objects_v2(Bucket=bucket_name, Delimiter='/')
-    assert response['Delimiter'] == '/'
-    keys = _get_keys(response)
-    assert keys == ['asdf']
-
-    prefixes = _get_prefixes(response)
-    assert len(prefixes) == 2
-    assert prefixes == ['foo/', 'quux/']
-    assert response['KeyCount'] == len(prefixes) + len(keys)
-
-
-@pytest.mark.list_objects_v2
-def test_bucket_listv2_encoding_basic():
-    bucket_name = _create_objects(keys=['foo+1/bar', 'foo/bar/xyzzy', 'quux ab/thud', 'asdf+b'])
-    client = get_client()
-
-    response = client.list_objects_v2(Bucket=bucket_name, Delimiter='/', EncodingType='url')
-    assert response['Delimiter'] == '/'
-    keys = _get_keys(response)
-    assert keys == ['asdf%2Bb']
-
-    prefixes = _get_prefixes(response)
-    assert len(prefixes) == 3
-    assert prefixes == ['foo%2B1/', 'foo/', 'quux%20ab/']
-
-def test_bucket_list_encoding_basic():
-    bucket_name = _create_objects(keys=['foo+1/bar', 'foo/bar/xyzzy', 'quux ab/thud', 'asdf+b'])
-    client = get_client()
-
-    response = client.list_objects(Bucket=bucket_name, Delimiter='/', EncodingType='url')
-    assert response['Delimiter'] == '/'
-    keys = _get_keys(response)
-    assert keys == ['asdf%2Bb']
-
-    prefixes = _get_prefixes(response)
-    assert len(prefixes) == 3
-    assert prefixes == ['foo%2B1/', 'foo/', 'quux%20ab/']
-
-
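-# helper: list the bucket with the given prefix/delimiter/marker/max-keys and
-# assert that the returned keys, common prefixes, truncation flag and
-# NextMarker match the expected values; returns the NextMarker for chaining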
-def validate_bucket_list(bucket_name, prefix, delimiter, marker, max_keys,
-                         is_truncated, check_objs, check_prefixes, next_marker):
-    client = get_client()
-
-    response = client.list_objects(Bucket=bucket_name, Delimiter=delimiter, Marker=marker, MaxKeys=max_keys, Prefix=prefix)
-    assert response['IsTruncated'] == is_truncated
-    if 'NextMarker' not in response:
-        response['NextMarker'] = None
-    assert response['NextMarker'] == next_marker
-
-    keys = _get_keys(response)
-    prefixes = _get_prefixes(response)
-
-    assert len(keys) == len(check_objs)
-    assert len(prefixes) == len(check_prefixes)
-    assert keys == check_objs
-    assert prefixes == check_prefixes
-
-    return response['NextMarker']
-
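-# helper: ListObjectsV2 counterpart of validate_bucket_list(), paginating with
-# ContinuationToken and (when last=True) asserting that no
-# NextContinuationToken is returned; returns the NextContinuationToken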
-def validate_bucket_listv2(bucket_name, prefix, delimiter, continuation_token, max_keys,
-                         is_truncated, check_objs, check_prefixes, last=False):
-    client = get_client()
-
-    params = dict(Bucket=bucket_name, Delimiter=delimiter, MaxKeys=max_keys, Prefix=prefix)
-    if continuation_token is not None:
-        params['ContinuationToken'] = continuation_token
-    else:
-        params['StartAfter'] = ''
-    response = client.list_objects_v2(**params)
-    assert response['IsTruncated'] == is_truncated
-    if 'NextContinuationToken' not in response:
-        response['NextContinuationToken'] = None
-    if last:
-        assert response['NextContinuationToken'] == None
-
-
-    keys = _get_keys(response)
-    prefixes = _get_prefixes(response)
-
-    assert len(keys) == len(check_objs)
-    assert len(prefixes) == len(check_prefixes)
-    assert keys == check_objs
-    assert prefixes == check_prefixes
-
-    return response['NextContinuationToken']
-
-@pytest.mark.fails_on_dbstore
-def test_bucket_list_delimiter_prefix():
-    bucket_name = _create_objects(keys=['asdf', 'boo/bar', 'boo/baz/xyzzy', 'cquux/thud', 'cquux/bla'])
-
-    delim = '/'
-    marker = ''
-    prefix = ''
-
-    marker = validate_bucket_list(bucket_name, prefix, delim, '', 1, True, ['asdf'], [], 'asdf')
-    marker = validate_bucket_list(bucket_name, prefix, delim, marker, 1, True, [], ['boo/'], 'boo/')
-    marker = validate_bucket_list(bucket_name, prefix, delim, marker, 1, False, [], ['cquux/'], None)
-
-    marker = validate_bucket_list(bucket_name, prefix, delim, '', 2, True, ['asdf'], ['boo/'], 'boo/')
-    marker = validate_bucket_list(bucket_name, prefix, delim, marker, 2, False, [], ['cquux/'], None)
-
-    prefix = 'boo/'
-
-    marker = validate_bucket_list(bucket_name, prefix, delim, '', 1, True, ['boo/bar'], [], 'boo/bar')
-    marker = validate_bucket_list(bucket_name, prefix, delim, marker, 1, False, [], ['boo/baz/'], None)
-
-    marker = validate_bucket_list(bucket_name, prefix, delim, '', 2, False, ['boo/bar'], ['boo/baz/'], None)
-
-@pytest.mark.list_objects_v2
-@pytest.mark.fails_on_dbstore
-def test_bucket_listv2_delimiter_prefix():
-    bucket_name = _create_objects(keys=['asdf', 'boo/bar', 'boo/baz/xyzzy', 'cquux/thud', 'cquux/bla'])
-
-    delim = '/'
-    continuation_token = ''
-    prefix = ''
-
-    continuation_token = validate_bucket_listv2(bucket_name, prefix, delim, None, 1, True, ['asdf'], [])
-    continuation_token = validate_bucket_listv2(bucket_name, prefix, delim, continuation_token, 1, True, [], ['boo/'])
-    continuation_token = validate_bucket_listv2(bucket_name, prefix, delim, continuation_token, 1, False, [], ['cquux/'], last=True)
-
-    continuation_token = validate_bucket_listv2(bucket_name, prefix, delim, None, 2, True, ['asdf'], ['boo/'])
-    continuation_token = validate_bucket_listv2(bucket_name, prefix, delim, continuation_token, 2, False, [], ['cquux/'], last=True)
-
-    prefix = 'boo/'
-
-    continuation_token = validate_bucket_listv2(bucket_name, prefix, delim, None, 1, True, ['boo/bar'], [])
-    continuation_token = validate_bucket_listv2(bucket_name, prefix, delim, continuation_token, 1, False, [], ['boo/baz/'], last=True)
-
-    continuation_token = validate_bucket_listv2(bucket_name, prefix, delim, None, 2, False, ['boo/bar'], ['boo/baz/'], last=True)
-
-
-@pytest.mark.list_objects_v2
-def test_bucket_listv2_delimiter_prefix_ends_with_delimiter():
-    bucket_name = _create_objects(keys=['asdf/'])
-    validate_bucket_listv2(bucket_name, 'asdf/', '/', None, 1000, False, ['asdf/'], [], last=True)
-
-def test_bucket_list_delimiter_prefix_ends_with_delimiter():
-    bucket_name = _create_objects(keys=['asdf/'])
-    validate_bucket_list(bucket_name, 'asdf/', '/', '', 1000, False, ['asdf/'], [], None)
-
-def test_bucket_list_delimiter_alt():
-    bucket_name = _create_objects(keys=['bar', 'baz', 'cab', 'foo'])
-    client = get_client()
-
-    response = client.list_objects(Bucket=bucket_name, Delimiter='a')
-    assert response['Delimiter'] == 'a'
-
-    keys = _get_keys(response)
-    # foo contains no 'a' and so is a complete key
-    assert keys == ['foo']
-
-    # bar, baz, and cab should be broken up by the 'a' delimiters
-    prefixes = _get_prefixes(response)
-    assert len(prefixes) == 2
-    assert prefixes == ['ba', 'ca']
-
-@pytest.mark.list_objects_v2
-def test_bucket_listv2_delimiter_alt():
-    bucket_name = _create_objects(keys=['bar', 'baz', 'cab', 'foo'])
-    client = get_client()
-
-    response = client.list_objects_v2(Bucket=bucket_name, Delimiter='a')
-    assert response['Delimiter'] == 'a'
-
-    keys = _get_keys(response)
-    # foo contains no 'a' and so is a complete key
-    assert keys == ['foo']
-
-    # bar, baz, and cab should be broken up by the 'a' delimiters
-    prefixes = _get_prefixes(response)
-    assert len(prefixes) == 2
-    assert prefixes == ['ba', 'ca']
-
-@pytest.mark.fails_on_dbstore
-def test_bucket_list_delimiter_prefix_underscore():
-    bucket_name = _create_objects(keys=['_obj1_','_under1/bar', '_under1/baz/xyzzy', '_under2/thud', '_under2/bla'])
-
-    delim = '/'
-    marker = ''
-    prefix = ''
-    marker = validate_bucket_list(bucket_name, prefix, delim, '', 1, True, ['_obj1_'], [], '_obj1_')
-    marker = validate_bucket_list(bucket_name, prefix, delim, marker, 1, True, [], ['_under1/'], '_under1/')
-    marker = validate_bucket_list(bucket_name, prefix, delim, marker, 1, False, [], ['_under2/'], None)
-
-    marker = validate_bucket_list(bucket_name, prefix, delim, '', 2, True, ['_obj1_'], ['_under1/'], '_under1/')
-    marker = validate_bucket_list(bucket_name, prefix, delim, marker, 2, False, [], ['_under2/'], None)
-
-    prefix = '_under1/'
-
-    marker = validate_bucket_list(bucket_name, prefix, delim, '', 1, True, ['_under1/bar'], [], '_under1/bar')
-    marker = validate_bucket_list(bucket_name, prefix, delim, marker, 1, False, [], ['_under1/baz/'], None)
-
-    marker = validate_bucket_list(bucket_name, prefix, delim, '', 2, False, ['_under1/bar'], ['_under1/baz/'], None)
-
-@pytest.mark.list_objects_v2
-@pytest.mark.fails_on_dbstore
-def test_bucket_listv2_delimiter_prefix_underscore():
-    bucket_name = _create_objects(keys=['_obj1_','_under1/bar', '_under1/baz/xyzzy', '_under2/thud', '_under2/bla'])
-
-    delim = '/'
-    continuation_token = ''
-    prefix = ''
-    continuation_token  = validate_bucket_listv2(bucket_name, prefix, delim, None, 1, True, ['_obj1_'], [])
-    continuation_token  = validate_bucket_listv2(bucket_name, prefix, delim, continuation_token , 1, True, [], ['_under1/'])
-    continuation_token  = validate_bucket_listv2(bucket_name, prefix, delim, continuation_token , 1, False, [], ['_under2/'], last=True)
-
-    continuation_token  = validate_bucket_listv2(bucket_name, prefix, delim, None, 2, True, ['_obj1_'], ['_under1/'])
-    continuation_token  = validate_bucket_listv2(bucket_name, prefix, delim, continuation_token , 2, False, [], ['_under2/'], last=True)
-
-    prefix = '_under1/'
-
-    continuation_token  = validate_bucket_listv2(bucket_name, prefix, delim, None, 1, True, ['_under1/bar'], [])
-    continuation_token  = validate_bucket_listv2(bucket_name, prefix, delim, continuation_token , 1, False, [], ['_under1/baz/'], last=True)
-
-    continuation_token  = validate_bucket_listv2(bucket_name, prefix, delim, None, 2, False, ['_under1/bar'], ['_under1/baz/'], last=True)
-
-
-def test_bucket_list_delimiter_percentage():
-    bucket_name = _create_objects(keys=['b%ar', 'b%az', 'c%ab', 'foo'])
-    client = get_client()
-
-    response = client.list_objects(Bucket=bucket_name, Delimiter='%')
-    assert response['Delimiter'] == '%'
-    keys = _get_keys(response)
-    # foo contains no '%' and so is a complete key
-    assert keys == ['foo']
-
-    prefixes = _get_prefixes(response)
-    assert len(prefixes) == 2
-    # b%ar, b%az, and c%ab should be broken up by the '%' delimiter
-    assert prefixes == ['b%', 'c%']
-
-@pytest.mark.list_objects_v2
-def test_bucket_listv2_delimiter_percentage():
-    bucket_name = _create_objects(keys=['b%ar', 'b%az', 'c%ab', 'foo'])
-    client = get_client()
-
-    response = client.list_objects_v2(Bucket=bucket_name, Delimiter='%')
-    assert response['Delimiter'] == '%'
-    keys = _get_keys(response)
-    # foo contains no '%' and so is a complete key
-    assert keys == ['foo']
-
-    prefixes = _get_prefixes(response)
-    assert len(prefixes) == 2
-    # b%ar, b%az, and c%ab should be broken up by the '%' delimiter
-    assert prefixes == ['b%', 'c%']
-
-def test_bucket_list_delimiter_whitespace():
-    bucket_name = _create_objects(keys=['b ar', 'b az', 'c ab', 'foo'])
-    client = get_client()
-
-    response = client.list_objects(Bucket=bucket_name, Delimiter=' ')
-    assert response['Delimiter'] == ' '
-    keys = _get_keys(response)
-    # foo contains no ' ' and so is a complete key
-    assert keys == ['foo']
-
-    prefixes = _get_prefixes(response)
-    assert len(prefixes) == 2
-    # 'b ar', 'b az', and 'c ab' should be broken up by the ' ' delimiter
-    assert prefixes == ['b ', 'c ']
-
-@pytest.mark.list_objects_v2
-def test_bucket_listv2_delimiter_whitespace():
-    bucket_name = _create_objects(keys=['b ar', 'b az', 'c ab', 'foo'])
-    client = get_client()
-
-    response = client.list_objects_v2(Bucket=bucket_name, Delimiter=' ')
-    assert response['Delimiter'] == ' '
-    keys = _get_keys(response)
-    # foo contains no ' ' and so is a complete key
-    assert keys == ['foo']
-
-    prefixes = _get_prefixes(response)
-    assert len(prefixes) == 2
-    # 'b ar', 'b az', and 'c ab' should be broken up by the ' ' delimiter
-    assert prefixes == ['b ', 'c ']
-
-def test_bucket_list_delimiter_dot():
-    bucket_name = _create_objects(keys=['b.ar', 'b.az', 'c.ab', 'foo'])
-    client = get_client()
-
-    response = client.list_objects(Bucket=bucket_name, Delimiter='.')
-    assert response['Delimiter'] == '.'
-    keys = _get_keys(response)
-    # foo contains no '.' and so is a complete key
-    assert keys == ['foo']
-
-    prefixes = _get_prefixes(response)
-    assert len(prefixes) == 2
-    # b.ar, b.az, and c.ab should be broken up by the '.' delimiter
-    assert prefixes == ['b.', 'c.']
-
-@pytest.mark.list_objects_v2
-def test_bucket_listv2_delimiter_dot():
-    bucket_name = _create_objects(keys=['b.ar', 'b.az', 'c.ab', 'foo'])
-    client = get_client()
-
-    response = client.list_objects_v2(Bucket=bucket_name, Delimiter='.')
-    assert response['Delimiter'] == '.'
-    keys = _get_keys(response)
-    # foo contains no '.' and so is a complete key
-    assert keys == ['foo']
-
-    prefixes = _get_prefixes(response)
-    assert len(prefixes) == 2
-    # b.ar, b.az, and c.ab should be broken up by the '.' delimiter
-    assert prefixes == ['b.', 'c.']
-
-def test_bucket_list_delimiter_unreadable():
-    key_names=['bar', 'baz', 'cab', 'foo']
-    bucket_name = _create_objects(keys=key_names)
-    client = get_client()
-
-    response = client.list_objects(Bucket=bucket_name, Delimiter='\x0a')
-    assert response['Delimiter'] == '\x0a'
-
-    keys = _get_keys(response)
-    prefixes = _get_prefixes(response)
-    assert keys == key_names
-    assert prefixes == []
-
-@pytest.mark.list_objects_v2
-def test_bucket_listv2_delimiter_unreadable():
-    key_names=['bar', 'baz', 'cab', 'foo']
-    bucket_name = _create_objects(keys=key_names)
-    client = get_client()
-
-    response = client.list_objects_v2(Bucket=bucket_name, Delimiter='\x0a')
-    assert response['Delimiter'] == '\x0a'
-
-    keys = _get_keys(response)
-    prefixes = _get_prefixes(response)
-    assert keys == key_names
-    assert prefixes == []
-
-def test_bucket_list_delimiter_empty():
-    key_names = ['bar', 'baz', 'cab', 'foo']
-    bucket_name = _create_objects(keys=key_names)
-    client = get_client()
-
-    response = client.list_objects(Bucket=bucket_name, Delimiter='')
-    # putting an empty value into Delimiter will not return a value in the response
-    assert not 'Delimiter' in response
-
-    keys = _get_keys(response)
-    prefixes = _get_prefixes(response)
-    assert keys == key_names
-    assert prefixes == []
-
-@pytest.mark.list_objects_v2
-def test_bucket_listv2_delimiter_empty():
-    key_names = ['bar', 'baz', 'cab', 'foo']
-    bucket_name = _create_objects(keys=key_names)
-    client = get_client()
-
-    response = client.list_objects_v2(Bucket=bucket_name, Delimiter='')
-    # putting an empty value into Delimiter will not return a value in the response
-    assert not 'Delimiter' in response
-
-    keys = _get_keys(response)
-    prefixes = _get_prefixes(response)
-    assert keys == key_names
-    assert prefixes == []
-
-def test_bucket_list_delimiter_none():
-    key_names = ['bar', 'baz', 'cab', 'foo']
-    bucket_name = _create_objects(keys=key_names)
-    client = get_client()
-
-    response = client.list_objects(Bucket=bucket_name)
-    # when no Delimiter is specified, the response does not include one
-    assert not 'Delimiter' in response
-
-    keys = _get_keys(response)
-    prefixes = _get_prefixes(response)
-    assert keys == key_names
-    assert prefixes == []
-
-@pytest.mark.list_objects_v2
-def test_bucket_listv2_delimiter_none():
-    key_names = ['bar', 'baz', 'cab', 'foo']
-    bucket_name = _create_objects(keys=key_names)
-    client = get_client()
-
-    response = client.list_objects_v2(Bucket=bucket_name)
-    # when no Delimiter is specified, the response does not include one
-    assert not 'Delimiter' in response
-
-    keys = _get_keys(response)
-    prefixes = _get_prefixes(response)
-    assert keys == key_names
-    assert prefixes == []
-
-@pytest.mark.list_objects_v2
-def test_bucket_listv2_fetchowner_notempty():
-    key_names = ['foo/bar', 'foo/baz', 'quux']
-    bucket_name = _create_objects(keys=key_names)
-    client = get_client()
-
-    response = client.list_objects_v2(Bucket=bucket_name, FetchOwner=True)
-    objs_list = response['Contents']
-    assert 'Owner' in objs_list[0]
-
-@pytest.mark.list_objects_v2
-def test_bucket_listv2_fetchowner_defaultempty():
-    key_names = ['foo/bar', 'foo/baz', 'quux']
-    bucket_name = _create_objects(keys=key_names)
-    client = get_client()
-
-    response = client.list_objects_v2(Bucket=bucket_name)
-    objs_list = response['Contents']
-    assert not 'Owner' in objs_list[0]
-
-@pytest.mark.list_objects_v2
-def test_bucket_listv2_fetchowner_empty():
-    key_names = ['foo/bar', 'foo/baz', 'quux']
-    bucket_name = _create_objects(keys=key_names)
-    client = get_client()
-
-    response = client.list_objects_v2(Bucket=bucket_name, FetchOwner=False)
-    objs_list = response['Contents']
-    assert not 'Owner' in objs_list[0]
-
-def test_bucket_list_delimiter_not_exist():
-    key_names = ['bar', 'baz', 'cab', 'foo']
-    bucket_name = _create_objects(keys=key_names)
-    client = get_client()
-
-    response = client.list_objects(Bucket=bucket_name, Delimiter='/')
-    # the Delimiter is echoed back even though it matches none of the keys
-    assert response['Delimiter'] == '/'
-
-    keys = _get_keys(response)
-    prefixes = _get_prefixes(response)
-    assert keys == key_names
-    assert prefixes == []
-
-@pytest.mark.list_objects_v2
-def test_bucket_listv2_delimiter_not_exist():
-    key_names = ['bar', 'baz', 'cab', 'foo']
-    bucket_name = _create_objects(keys=key_names)
-    client = get_client()
-
-    response = client.list_objects_v2(Bucket=bucket_name, Delimiter='/')
-    # the Delimiter is echoed back even though it matches none of the keys
-    assert response['Delimiter'] == '/'
-
-    keys = _get_keys(response)
-    prefixes = _get_prefixes(response)
-    assert keys == key_names
-    assert prefixes == []
-
-
-@pytest.mark.fails_on_dbstore
-def test_bucket_list_delimiter_not_skip_special():
-    key_names = ['0/'] + ['0/%s' % i for i in range(1000, 1999)]
-    key_names2 = ['1999', '1999#', '1999+', '2000']
-    key_names += key_names2
-    bucket_name = _create_objects(keys=key_names)
-    client = get_client()
-
-    response = client.list_objects(Bucket=bucket_name, Delimiter='/')
-    assert response['Delimiter'] == '/'
-
-    keys = _get_keys(response)
-    prefixes = _get_prefixes(response)
-    assert keys == key_names2
-    assert prefixes == ['0/']
-
-def test_bucket_list_prefix_basic():
-    key_names = ['foo/bar', 'foo/baz', 'quux']
-    bucket_name = _create_objects(keys=key_names)
-    client = get_client()
-
-    response = client.list_objects(Bucket=bucket_name, Prefix='foo/')
-    assert response['Prefix'] == 'foo/'
-
-    keys = _get_keys(response)
-    prefixes = _get_prefixes(response)
-    assert keys == ['foo/bar', 'foo/baz']
-    assert prefixes == []
-
-@pytest.mark.list_objects_v2
-def test_bucket_listv2_prefix_basic():
-    key_names = ['foo/bar', 'foo/baz', 'quux']
-    bucket_name = _create_objects(keys=key_names)
-    client = get_client()
-
-    response = client.list_objects_v2(Bucket=bucket_name, Prefix='foo/')
-    assert response['Prefix'] == 'foo/'
-
-    keys = _get_keys(response)
-    prefixes = _get_prefixes(response)
-    assert keys == ['foo/bar', 'foo/baz']
-    assert prefixes == []
-
-# just testing that we can do the delimiter and prefix logic on non-slashes
-def test_bucket_list_prefix_alt():
-    key_names = ['bar', 'baz', 'foo']
-    bucket_name = _create_objects(keys=key_names)
-    client = get_client()
-
-    response = client.list_objects(Bucket=bucket_name, Prefix='ba')
-    assert response['Prefix'] == 'ba'
-
-    keys = _get_keys(response)
-    prefixes = _get_prefixes(response)
-    assert keys == ['bar', 'baz']
-    assert prefixes == []
-
-@pytest.mark.list_objects_v2
-def test_bucket_listv2_prefix_alt():
-    key_names = ['bar', 'baz', 'foo']
-    bucket_name = _create_objects(keys=key_names)
-    client = get_client()
-
-    response = client.list_objects_v2(Bucket=bucket_name, Prefix='ba')
-    assert response['Prefix'] == 'ba'
-
-    keys = _get_keys(response)
-    prefixes = _get_prefixes(response)
-    assert keys == ['bar', 'baz']
-    assert prefixes == []
-
-def test_bucket_list_prefix_empty():
-    key_names = ['foo/bar', 'foo/baz', 'quux']
-    bucket_name = _create_objects(keys=key_names)
-    client = get_client()
-
-    response = client.list_objects(Bucket=bucket_name, Prefix='')
-    assert response['Prefix'] == ''
-
-    keys = _get_keys(response)
-    prefixes = _get_prefixes(response)
-    assert keys == key_names
-    assert prefixes == []
-
-@pytest.mark.list_objects_v2
-def test_bucket_listv2_prefix_empty():
-    key_names = ['foo/bar', 'foo/baz', 'quux']
-    bucket_name = _create_objects(keys=key_names)
-    client = get_client()
-
-    response = client.list_objects_v2(Bucket=bucket_name, Prefix='')
-    assert response['Prefix'] == ''
-
-    keys = _get_keys(response)
-    prefixes = _get_prefixes(response)
-    assert keys == key_names
-    assert prefixes == []
-
-def test_bucket_list_prefix_none():
-    key_names = ['foo/bar', 'foo/baz', 'quux']
-    bucket_name = _create_objects(keys=key_names)
-    client = get_client()
-
-    response = client.list_objects(Bucket=bucket_name, Prefix='')
-    assert response['Prefix'] == ''
-
-    keys = _get_keys(response)
-    prefixes = _get_prefixes(response)
-    assert keys == key_names
-    assert prefixes == []
-
-@pytest.mark.list_objects_v2
-def test_bucket_listv2_prefix_none():
-    key_names = ['foo/bar', 'foo/baz', 'quux']
-    bucket_name = _create_objects(keys=key_names)
-    client = get_client()
-
-    response = client.list_objects_v2(Bucket=bucket_name, Prefix='')
-    assert response['Prefix'] == ''
-
-    keys = _get_keys(response)
-    prefixes = _get_prefixes(response)
-    assert keys == key_names
-    assert prefixes == []
-
-def test_bucket_list_prefix_not_exist():
-    key_names = ['foo/bar', 'foo/baz', 'quux']
-    bucket_name = _create_objects(keys=key_names)
-    client = get_client()
-
-    response = client.list_objects(Bucket=bucket_name, Prefix='d')
-    assert response['Prefix'] == 'd'
-
-    keys = _get_keys(response)
-    prefixes = _get_prefixes(response)
-    assert keys == []
-    assert prefixes == []
-
-@pytest.mark.list_objects_v2
-def test_bucket_listv2_prefix_not_exist():
-    key_names = ['foo/bar', 'foo/baz', 'quux']
-    bucket_name = _create_objects(keys=key_names)
-    client = get_client()
-
-    response = client.list_objects_v2(Bucket=bucket_name, Prefix='d')
-    assert response['Prefix'] == 'd'
-
-    keys = _get_keys(response)
-    prefixes = _get_prefixes(response)
-    assert keys == []
-    assert prefixes == []
-
-def test_bucket_list_prefix_unreadable():
-    key_names = ['foo/bar', 'foo/baz', 'quux']
-    bucket_name = _create_objects(keys=key_names)
-    client = get_client()
-
-    response = client.list_objects(Bucket=bucket_name, Prefix='\x0a')
-    assert response['Prefix'] == '\x0a'
-
-    keys = _get_keys(response)
-    prefixes = _get_prefixes(response)
-    assert keys == []
-    assert prefixes == []
-
-@pytest.mark.list_objects_v2
-def test_bucket_listv2_prefix_unreadable():
-    key_names = ['foo/bar', 'foo/baz', 'quux']
-    bucket_name = _create_objects(keys=key_names)
-    client = get_client()
-
-    response = client.list_objects_v2(Bucket=bucket_name, Prefix='\x0a')
-    assert response['Prefix'] == '\x0a'
-
-    keys = _get_keys(response)
-    prefixes = _get_prefixes(response)
-    assert keys == []
-    assert prefixes == []
-
-def test_bucket_list_prefix_delimiter_basic():
-    key_names = ['foo/bar', 'foo/baz/xyzzy', 'quux/thud', 'asdf']
-    bucket_name = _create_objects(keys=key_names)
-    client = get_client()
-
-    response = client.list_objects(Bucket=bucket_name, Delimiter='/', Prefix='foo/')
-    assert response['Prefix'] == 'foo/'
-    assert response['Delimiter'] == '/'
-
-    keys = _get_keys(response)
-    prefixes = _get_prefixes(response)
-    assert keys == ['foo/bar']
-    assert prefixes == ['foo/baz/']
-
-@pytest.mark.list_objects_v2
-def test_bucket_listv2_prefix_delimiter_basic():
-    key_names = ['foo/bar', 'foo/baz/xyzzy', 'quux/thud', 'asdf']
-    bucket_name = _create_objects(keys=key_names)
-    client = get_client()
-
-    response = client.list_objects_v2(Bucket=bucket_name, Delimiter='/', Prefix='foo/')
-    assert response['Prefix'] == 'foo/'
-    assert response['Delimiter'] == '/'
-
-    keys = _get_keys(response)
-    prefixes = _get_prefixes(response)
-    assert keys == ['foo/bar']
-    assert prefixes == ['foo/baz/']
-
-def test_bucket_list_prefix_delimiter_alt():
-    key_names = ['bar', 'bazar', 'cab', 'foo']
-    bucket_name = _create_objects(keys=key_names)
-    client = get_client()
-
-    response = client.list_objects(Bucket=bucket_name, Delimiter='a', Prefix='ba')
-    assert response['Prefix'] == 'ba'
-    assert response['Delimiter'] == 'a'
-
-    keys = _get_keys(response)
-    prefixes = _get_prefixes(response)
-    assert keys == ['bar']
-    assert prefixes == ['baza']
-
-@pytest.mark.list_objects_v2
-def test_bucket_listv2_prefix_delimiter_alt():
-    key_names = ['bar', 'bazar', 'cab', 'foo']
-    bucket_name = _create_objects(keys=key_names)
-    client = get_client()
-
-    response = client.list_objects_v2(Bucket=bucket_name, Delimiter='a', Prefix='ba')
-    assert response['Prefix'] == 'ba'
-    assert response['Delimiter'] == 'a'
-
-    keys = _get_keys(response)
-    prefixes = _get_prefixes(response)
-    assert keys == ['bar']
-    assert prefixes == ['baza']
-
-def test_bucket_list_prefix_delimiter_prefix_not_exist():
-    key_names = ['b/a/r', 'b/a/c', 'b/a/g', 'g']
-    bucket_name = _create_objects(keys=key_names)
-    client = get_client()
-
-    response = client.list_objects(Bucket=bucket_name, Delimiter='d', Prefix='/')
-
-    keys = _get_keys(response)
-    prefixes = _get_prefixes(response)
-    assert keys == []
-    assert prefixes == []
-
-@pytest.mark.list_objects_v2
-def test_bucket_listv2_prefix_delimiter_prefix_not_exist():
-    key_names = ['b/a/r', 'b/a/c', 'b/a/g', 'g']
-    bucket_name = _create_objects(keys=key_names)
-    client = get_client()
-
-    response = client.list_objects_v2(Bucket=bucket_name, Delimiter='d', Prefix='/')
-
-    keys = _get_keys(response)
-    prefixes = _get_prefixes(response)
-    assert keys == []
-    assert prefixes == []
-
-def test_bucket_list_prefix_delimiter_delimiter_not_exist():
-    key_names = ['b/a/c', 'b/a/g', 'b/a/r', 'g']
-    bucket_name = _create_objects(keys=key_names)
-    client = get_client()
-
-    response = client.list_objects(Bucket=bucket_name, Delimiter='z', Prefix='b')
-
-    keys = _get_keys(response)
-    prefixes = _get_prefixes(response)
-    assert keys == ['b/a/c', 'b/a/g', 'b/a/r']
-    assert prefixes == []
-
-@pytest.mark.list_objects_v2
-def test_bucket_listv2_prefix_delimiter_delimiter_not_exist():
-    key_names = ['b/a/c', 'b/a/g', 'b/a/r', 'g']
-    bucket_name = _create_objects(keys=key_names)
-    client = get_client()
-
-    response = client.list_objects_v2(Bucket=bucket_name, Delimiter='z', Prefix='b')
-
-    keys = _get_keys(response)
-    prefixes = _get_prefixes(response)
-    assert keys == ['b/a/c', 'b/a/g', 'b/a/r']
-    assert prefixes == []
-
-def test_bucket_list_prefix_delimiter_prefix_delimiter_not_exist():
-    key_names = ['b/a/c', 'b/a/g', 'b/a/r', 'g']
-    bucket_name = _create_objects(keys=key_names)
-    client = get_client()
-
-    response = client.list_objects(Bucket=bucket_name, Delimiter='z', Prefix='y')
-
-    keys = _get_keys(response)
-    prefixes = _get_prefixes(response)
-    assert keys == []
-    assert prefixes == []
-
-@pytest.mark.list_objects_v2
-def test_bucket_listv2_prefix_delimiter_prefix_delimiter_not_exist():
-    key_names = ['b/a/c', 'b/a/g', 'b/a/r', 'g']
-    bucket_name = _create_objects(keys=key_names)
-    client = get_client()
-
-    response = client.list_objects_v2(Bucket=bucket_name, Delimiter='z', Prefix='y')
-
-    keys = _get_keys(response)
-    prefixes = _get_prefixes(response)
-    assert keys == []
-    assert prefixes == []
-
-@pytest.mark.fails_on_dbstore
-def test_bucket_list_maxkeys_one():
-    key_names = ['bar', 'baz', 'foo', 'quxx']
-    bucket_name = _create_objects(keys=key_names)
-    client = get_client()
-
-    response = client.list_objects(Bucket=bucket_name, MaxKeys=1)
-    assert response['IsTruncated'] == True
-
-    keys = _get_keys(response)
-    assert keys == key_names[0:1]
-
-    response = client.list_objects(Bucket=bucket_name, Marker=key_names[0])
-    assert response['IsTruncated'] == False
-
-    keys = _get_keys(response)
-    assert keys == key_names[1:]
-
-@pytest.mark.list_objects_v2
-@pytest.mark.fails_on_dbstore
-def test_bucket_listv2_maxkeys_one():
-    key_names = ['bar', 'baz', 'foo', 'quxx']
-    bucket_name = _create_objects(keys=key_names)
-    client = get_client()
-
-    response = client.list_objects_v2(Bucket=bucket_name, MaxKeys=1)
-    assert response['IsTruncated'] == True
-
-    keys = _get_keys(response)
-    assert keys == key_names[0:1]
-
-    response = client.list_objects_v2(Bucket=bucket_name, StartAfter=key_names[0])
-    assert response['IsTruncated'] == False
-
-    keys = _get_keys(response)
-    assert keys == key_names[1:]
-
-def test_bucket_list_maxkeys_zero():
-    key_names = ['bar', 'baz', 'foo', 'quxx']
-    bucket_name = _create_objects(keys=key_names)
-    client = get_client()
-
-    response = client.list_objects(Bucket=bucket_name, MaxKeys=0)
-
-    assert response['IsTruncated'] == False
-    keys = _get_keys(response)
-    assert keys == []
-
-@pytest.mark.list_objects_v2
-def test_bucket_listv2_maxkeys_zero():
-    key_names = ['bar', 'baz', 'foo', 'quxx']
-    bucket_name = _create_objects(keys=key_names)
-    client = get_client()
-
-    response = client.list_objects_v2(Bucket=bucket_name, MaxKeys=0)
-
-    assert response['IsTruncated'] == False
-    keys = _get_keys(response)
-    assert keys == []
-
-def test_bucket_list_maxkeys_none():
-    key_names = ['bar', 'baz', 'foo', 'quxx']
-    bucket_name = _create_objects(keys=key_names)
-    client = get_client()
-
-    response = client.list_objects(Bucket=bucket_name)
-    assert response['IsTruncated'] == False
-    keys = _get_keys(response)
-    assert keys == key_names
-    assert response['MaxKeys'] == 1000
-
-@pytest.mark.list_objects_v2
-def test_bucket_listv2_maxkeys_none():
-    key_names = ['bar', 'baz', 'foo', 'quxx']
-    bucket_name = _create_objects(keys=key_names)
-    client = get_client()
-
-    response = client.list_objects_v2(Bucket=bucket_name)
-    assert response['IsTruncated'] == False
-    keys = _get_keys(response)
-    assert keys == key_names
-    assert response['MaxKeys'] == 1000
-
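-# boto3 'after-call' event handler: stash the raw response body in a module
-# global so tests can parse RGW-specific XML payloads directly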
-def get_http_response_body(**kwargs):
-    global http_response_body
-    http_response_body = kwargs['http_response'].__dict__['_content']
-
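-# recursively convert an ElementTree element into a nested dict keyed by tag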
-def parseXmlToJson(xml):
-  response = {}
-
-  for child in list(xml):
-    if len(list(child)) > 0:
-      response[child.tag] = parseXmlToJson(child)
-    else:
-      response[child.tag] = child.text or ''
-
-    # one-liner equivalent
-    # response[child.tag] = parseXmlToJson(child) if len(list(child)) > 0 else child.text or ''
-
-  return response
-
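-# the '?usage' query string on ListBuckets is an RGW extension that reports
-# per-user quota/usage totals, so this test is expected to fail on AWS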
-@pytest.mark.fails_on_aws
-def test_account_usage():
-    # boto3.set_stream_logger(name='botocore')
-    client = get_client()
-    # adds the usage query parameter
-    def add_usage(**kwargs):
-        kwargs['params']['url'] += "?usage"
-    client.meta.events.register('before-call.s3.ListBuckets', add_usage)
-    client.meta.events.register('after-call.s3.ListBuckets', get_http_response_body)
-    client.list_buckets()
-    xml    = ET.fromstring(http_response_body.decode('utf-8'))
-    parsed = parseXmlToJson(xml)
-    summary = parsed['Summary']
-    assert summary['QuotaMaxBytes'] == '-1'
-    assert summary['QuotaMaxBuckets'] == '1000'
-    assert summary['QuotaMaxObjCount'] == '-1'
-    assert summary['QuotaMaxBytesPerBucket'] == '-1'
-    assert summary['QuotaMaxObjCountPerBucket'] == '-1'
-
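-# RGW returns X-RGW-* object-count and quota headers on HeadBucket; these are
-# not part of the AWS API, hence fails_on_aws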
-@pytest.mark.fails_on_aws
-@pytest.mark.fails_on_dbstore
-def test_head_bucket_usage():
-    # boto3.set_stream_logger(name='botocore')
-    client = get_client()
-    bucket_name = _create_objects(keys=['foo'])
-    # capture the HeadBucket response headers via an after-call event handler
-    client.meta.events.register('after-call.s3.HeadBucket', get_http_response)
-    client.head_bucket(Bucket=bucket_name)
-    hdrs = http_response['headers']
-    assert hdrs['X-RGW-Object-Count'] == '1'
-    assert hdrs['X-RGW-Bytes-Used'] == '3'
-    assert hdrs['X-RGW-Quota-User-Size'] == '-1'
-    assert hdrs['X-RGW-Quota-User-Objects'] == '-1'
-    assert hdrs['X-RGW-Quota-Max-Buckets'] == '1000'
-    assert hdrs['X-RGW-Quota-Bucket-Size'] == '-1'
-    assert hdrs['X-RGW-Quota-Bucket-Objects'] == '-1'
-
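-# 'allow-unordered=true' is an RGW extension to bucket listing that skips
-# sorted output for speed; AWS rejects it, hence fails_on_aws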
-@pytest.mark.fails_on_aws
-@pytest.mark.fails_on_dbstore
-def test_bucket_list_unordered():
-    # boto3.set_stream_logger(name='botocore')
-    keys_in = ['ado', 'bot', 'cob', 'dog', 'emu', 'fez', 'gnu', 'hex',
-               'abc/ink', 'abc/jet', 'abc/kin', 'abc/lax', 'abc/mux',
-               'def/nim', 'def/owl', 'def/pie', 'def/qed', 'def/rye',
-               'ghi/sew', 'ghi/tor', 'ghi/uke', 'ghi/via', 'ghi/wit',
-               'xix', 'yak', 'zoo']
-    bucket_name = _create_objects(keys=keys_in)
-    client = get_client()
-
-    # adds the unordered query parameter
-    def add_unordered(**kwargs):
-        kwargs['params']['url'] += "&allow-unordered=true"
-    client.meta.events.register('before-call.s3.ListObjects', add_unordered)
-
-    # test simple retrieval
-    response = client.list_objects(Bucket=bucket_name, MaxKeys=1000)
-    unordered_keys_out = _get_keys(response)
-    assert len(keys_in) == len(unordered_keys_out)
-    # list.sort() sorts in place and returns None, so compare sorted copies
-    assert sorted(keys_in) == sorted(unordered_keys_out)
-
-    # test retrieval with prefix
-    response = client.list_objects(Bucket=bucket_name,
-                                   MaxKeys=1000,
-                                   Prefix="abc/")
-    unordered_keys_out = _get_keys(response)
-    assert 5 == len(unordered_keys_out)
-
-    # test incremental retrieval with marker
-    response = client.list_objects(Bucket=bucket_name, MaxKeys=6)
-    unordered_keys_out = _get_keys(response)
-    assert 6 == len(unordered_keys_out)
-
-    # now get the next bunch
-    response = client.list_objects(Bucket=bucket_name,
-                                   MaxKeys=6,
-                                   Marker=unordered_keys_out[-1])
-    unordered_keys_out2 = _get_keys(response)
-    assert 6 == len(unordered_keys_out2)
-
-    # make sure there's no overlap between the incremental retrievals
-    intersect = set(unordered_keys_out).intersection(unordered_keys_out2)
-    assert 0 == len(intersect)
-
-    # verify that unordered used with delimiter results in error
-    e = assert_raises(ClientError,
-                      client.list_objects, Bucket=bucket_name, Delimiter="/")
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 400
-    assert error_code == 'InvalidArgument'
-
-@pytest.mark.fails_on_aws
-@pytest.mark.list_objects_v2
-@pytest.mark.fails_on_dbstore
-def test_bucket_listv2_unordered():
-    # boto3.set_stream_logger(name='botocore')
-    keys_in = ['ado', 'bot', 'cob', 'dog', 'emu', 'fez', 'gnu', 'hex',
-               'abc/ink', 'abc/jet', 'abc/kin', 'abc/lax', 'abc/mux',
-               'def/nim', 'def/owl', 'def/pie', 'def/qed', 'def/rye',
-               'ghi/sew', 'ghi/tor', 'ghi/uke', 'ghi/via', 'ghi/wit',
-               'xix', 'yak', 'zoo']
-    bucket_name = _create_objects(keys=keys_in)
-    client = get_client()
-
-    # adds the unordered query parameter
-    def add_unordered(**kwargs):
-        kwargs['params']['url'] += "&allow-unordered=true"
-    client.meta.events.register('before-call.s3.ListObjectsV2', add_unordered)
-
-    # test simple retrieval
-    response = client.list_objects_v2(Bucket=bucket_name, MaxKeys=1000)
-    unordered_keys_out = _get_keys(response)
-    assert len(keys_in) == len(unordered_keys_out)
-    # list.sort() sorts in place and returns None, so compare sorted copies
-    assert sorted(keys_in) == sorted(unordered_keys_out)
-
-    # test retrieval with prefix
-    response = client.list_objects_v2(Bucket=bucket_name,
-                                   MaxKeys=1000,
-                                   Prefix="abc/")
-    unordered_keys_out = _get_keys(response)
-    assert 5 == len(unordered_keys_out)
-
-    # test incremental retrieval with marker
-    response = client.list_objects_v2(Bucket=bucket_name, MaxKeys=6)
-    unordered_keys_out = _get_keys(response)
-    assert 6 == len(unordered_keys_out)
-
-    # now get the next bunch
-    response = client.list_objects_v2(Bucket=bucket_name,
-                                   MaxKeys=6,
-                                   StartAfter=unordered_keys_out[-1])
-    unordered_keys_out2 = _get_keys(response)
-    assert 6 == len(unordered_keys_out2)
-
-    # make sure there's no overlap between the incremental retrievals
-    intersect = set(unordered_keys_out).intersection(unordered_keys_out2)
-    assert 0 == len(intersect)
-
-    # verify that unordered used with delimiter results in error
-    e = assert_raises(ClientError,
-                      client.list_objects_v2, Bucket=bucket_name, Delimiter="/")
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 400
-    assert error_code == 'InvalidArgument'
-
-
-def test_bucket_list_maxkeys_invalid():
-    key_names = ['bar', 'baz', 'foo', 'quxx']
-    bucket_name = _create_objects(keys=key_names)
-    client = get_client()
-
-    # adds invalid max keys to url
-    # before list_objects is called
-    def add_invalid_maxkeys(**kwargs):
-        kwargs['params']['url'] += "&max-keys=blah"
-    client.meta.events.register('before-call.s3.ListObjects', add_invalid_maxkeys)
-
-    e = assert_raises(ClientError, client.list_objects, Bucket=bucket_name)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 400
-    assert error_code == 'InvalidArgument'
-
-
-
-def test_bucket_list_marker_none():
-    key_names = ['bar', 'baz', 'foo', 'quxx']
-    bucket_name = _create_objects(keys=key_names)
-    client = get_client()
-
-    response = client.list_objects(Bucket=bucket_name)
-    assert response['Marker'] == ''
-
-
-def test_bucket_list_marker_empty():
-    key_names = ['bar', 'baz', 'foo', 'quxx']
-    bucket_name = _create_objects(keys=key_names)
-    client = get_client()
-
-    response = client.list_objects(Bucket=bucket_name, Marker='')
-    assert response['Marker'] == ''
-    assert response['IsTruncated'] == False
-    keys = _get_keys(response)
-    assert keys == key_names
-
-@pytest.mark.list_objects_v2
-def test_bucket_listv2_continuationtoken_empty():
-    key_names = ['bar', 'baz', 'foo', 'quxx']
-    bucket_name = _create_objects(keys=key_names)
-    client = get_client()
-
-    response = client.list_objects_v2(Bucket=bucket_name, ContinuationToken='')
-    assert response['ContinuationToken'] == ''
-    assert response['IsTruncated'] == False
-    keys = _get_keys(response)
-    assert keys == key_names
-
-@pytest.mark.list_objects_v2
-def test_bucket_listv2_continuationtoken():
-    key_names = ['bar', 'baz', 'foo', 'quxx']
-    bucket_name = _create_objects(keys=key_names)
-    client = get_client()
-
-    response1 = client.list_objects_v2(Bucket=bucket_name, MaxKeys=1)
-    next_continuation_token = response1['NextContinuationToken']
-
-    response2 = client.list_objects_v2(Bucket=bucket_name, ContinuationToken=next_continuation_token)
-    assert response2['ContinuationToken'] == next_continuation_token
-    assert response2['IsTruncated'] == False
-    key_names2 = ['baz', 'foo', 'quxx']
-    keys = _get_keys(response2)
-    assert keys == key_names2
-
-@pytest.mark.list_objects_v2
-@pytest.mark.fails_on_dbstore
-def test_bucket_listv2_both_continuationtoken_startafter():
-    key_names = ['bar', 'baz', 'foo', 'quxx']
-    bucket_name = _create_objects(keys=key_names)
-    client = get_client()
-
-    response1 = client.list_objects_v2(Bucket=bucket_name, StartAfter='bar', MaxKeys=1)
-    next_continuation_token = response1['NextContinuationToken']
-
-    response2 = client.list_objects_v2(Bucket=bucket_name, StartAfter='bar', ContinuationToken=next_continuation_token)
-    assert response2['ContinuationToken'] == next_continuation_token
-    assert response2['StartAfter'] == 'bar'
-    assert response2['IsTruncated'] == False
-    key_names2 = ['foo', 'quxx']
-    keys = _get_keys(response2)
-    assert keys == key_names2
-
-def test_bucket_list_marker_unreadable():
-    key_names = ['bar', 'baz', 'foo', 'quxx']
-    bucket_name = _create_objects(keys=key_names)
-    client = get_client()
-
-    response = client.list_objects(Bucket=bucket_name, Marker='\x0a')
-    assert response['Marker'] == '\x0a'
-    assert response['IsTruncated'] == False
-    keys = _get_keys(response)
-    assert keys == key_names
-
-@pytest.mark.list_objects_v2
-def test_bucket_listv2_startafter_unreadable():
-    key_names = ['bar', 'baz', 'foo', 'quxx']
-    bucket_name = _create_objects(keys=key_names)
-    client = get_client()
-
-    response = client.list_objects_v2(Bucket=bucket_name, StartAfter='\x0a')
-    assert response['StartAfter'] == '\x0a'
-    assert response['IsTruncated'] == False
-    keys = _get_keys(response)
-    assert keys == key_names
-
-def test_bucket_list_marker_not_in_list():
-    key_names = ['bar', 'baz', 'foo', 'quxx']
-    bucket_name = _create_objects(keys=key_names)
-    client = get_client()
-
-    response = client.list_objects(Bucket=bucket_name, Marker='blah')
-    assert response['Marker'] == 'blah'
-    keys = _get_keys(response)
-    assert keys == ['foo', 'quxx']
-
-@pytest.mark.list_objects_v2
-def test_bucket_listv2_startafter_not_in_list():
-    key_names = ['bar', 'baz', 'foo', 'quxx']
-    bucket_name = _create_objects(keys=key_names)
-    client = get_client()
-
-    response = client.list_objects_v2(Bucket=bucket_name, StartAfter='blah')
-    assert response['StartAfter'] == 'blah'
-    keys = _get_keys(response)
-    assert keys == ['foo', 'quxx']
-
-def test_bucket_list_marker_after_list():
-    key_names = ['bar', 'baz', 'foo', 'quxx']
-    bucket_name = _create_objects(keys=key_names)
-    client = get_client()
-
-    response = client.list_objects(Bucket=bucket_name, Marker='zzz')
-    assert response['Marker'] == 'zzz'
-    keys = _get_keys(response)
-    assert response['IsTruncated'] == False
-    assert keys == []
-
-@pytest.mark.list_objects_v2
-def test_bucket_listv2_startafter_after_list():
-    key_names = ['bar', 'baz', 'foo', 'quxx']
-    bucket_name = _create_objects(keys=key_names)
-    client = get_client()
-
-    response = client.list_objects_v2(Bucket=bucket_name, StartAfter='zzz')
-    assert response['StartAfter'] == 'zzz'
-    keys = _get_keys(response)
-    assert response['IsTruncated'] == False
-    assert keys == []
-
-def _compare_dates(datetime1, datetime2):
-    """
-    zero out the microseconds on datetime1, then compare it to datetime2
-    """
-    # both times are in datetime format but datetime1 has
-    # microseconds and datetime2 does not
-    datetime1 = datetime1.replace(microsecond=0)
-    assert datetime1 == datetime2
-
-@pytest.mark.fails_on_dbstore
-def test_bucket_list_return_data():
-    key_names = ['bar', 'baz', 'foo']
-    bucket_name = _create_objects(keys=key_names)
-    client = get_client()
-
-    data = {}
-    for key_name in key_names:
-        obj_response = client.head_object(Bucket=bucket_name, Key=key_name)
-        acl_response = client.get_object_acl(Bucket=bucket_name, Key=key_name)
-        data.update({
-            key_name: {
-                'DisplayName': acl_response['Owner']['DisplayName'],
-                'ID': acl_response['Owner']['ID'],
-                'ETag': obj_response['ETag'],
-                'LastModified': obj_response['LastModified'],
-                'ContentLength': obj_response['ContentLength'],
-                }
-            })
-
-    response  = client.list_objects(Bucket=bucket_name)
-    objs_list = response['Contents']
-    for obj in objs_list:
-        key_name = obj['Key']
-        key_data = data[key_name]
-        assert obj['ETag'] == key_data['ETag']
-        assert obj['Size'] == key_data['ContentLength']
-        assert obj['Owner']['DisplayName'] == key_data['DisplayName']
-        assert obj['Owner']['ID'] == key_data['ID']
-        _compare_dates(obj['LastModified'],key_data['LastModified'])
-
-
-@pytest.mark.fails_on_dbstore
-def test_bucket_list_return_data_versioning():
-    bucket_name = get_new_bucket()
-    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
-    key_names = ['bar', 'baz', 'foo']
-    bucket_name = _create_objects(bucket_name=bucket_name,keys=key_names)
-
-    client = get_client()
-    data = {}
-
-    for key_name in key_names:
-        obj_response = client.head_object(Bucket=bucket_name, Key=key_name)
-        acl_response = client.get_object_acl(Bucket=bucket_name, Key=key_name)
-        data.update({
-            key_name: {
-                'ID': acl_response['Owner']['ID'],
-                'DisplayName': acl_response['Owner']['DisplayName'],
-                'ETag': obj_response['ETag'],
-                'LastModified': obj_response['LastModified'],
-                'ContentLength': obj_response['ContentLength'],
-                'VersionId': obj_response['VersionId']
-                }
-            })
-
-    response  = client.list_object_versions(Bucket=bucket_name)
-    objs_list = response['Versions']
-
-    for obj in objs_list:
-        key_name = obj['Key']
-        key_data = data[key_name]
-        assert obj['Owner']['DisplayName'] == key_data['DisplayName']
-        assert obj['ETag'] == key_data['ETag']
-        assert obj['Size'] == key_data['ContentLength']
-        assert obj['Owner']['ID'] == key_data['ID']
-        assert obj['VersionId'] == key_data['VersionId']
-        _compare_dates(obj['LastModified'],key_data['LastModified'])
-
-def test_bucket_list_objects_anonymous():
-    bucket_name = get_new_bucket()
-    client = get_client()
-    client.put_bucket_acl(Bucket=bucket_name, ACL='public-read')
-
-    unauthenticated_client = get_unauthenticated_client()
-    unauthenticated_client.list_objects(Bucket=bucket_name)
-
-@pytest.mark.list_objects_v2
-def test_bucket_listv2_objects_anonymous():
-    bucket_name = get_new_bucket()
-    client = get_client()
-    client.put_bucket_acl(Bucket=bucket_name, ACL='public-read')
-
-    unauthenticated_client = get_unauthenticated_client()
-    unauthenticated_client.list_objects_v2(Bucket=bucket_name)
-
-def test_bucket_list_objects_anonymous_fail():
-    bucket_name = get_new_bucket()
-
-    unauthenticated_client = get_unauthenticated_client()
-    e = assert_raises(ClientError, unauthenticated_client.list_objects, Bucket=bucket_name)
-
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 403
-    assert error_code == 'AccessDenied'
-
-@pytest.mark.list_objects_v2
-def test_bucket_listv2_objects_anonymous_fail():
-    bucket_name = get_new_bucket()
-
-    unauthenticated_client = get_unauthenticated_client()
-    e = assert_raises(ClientError, unauthenticated_client.list_objects_v2, Bucket=bucket_name)
-
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 403
-    assert error_code == 'AccessDenied'
-
-def test_bucket_notexist():
-    bucket_name = get_new_bucket_name()
-    client = get_client()
-
-    e = assert_raises(ClientError, client.list_objects, Bucket=bucket_name)
-
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 404
-    assert error_code == 'NoSuchBucket'
-
-@pytest.mark.list_objects_v2
-def test_bucketv2_notexist():
-    bucket_name = get_new_bucket_name()
-    client = get_client()
-
-    e = assert_raises(ClientError, client.list_objects_v2, Bucket=bucket_name)
-
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 404
-    assert error_code == 'NoSuchBucket'
-
-def test_bucket_delete_notexist():
-    bucket_name = get_new_bucket_name()
-    client = get_client()
-
-    e = assert_raises(ClientError, client.delete_bucket, Bucket=bucket_name)
-
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 404
-    assert error_code == 'NoSuchBucket'
-
-def test_bucket_delete_nonempty():
-    key_names = ['foo']
-    bucket_name = _create_objects(keys=key_names)
-    client = get_client()
-
-    e = assert_raises(ClientError, client.delete_bucket, Bucket=bucket_name)
-
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 409
-    assert error_code == 'BucketNotEmpty'
-
-def _do_set_bucket_canned_acl(client, bucket_name, canned_acl, i, results):
-    try:
-        client.put_bucket_acl(ACL=canned_acl, Bucket=bucket_name)
-        results[i] = True
-    except:
-        results[i] = False
-
-def _do_set_bucket_canned_acl_concurrent(client, bucket_name, canned_acl, num, results):
-    t = []
-    for i in range(num):
-        thr = threading.Thread(target = _do_set_bucket_canned_acl, args=(client, bucket_name, canned_acl, i, results))
-        thr.start()
-        t.append(thr)
-    return t
-
-def _do_wait_completion(t):
-    for thr in t:
-        thr.join()
-
-def test_bucket_concurrent_set_canned_acl():
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    num_threads = 50 # boto2's retry default was 5, so a thread would need to fail at least 5
-                     # times; 50 threads should be enough to exhaust the retries (if the bug
-                     # exists)
-    results = [None] * num_threads
-
-    t = _do_set_bucket_canned_acl_concurrent(client, bucket_name, 'public-read', num_threads, results)
-    _do_wait_completion(t)
-
-    for r in results:
-        assert r == True
-
-def test_object_write_to_nonexist_bucket():
-    key_names = ['foo']
-    bucket_name = 'whatchutalkinboutwillis'
-    client = get_client()
-
-    e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key='foo', Body='foo')
-
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 404
-    assert error_code == 'NoSuchBucket'
-
-
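-# event hook: add a 'Transfer-Encoding: chunked' header to the request before it is signed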
-def _ev_add_te_header(request, **kwargs):
-    request.headers.add_header('Transfer-Encoding', 'chunked')
-
-def test_object_write_with_chunked_transfer_encoding():
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    client.meta.events.register_first('before-sign.*.*', _ev_add_te_header)
-    response = client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
-
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-
-def test_bucket_create_delete():
-    bucket_name = get_new_bucket()
-    client = get_client()
-    client.delete_bucket(Bucket=bucket_name)
-
-    e = assert_raises(ClientError, client.delete_bucket, Bucket=bucket_name)
-
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 404
-    assert error_code == 'NoSuchBucket'
-
-def test_object_read_not_exist():
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key='bar')
-
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 404
-    assert error_code == 'NoSuchKey'
-
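-# capture the raw HTTP response from botocore's after-call event so tests can inspect the error body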
-http_response = None
-
-def get_http_response(**kwargs):
-    global http_response
-    http_response = kwargs['http_response'].__dict__
-
-@pytest.mark.fails_on_dbstore
-def test_object_requestid_matches_header_on_error():
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    # get http response after failed request
-    client.meta.events.register('after-call.s3.GetObject', get_http_response)
-    e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key='bar')
-
-    response_body = http_response['_content']
-    resp_body_xml = ET.fromstring(response_body)
-    request_id = resp_body_xml.find('.//RequestId').text
-
-    assert request_id is not None
-    assert request_id == e.response['ResponseMetadata']['RequestId']
-
-def _make_objs_dict(key_names):
-    objs_list = []
-    for key in key_names:
-        obj_dict = {'Key': key}
-        objs_list.append(obj_dict)
-    objs_dict = {'Objects': objs_list}
-    return objs_dict
-
-def test_versioning_concurrent_multi_object_delete():
-    num_objects = 5
-    num_threads = 5
-    bucket_name = get_new_bucket()
-
-    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
-
-    key_names = ["key_{:d}".format(x) for x in range(num_objects)]
-    bucket = _create_objects(bucket_name=bucket_name, keys=key_names)
-
-    client = get_client()
-    versions = client.list_object_versions(Bucket=bucket_name)['Versions']
-    assert len(versions) == num_objects
-    objs_dict = {'Objects': [dict((k, v[k]) for k in ["Key", "VersionId"]) for v in versions]}
-    results = [None] * num_threads
-
-    def do_request(n):
-        results[n] = client.delete_objects(Bucket=bucket_name, Delete=objs_dict)
-
-    t = []
-    for i in range(num_threads):
-        thr = threading.Thread(target = do_request, args=[i])
-        thr.start()
-        t.append(thr)
-    _do_wait_completion(t)
-
-    for response in results:
-        assert len(response['Deleted']) == num_objects
-        assert 'Errors' not in response
-
-    response = client.list_objects(Bucket=bucket_name)
-    assert 'Contents' not in response
-
-def test_multi_object_delete():
-    key_names = ['key0', 'key1', 'key2']
-    bucket_name = _create_objects(keys=key_names)
-    client = get_client()
-    response = client.list_objects(Bucket=bucket_name)
-    assert len(response['Contents']) == 3
-
-    objs_dict = _make_objs_dict(key_names=key_names)
-    response = client.delete_objects(Bucket=bucket_name, Delete=objs_dict)
-
-    assert len(response['Deleted']) == 3
-    assert 'Errors' not in response
-    response = client.list_objects(Bucket=bucket_name)
-    assert 'Contents' not in response
-
-    response = client.delete_objects(Bucket=bucket_name, Delete=objs_dict)
-    assert len(response['Deleted']) == 3
-    assert 'Errors' not in response
-    response = client.list_objects(Bucket=bucket_name)
-    assert 'Contents' not in response
-
-@pytest.mark.list_objects_v2
-def test_multi_objectv2_delete():
-    key_names = ['key0', 'key1', 'key2']
-    bucket_name = _create_objects(keys=key_names)
-    client = get_client()
-    response = client.list_objects_v2(Bucket=bucket_name)
-    assert len(response['Contents']) == 3
-
-    objs_dict = _make_objs_dict(key_names=key_names)
-    response = client.delete_objects(Bucket=bucket_name, Delete=objs_dict)
-
-    assert len(response['Deleted']) == 3
-    assert 'Errors' not in response
-    response = client.list_objects_v2(Bucket=bucket_name)
-    assert 'Contents' not in response
-
-    response = client.delete_objects(Bucket=bucket_name, Delete=objs_dict)
-    assert len(response['Deleted']) == 3
-    assert 'Errors' not in response
-    response = client.list_objects_v2(Bucket=bucket_name)
-    assert 'Contents' not in response
-
-def test_multi_object_delete_key_limit():
-    key_names = [f"key-{i}" for i in range(1001)]
-    bucket_name = _create_objects(keys=key_names)
-    client = get_client()
-
-    paginator = client.get_paginator('list_objects')
-    pages = paginator.paginate(Bucket=bucket_name)
-    numKeys = 0
-    for page in pages:
-        numKeys += len(page['Contents'])
-    assert numKeys == 1001
-
-    objs_dict = _make_objs_dict(key_names=key_names)
-    e = assert_raises(ClientError,client.delete_objects,Bucket=bucket_name,Delete=objs_dict)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 400
-
-def test_multi_objectv2_delete_key_limit():
-    key_names = [f"key-{i}" for i in range(1001)]
-    bucket_name = _create_objects(keys=key_names)
-    client = get_client()
-
-    paginator = client.get_paginator('list_objects_v2')
-    pages = paginator.paginate(Bucket=bucket_name)
-    numKeys = 0
-    for page in pages:
-        numKeys += len(page['Contents'])
-    assert numKeys == 1001
-
-    objs_dict = _make_objs_dict(key_names=key_names)
-    e = assert_raises(ClientError,client.delete_objects,Bucket=bucket_name,Delete=objs_dict)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 400
-
-def test_object_head_zero_bytes():
-    bucket_name = get_new_bucket()
-    client = get_client()
-    client.put_object(Bucket=bucket_name, Key='foo', Body='')
-
-    response = client.head_object(Bucket=bucket_name, Key='foo')
-    assert response['ContentLength'] == 0
-
-def test_object_write_check_etag():
-    bucket_name = get_new_bucket()
-    client = get_client()
-    response = client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-    assert response['ETag'] == '"37b51d194a7513e45b56f6524f2d51f2"'
-
-def test_object_write_cache_control():
-    bucket_name = get_new_bucket()
-    client = get_client()
-    cache_control = 'public, max-age=14400'
-    client.put_object(Bucket=bucket_name, Key='foo', Body='bar', CacheControl=cache_control)
-
-    response = client.head_object(Bucket=bucket_name, Key='foo')
-    assert response['ResponseMetadata']['HTTPHeaders']['cache-control'] == cache_control
-
-def test_object_write_expires():
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    utc = pytz.utc
-    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
-    client.put_object(Bucket=bucket_name, Key='foo', Body='bar', Expires=expires)
-
-    response = client.head_object(Bucket=bucket_name, Key='foo')
-    _compare_dates(expires, response['Expires'])
-
-def _get_body(response):
-    body = response['Body']
-    got = body.read()
-    if type(got) is bytes:
-        got = got.decode()
-    return got
-
-def test_object_write_read_update_read_delete():
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    # Write
-    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
-    # Read
-    response = client.get_object(Bucket=bucket_name, Key='foo')
-    body = _get_body(response)
-    assert body == 'bar'
-    # Update
-    client.put_object(Bucket=bucket_name, Key='foo', Body='soup')
-    # Read
-    response = client.get_object(Bucket=bucket_name, Key='foo')
-    body = _get_body(response)
-    assert body == 'soup'
-    # Delete
-    client.delete_object(Bucket=bucket_name, Key='foo')
-
-def _set_get_metadata(metadata, bucket_name=None):
-    """
-    create a new bucket, or use an existing one, and
-    write an object to it with the meta1 metadata
-    property set to the given value; then re-read the
-    object and return that property
-    """
-    if bucket_name is None:
-        bucket_name = get_new_bucket()
-
-    client = get_client()
-    metadata_dict = {'meta1': metadata}
-    client.put_object(Bucket=bucket_name, Key='foo', Body='bar', Metadata=metadata_dict)
-
-    response = client.get_object(Bucket=bucket_name, Key='foo')
-    return response['Metadata']['meta1']
-
-def test_object_set_get_metadata_none_to_good():
-    got = _set_get_metadata('mymeta')
-    assert got == 'mymeta'
-
-def test_object_set_get_metadata_none_to_empty():
-    got = _set_get_metadata('')
-    assert got == ''
-
-def test_object_set_get_metadata_overwrite_to_empty():
-    bucket_name = get_new_bucket()
-    got = _set_get_metadata('oldmeta', bucket_name)
-    assert got == 'oldmeta'
-    got = _set_get_metadata('', bucket_name)
-    assert got == ''
-
-# TODO: the decoding of this unicode metadata is not happening properly for unknown reasons
-@pytest.mark.fails_on_rgw
-def test_object_set_get_unicode_metadata():
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    def set_unicode_metadata(**kwargs):
-        kwargs['params']['headers']['x-amz-meta-meta1'] = u"Hello World\xe9"
-
-    client.meta.events.register('before-call.s3.PutObject', set_unicode_metadata)
-    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
-
-    response = client.get_object(Bucket=bucket_name, Key='foo')
-    got = response['Metadata']['meta1']
-    print(got)
-    print(u"Hello World\xe9")
-    assert got == u"Hello World\xe9"
-
-def _set_get_metadata_unreadable(metadata, bucket_name=None):
-    """
-    set and then read back a meta-data value (which presumably
-    includes some interesting characters), and return a list
-    containing the stored value AND the encoding with which it
-    was returned.
-
-    This should return a 400 bad request because the webserver
-    rejects the request.
-    """
-    bucket_name = get_new_bucket()
-    client = get_client()
-    metadata_dict = {'meta1': metadata}
-    e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key='bar', Metadata=metadata_dict)
-    return e
-
-def test_object_metadata_replaced_on_put():
-    bucket_name = get_new_bucket()
-    client = get_client()
-    metadata_dict = {'meta1': 'bar'}
-    client.put_object(Bucket=bucket_name, Key='foo', Body='bar', Metadata=metadata_dict)
-
-    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
-
-    response = client.get_object(Bucket=bucket_name, Key='foo')
-    got = response['Metadata']
-    assert got == {}
-
-def test_object_write_file():
-    bucket_name = get_new_bucket()
-    client = get_client()
-    data_str = 'bar'
-    data = bytes(data_str, 'utf-8')
-    client.put_object(Bucket=bucket_name, Key='foo', Body=data)
-    response = client.get_object(Bucket=bucket_name, Key='foo')
-    body = _get_body(response)
-    assert body == 'bar'
-
-def _get_post_url(bucket_name):
-    endpoint = get_config_endpoint()
-    return '{endpoint}/{bucket_name}'.format(endpoint=endpoint, bucket_name=bucket_name)
-
-def test_post_object_anonymous_request():
-    bucket_name = get_new_bucket_name()
-    client = get_client()
-    url = _get_post_url(bucket_name)
-    payload = OrderedDict([("key" , "foo.txt"),("acl" , "public-read"),\
-    ("Content-Type" , "text/plain"),('file', ('bar'))])
-
-    client.create_bucket(ACL='public-read-write', Bucket=bucket_name)
-    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
-    assert r.status_code == 204
-    response = client.get_object(Bucket=bucket_name, Key='foo.txt')
-    body = _get_body(response)
-    assert body == 'bar'
-
-def test_post_object_authenticated_request():
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    url = _get_post_url(bucket_name)
-    utc = pytz.utc
-    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
-
-    policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
-    "conditions": [\
-    {"bucket": bucket_name},\
-    ["starts-with", "$key", "foo"],\
-    {"acl": "private"},\
-    ["starts-with", "$Content-Type", "text/plain"],\
-    ["content-length-range", 0, 1024]\
-    ]\
-    }
-
-
-    json_policy_document = json.JSONEncoder().encode(policy_document)
-    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
-    policy = base64.b64encode(bytes_json_policy_document)
-    aws_secret_access_key = get_main_aws_secret_key()
-    aws_access_key_id = get_main_aws_access_key()
-
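-    # sign the base64-encoded policy with the secret key using HMAC-SHA1 (signature v2 style browser POST upload)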
-    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
-
-    payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
-    ("acl" , "private"),("signature" , signature),("policy" , policy),\
-    ("Content-Type" , "text/plain"),('file', ('bar'))])
-
-    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
-    assert r.status_code == 204
-    response = client.get_object(Bucket=bucket_name, Key='foo.txt')
-    body = _get_body(response)
-    assert body == 'bar'
-
-def test_post_object_authenticated_no_content_type():
-    bucket_name = get_new_bucket_name()
-    client = get_client()
-    client.create_bucket(ACL='public-read-write', Bucket=bucket_name)
-
-
-    url = _get_post_url(bucket_name)
-    utc = pytz.utc
-    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
-
-    policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
-    "conditions": [\
-    {"bucket": bucket_name},\
-    ["starts-with", "$key", "foo"],\
-    {"acl": "private"},\
-    ["content-length-range", 0, 1024]\
-    ]\
-    }
-
-    json_policy_document = json.JSONEncoder().encode(policy_document)
-    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
-    policy = base64.b64encode(bytes_json_policy_document)
-    aws_secret_access_key = get_main_aws_secret_key()
-    aws_access_key_id = get_main_aws_access_key()
-
-    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
-
-    payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
-    ("acl" , "private"),("signature" , signature),("policy" , policy),\
-    ('file', ('bar'))])
-
-    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
-    assert r.status_code == 204
-    response = client.get_object(Bucket=bucket_name, Key="foo.txt")
-    body = _get_body(response)
-    assert body == 'bar'
-
-def test_post_object_authenticated_request_bad_access_key():
-    bucket_name = get_new_bucket_name()
-    client = get_client()
-    client.create_bucket(ACL='public-read-write', Bucket=bucket_name)
-
-    url = _get_post_url(bucket_name)
-    utc = pytz.utc
-    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
-
-    policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
-    "conditions": [\
-    {"bucket": bucket_name},\
-    ["starts-with", "$key", "foo"],\
-    {"acl": "private"},\
-    ["starts-with", "$Content-Type", "text/plain"],\
-    ["content-length-range", 0, 1024]\
-    ]\
-    }
-
-
-    json_policy_document = json.JSONEncoder().encode(policy_document)
-    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
-    policy = base64.b64encode(bytes_json_policy_document)
-    aws_secret_access_key = get_main_aws_secret_key()
-    aws_access_key_id = get_main_aws_access_key()
-
-    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
-
-    payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , 'foo'),\
-    ("acl" , "private"),("signature" , signature),("policy" , policy),\
-    ("Content-Type" , "text/plain"),('file', ('bar'))])
-
-    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
-    assert r.status_code == 403
-
-def test_post_object_set_success_code():
-    bucket_name = get_new_bucket_name()
-    client = get_client()
-    client.create_bucket(ACL='public-read-write', Bucket=bucket_name)
-
-    url = _get_post_url(bucket_name)
-    payload = OrderedDict([("key" , "foo.txt"),("acl" , "public-read"),\
-    ("success_action_status" , "201"),\
-    ("Content-Type" , "text/plain"),('file', ('bar'))])
-
-    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
-    assert r.status_code == 201
-    message = ET.fromstring(r.content).find('Key')
-    assert message.text == 'foo.txt'
-
-def test_post_object_set_invalid_success_code():
-    bucket_name = get_new_bucket_name()
-    client = get_client()
-    client.create_bucket(ACL='public-read-write', Bucket=bucket_name)
-
-    url = _get_post_url(bucket_name)
-    payload = OrderedDict([("key" , "foo.txt"),("acl" , "public-read"),\
-    ("success_action_status" , "404"),\
-    ("Content-Type" , "text/plain"),('file', ('bar'))])
-
-    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
-    assert r.status_code == 204
-    content = r.content.decode()
-    assert content == ''
-
-def test_post_object_upload_larger_than_chunk():
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    url = _get_post_url(bucket_name)
-    utc = pytz.utc
-    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
-
-    policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
-    "conditions": [\
-    {"bucket": bucket_name},\
-    ["starts-with", "$key", "foo"],\
-    {"acl": "private"},\
-    ["starts-with", "$Content-Type", "text/plain"],\
-    ["content-length-range", 0, 5*1024*1024]\
-    ]\
-    }
-
-
-    json_policy_document = json.JSONEncoder().encode(policy_document)
-    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
-    policy = base64.b64encode(bytes_json_policy_document)
-    aws_secret_access_key = get_main_aws_secret_key()
-    aws_access_key_id = get_main_aws_access_key()
-
-    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
-
-    foo_string = 'foo' * 1024*1024
-
-    payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
-    ("acl" , "private"),("signature" , signature),("policy" , policy),\
-    ("Content-Type" , "text/plain"),('file', foo_string)])
-
-    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
-    assert r.status_code == 204
-    response = client.get_object(Bucket=bucket_name, Key='foo.txt')
-    body = _get_body(response)
-    assert body == foo_string
-
-def test_post_object_set_key_from_filename():
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    url = _get_post_url(bucket_name)
-    utc = pytz.utc
-    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
-
-    policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
-    "conditions": [\
-    {"bucket": bucket_name},\
-    ["starts-with", "$key", "foo"],\
-    {"acl": "private"},\
-    ["starts-with", "$Content-Type", "text/plain"],\
-    ["content-length-range", 0, 1024]\
-    ]\
-    }
-
-    json_policy_document = json.JSONEncoder().encode(policy_document)
-    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
-    policy = base64.b64encode(bytes_json_policy_document)
-    aws_secret_access_key = get_main_aws_secret_key()
-    aws_access_key_id = get_main_aws_access_key()
-
-    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
-
-    payload = OrderedDict([ ("key" , "${filename}"),("AWSAccessKeyId" , aws_access_key_id),\
-    ("acl" , "private"),("signature" , signature),("policy" , policy),\
-    ("Content-Type" , "text/plain"),('file', ('foo.txt', 'bar'))])
-
-    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
-    assert r.status_code == 204
-    response = client.get_object(Bucket=bucket_name, Key='foo.txt')
-    body = _get_body(response)
-    assert body == 'bar'
-
-def test_post_object_ignored_header():
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    url = _get_post_url(bucket_name)
-    utc = pytz.utc
-    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
-
-    policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
-    "conditions": [\
-    {"bucket": bucket_name},\
-    ["starts-with", "$key", "foo"],\
-    {"acl": "private"},\
-    ["starts-with", "$Content-Type", "text/plain"],\
-    ["content-length-range", 0, 1024]\
-    ]\
-    }
-
-
-    json_policy_document = json.JSONEncoder().encode(policy_document)
-    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
-    policy = base64.b64encode(bytes_json_policy_document)
-    aws_secret_access_key = get_main_aws_secret_key()
-    aws_access_key_id = get_main_aws_access_key()
-
-    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
-
-    payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
-    ("acl" , "private"),("signature" , signature),("policy" , policy),\
-    ("Content-Type" , "text/plain"),("x-ignore-foo" , "bar"),('file', ('bar'))])
-
-    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
-    assert r.status_code == 204
-
-def test_post_object_case_insensitive_condition_fields():
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    url = _get_post_url(bucket_name)
-    utc = pytz.utc
-    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
-
-    policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
-    "conditions": [\
-    {"bUcKeT": bucket_name},\
-    ["StArTs-WiTh", "$KeY", "foo"],\
-    {"AcL": "private"},\
-    ["StArTs-WiTh", "$CoNtEnT-TyPe", "text/plain"],\
-    ["content-length-range", 0, 1024]\
-    ]\
-    }
-
-    json_policy_document = json.JSONEncoder().encode(policy_document)
-    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
-    policy = base64.b64encode(bytes_json_policy_document)
-    aws_secret_access_key = get_main_aws_secret_key()
-    aws_access_key_id = get_main_aws_access_key()
-
-    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
-
-    foo_string = 'foo' * 1024*1024
-
-    payload = OrderedDict([ ("kEy" , "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
-    ("aCl" , "private"),("signature" , signature),("pOLICy" , policy),\
-    ("Content-Type" , "text/plain"),('file', ('bar'))])
-
-    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
-    assert r.status_code == 204
-
-def test_post_object_escaped_field_values():
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    url = _get_post_url(bucket_name)
-    utc = pytz.utc
-    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
-
-    policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
-    "conditions": [\
-    {"bucket": bucket_name},\
-    ["starts-with", "$key", "\$foo"],\
-    {"acl": "private"},\
-    ["starts-with", "$Content-Type", "text/plain"],\
-    ["content-length-range", 0, 1024]\
-    ]\
-    }
-
-    json_policy_document = json.JSONEncoder().encode(policy_document)
-    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
-    policy = base64.b64encode(bytes_json_policy_document)
-    aws_secret_access_key = get_main_aws_secret_key()
-    aws_access_key_id = get_main_aws_access_key()
-
-    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
-
-    payload = OrderedDict([ ("key" , "\$foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
-    ("acl" , "private"),("signature" , signature),("policy" , policy),\
-    ("Content-Type" , "text/plain"),('file', ('bar'))])
-
-    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
-    assert r.status_code == 204
-    response = client.get_object(Bucket=bucket_name, Key='\$foo.txt')
-    body = _get_body(response)
-    assert body == 'bar'
-
-def test_post_object_success_redirect_action():
-    bucket_name = get_new_bucket_name()
-    client = get_client()
-    client.create_bucket(ACL='public-read-write', Bucket=bucket_name)
-
-    url = _get_post_url(bucket_name)
-    redirect_url = _get_post_url(bucket_name)
-
-    utc = pytz.utc
-    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
-
-    policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
-    "conditions": [\
-    {"bucket": bucket_name},\
-    ["starts-with", "$key", "foo"],\
-    {"acl": "private"},\
-    ["starts-with", "$Content-Type", "text/plain"],\
-    ["eq", "$success_action_redirect", redirect_url],\
-    ["content-length-range", 0, 1024]\
-    ]\
-    }
-
-    json_policy_document = json.JSONEncoder().encode(policy_document)
-    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
-    policy = base64.b64encode(bytes_json_policy_document)
-    aws_secret_access_key = get_main_aws_secret_key()
-    aws_access_key_id = get_main_aws_access_key()
-
-    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
-
-    payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
-    ("acl" , "private"),("signature" , signature),("policy" , policy),\
-    ("Content-Type" , "text/plain"),("success_action_redirect" , redirect_url),\
-    ('file', ('bar'))])
-
-    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
-    assert r.status_code == 200
-    url = r.url
-    response = client.get_object(Bucket=bucket_name, Key='foo.txt')
-    assert url == '{rurl}?bucket={bucket}&key={key}&etag=%22{etag}%22'.format(\
-    rurl = redirect_url, bucket = bucket_name, key = 'foo.txt', etag = response['ETag'].strip('"'))
-
-def test_post_object_invalid_signature():
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    url = _get_post_url(bucket_name)
-    utc = pytz.utc
-    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
-
-    policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
-    "conditions": [\
-    {"bucket": bucket_name},\
-    ["starts-with", "$key", "\$foo"],\
-    {"acl": "private"},\
-    ["starts-with", "$Content-Type", "text/plain"],\
-    ["content-length-range", 0, 1024]\
-    ]\
-    }
-
-    json_policy_document = json.JSONEncoder().encode(policy_document)
-    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
-    policy = base64.b64encode(bytes_json_policy_document)
-    aws_secret_access_key = get_main_aws_secret_key()
-    aws_access_key_id = get_main_aws_access_key()
-
-    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())[::-1]
-
-    payload = OrderedDict([ ("key" , "\$foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
-    ("acl" , "private"),("signature" , signature),("policy" , policy),\
-    ("Content-Type" , "text/plain"),('file', ('bar'))])
-
-    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
-    assert r.status_code == 403
-
-def test_post_object_invalid_access_key():
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    url = _get_post_url(bucket_name)
-    utc = pytz.utc
-    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
-
-    policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
-    "conditions": [\
-    {"bucket": bucket_name},\
-    ["starts-with", "$key", "\$foo"],\
-    {"acl": "private"},\
-    ["starts-with", "$Content-Type", "text/plain"],\
-    ["content-length-range", 0, 1024]\
-    ]\
-    }
-
-    json_policy_document = json.JSONEncoder().encode(policy_document)
-    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
-    policy = base64.b64encode(bytes_json_policy_document)
-    aws_secret_access_key = get_main_aws_secret_key()
-    aws_access_key_id = get_main_aws_access_key()
-
-    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
-
-    payload = OrderedDict([ ("key" , "\$foo.txt"),("AWSAccessKeyId" , aws_access_key_id[::-1]),\
-    ("acl" , "private"),("signature" , signature),("policy" , policy),\
-    ("Content-Type" , "text/plain"),('file', ('bar'))])
-
-    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
-    assert r.status_code == 403
-
-def test_post_object_invalid_date_format():
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    url = _get_post_url(bucket_name)
-    utc = pytz.utc
-    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
-
-    policy_document = {"expiration": str(expires),\
-    "conditions": [\
-    {"bucket": bucket_name},\
-    ["starts-with", "$key", "\$foo"],\
-    {"acl": "private"},\
-    ["starts-with", "$Content-Type", "text/plain"],\
-    ["content-length-range", 0, 1024]\
-    ]\
-    }
-
-    json_policy_document = json.JSONEncoder().encode(policy_document)
-    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
-    policy = base64.b64encode(bytes_json_policy_document)
-    aws_secret_access_key = get_main_aws_secret_key()
-    aws_access_key_id = get_main_aws_access_key()
-
-    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
-
-    payload = OrderedDict([ ("key" , "\$foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
-    ("acl" , "private"),("signature" , signature),("policy" , policy),\
-    ("Content-Type" , "text/plain"),('file', ('bar'))])
-
-    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
-    assert r.status_code == 400
-
-def test_post_object_no_key_specified():
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    url = _get_post_url(bucket_name)
-    utc = pytz.utc
-    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
-
-    policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
-    "conditions": [\
-    {"bucket": bucket_name},\
-    {"acl": "private"},\
-    ["starts-with", "$Content-Type", "text/plain"],\
-    ["content-length-range", 0, 1024]\
-    ]\
-    }
-
-    json_policy_document = json.JSONEncoder().encode(policy_document)
-    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
-    policy = base64.b64encode(bytes_json_policy_document)
-    aws_secret_access_key = get_main_aws_secret_key()
-    aws_access_key_id = get_main_aws_access_key()
-
-    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
-
-    payload = OrderedDict([ ("AWSAccessKeyId" , aws_access_key_id),\
-    ("acl" , "private"),("signature" , signature),("policy" , policy),\
-    ("Content-Type" , "text/plain"),('file', ('bar'))])
-
-    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
-    assert r.status_code == 400
-
-def test_post_object_missing_signature():
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    url = _get_post_url(bucket_name)
-    utc = pytz.utc
-    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
-
-    policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
-    "conditions": [\
-    {"bucket": bucket_name},\
-    ["starts-with", "$key", "\$foo"],\
-    {"acl": "private"},\
-    ["starts-with", "$Content-Type", "text/plain"],\
-    ["content-length-range", 0, 1024]\
-    ]\
-    }
-
-    json_policy_document = json.JSONEncoder().encode(policy_document)
-    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
-    policy = base64.b64encode(bytes_json_policy_document)
-    aws_secret_access_key = get_main_aws_secret_key()
-    aws_access_key_id = get_main_aws_access_key()
-
-    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
-
-    payload = OrderedDict([ ("key", "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
-    ("acl" , "private"),("policy" , policy),\
-    ("Content-Type" , "text/plain"),('file', ('bar'))])
-
-    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
-    assert r.status_code == 400
-
-def test_post_object_missing_policy_condition():
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    url = _get_post_url(bucket_name)
-    utc = pytz.utc
-    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
-
-    policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
-    "conditions": [\
-    ["starts-with", "$key", "\$foo"],\
-    {"acl": "private"},\
-    ["starts-with", "$Content-Type", "text/plain"],\
-    ["content-length-range", 0, 1024]\
-    ]\
-    }
-
-    json_policy_document = json.JSONEncoder().encode(policy_document)
-    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
-    policy = base64.b64encode(bytes_json_policy_document)
-    aws_secret_access_key = get_main_aws_secret_key()
-    aws_access_key_id = get_main_aws_access_key()
-
-    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
-
-    payload = OrderedDict([ ("key", "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
-    ("acl" , "private"),("signature" , signature),("policy" , policy),\
-    ("Content-Type" , "text/plain"),('file', ('bar'))])
-
-    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
-    assert r.status_code == 403
-
-def test_post_object_user_specified_header():
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    url = _get_post_url(bucket_name)
-    utc = pytz.utc
-    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
-
-    policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
-    "conditions": [\
-    {"bucket": bucket_name},\
-    ["starts-with", "$key", "foo"],\
-    {"acl": "private"},\
-    ["starts-with", "$Content-Type", "text/plain"],\
-    ["content-length-range", 0, 1024],\
-    ["starts-with", "$x-amz-meta-foo",  "bar"]
-    ]\
-    }
-
-    json_policy_document = json.JSONEncoder().encode(policy_document)
-    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
-    policy = base64.b64encode(bytes_json_policy_document)
-    aws_secret_access_key = get_main_aws_secret_key()
-    aws_access_key_id = get_main_aws_access_key()
-
-    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
-
-    payload = OrderedDict([ ("key", "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
-    ("acl" , "private"),("signature" , signature),("policy" , policy),\
-    ("Content-Type" , "text/plain"),('x-amz-meta-foo' , 'barclamp'),('file', ('bar'))])
-
-    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
-    assert r.status_code == 204
-    response = client.get_object(Bucket=bucket_name, Key='foo.txt')
-    assert response['Metadata']['foo'] == 'barclamp'
-
-def test_post_object_request_missing_policy_specified_field():
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    url = _get_post_url(bucket_name)
-    utc = pytz.utc
-    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
-
-    policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
-    "conditions": [\
-    {"bucket": bucket_name},\
-    ["starts-with", "$key", "foo"],\
-    {"acl": "private"},\
-    ["starts-with", "$Content-Type", "text/plain"],\
-    ["content-length-range", 0, 1024],\
-    ["starts-with", "$x-amz-meta-foo",  "bar"]
-    ]\
-    }
-
-    json_policy_document = json.JSONEncoder().encode(policy_document)
-    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
-    policy = base64.b64encode(bytes_json_policy_document)
-    aws_secret_access_key = get_main_aws_secret_key()
-    aws_access_key_id = get_main_aws_access_key()
-
-    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
-
-    payload = OrderedDict([ ("key", "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
-    ("acl" , "private"),("signature" , signature),("policy" , policy),\
-    ("Content-Type" , "text/plain"),('file', ('bar'))])
-
-    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
-    assert r.status_code == 403
-
-def test_post_object_condition_is_case_sensitive():
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    url = _get_post_url(bucket_name)
-    utc = pytz.utc
-    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
-
-    policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
-    "CONDITIONS": [\
-    {"bucket": bucket_name},\
-    ["starts-with", "$key", "foo"],\
-    {"acl": "private"},\
-    ["starts-with", "$Content-Type", "text/plain"],\
-    ["content-length-range", 0, 1024],\
-    ]\
-    }
-
-    json_policy_document = json.JSONEncoder().encode(policy_document)
-    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
-    policy = base64.b64encode(bytes_json_policy_document)
-    aws_secret_access_key = get_main_aws_secret_key()
-    aws_access_key_id = get_main_aws_access_key()
-
-    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
-
-    payload = OrderedDict([ ("key", "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
-    ("acl" , "private"),("signature" , signature),("policy" , policy),\
-    ("Content-Type" , "text/plain"),('file', ('bar'))])
-
-    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
-    assert r.status_code == 400
-
-def test_post_object_expires_is_case_sensitive():
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    url = _get_post_url(bucket_name)
-    utc = pytz.utc
-    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
-
-    policy_document = {"EXPIRATION": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
-    "conditions": [\
-    {"bucket": bucket_name},\
-    ["starts-with", "$key", "foo"],\
-    {"acl": "private"},\
-    ["starts-with", "$Content-Type", "text/plain"],\
-    ["content-length-range", 0, 1024],\
-    ]\
-    }
-
-    json_policy_document = json.JSONEncoder().encode(policy_document)
-    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
-    policy = base64.b64encode(bytes_json_policy_document)
-    aws_secret_access_key = get_main_aws_secret_key()
-    aws_access_key_id = get_main_aws_access_key()
-
-    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
-
-    payload = OrderedDict([ ("key", "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
-    ("acl" , "private"),("signature" , signature),("policy" , policy),\
-    ("Content-Type" , "text/plain"),('file', ('bar'))])
-
-    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
-    assert r.status_code == 400
-
-def test_post_object_expired_policy():
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    url = _get_post_url(bucket_name)
-    utc = pytz.utc
-    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=-6000)
-
-    policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
-    "conditions": [\
-    {"bucket": bucket_name},\
-    ["starts-with", "$key", "foo"],\
-    {"acl": "private"},\
-    ["starts-with", "$Content-Type", "text/plain"],\
-    ["content-length-range", 0, 1024],\
-    ]\
-    }
-
-    json_policy_document = json.JSONEncoder().encode(policy_document)
-    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
-    policy = base64.b64encode(bytes_json_policy_document)
-    aws_secret_access_key = get_main_aws_secret_key()
-    aws_access_key_id = get_main_aws_access_key()
-
-    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
-
-    payload = OrderedDict([ ("key", "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
-    ("acl" , "private"),("signature" , signature),("policy" , policy),\
-    ("Content-Type" , "text/plain"),('file', ('bar'))])
-
-    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
-    assert r.status_code == 403
-
-def test_post_object_wrong_bucket():
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    utc = pytz.utc
-    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
-
-    policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
-    "conditions": [\
-    {"bucket": bucket_name},\
-    ["starts-with", "$key", "foo"],\
-    {"acl": "private"},\
-    ["starts-with", "$Content-Type", "text/plain"],\
-    ["content-length-range", 0, 1024]\
-    ]\
-    }
-
-    json_policy_document = json.JSONEncoder().encode(policy_document)
-    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
-    policy = base64.b64encode(bytes_json_policy_document)
-    aws_secret_access_key = get_main_aws_secret_key()
-    aws_access_key_id = get_main_aws_access_key()
-
-    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
-
-    payload = OrderedDict([ ("key" , "${filename}"),('bucket', bucket_name),\
-    ("AWSAccessKeyId" , aws_access_key_id),\
-    ("acl" , "private"),("signature" , signature),("policy" , policy),\
-    ("Content-Type" , "text/plain"),('file', ('foo.txt', 'bar'))])
-
-    bad_bucket_name = get_new_bucket()
-    url = _get_post_url(bad_bucket_name)
-
-    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
-    assert r.status_code == 403
-
-def test_post_object_invalid_request_field_value():
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    url = _get_post_url(bucket_name)
-    utc = pytz.utc
-    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
-
-    policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
-    "conditions": [\
-    {"bucket": bucket_name},\
-    ["starts-with", "$key", "foo"],\
-    {"acl": "private"},\
-    ["starts-with", "$Content-Type", "text/plain"],\
-    ["content-length-range", 0, 1024],\
-    ["eq", "$x-amz-meta-foo",  ""]
-    ]\
-    }
-
-    json_policy_document = json.JSONEncoder().encode(policy_document)
-    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
-    policy = base64.b64encode(bytes_json_policy_document)
-    aws_secret_access_key = get_main_aws_secret_key()
-    aws_access_key_id = get_main_aws_access_key()
-
-    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
-    payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
-    ("acl" , "private"),("signature" , signature),("policy" , policy),\
-    ("Content-Type" , "text/plain"),('x-amz-meta-foo' , 'barclamp'),('file', ('bar'))])
-
-    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
-    assert r.status_code == 403
-
-def test_post_object_missing_expires_condition():
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    url = _get_post_url(bucket_name)
-    utc = pytz.utc
-    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
-
-    policy_document = {\
-    "conditions": [\
-    {"bucket": bucket_name},\
-    ["starts-with", "$key", "foo"],\
-    {"acl": "private"},\
-    ["starts-with", "$Content-Type", "text/plain"],\
-    ["content-length-range", 0, 1024],\
-    ]\
-    }
-
-    json_policy_document = json.JSONEncoder().encode(policy_document)
-    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
-    policy = base64.b64encode(bytes_json_policy_document)
-    aws_secret_access_key = get_main_aws_secret_key()
-    aws_access_key_id = get_main_aws_access_key()
-
-    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
-
-    payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
-    ("acl" , "private"),("signature" , signature),("policy" , policy),\
-    ("Content-Type" , "text/plain"),('file', ('bar'))])
-
-    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
-    assert r.status_code == 400
-
-def test_post_object_missing_conditions_list():
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    url = _get_post_url(bucket_name)
-    utc = pytz.utc
-    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
-
-    policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ")}
-
-    json_policy_document = json.JSONEncoder().encode(policy_document)
-    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
-    policy = base64.b64encode(bytes_json_policy_document)
-    aws_secret_access_key = get_main_aws_secret_key()
-    aws_access_key_id = get_main_aws_access_key()
-
-    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
-
-    payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
-    ("acl" , "private"),("signature" , signature),("policy" , policy),\
-    ("Content-Type" , "text/plain"),('file', ('bar'))])
-
-    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
-    assert r.status_code == 400
-
-def test_post_object_upload_size_limit_exceeded():
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    url = _get_post_url(bucket_name)
-    utc = pytz.utc
-    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
-
-    policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
-    "conditions": [\
-    {"bucket": bucket_name},\
-    ["starts-with", "$key", "foo"],\
-    {"acl": "private"},\
-    ["starts-with", "$Content-Type", "text/plain"],\
-    ["content-length-range", 0, 0],\
-    ]\
-    }
-
-    json_policy_document = json.JSONEncoder().encode(policy_document)
-    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
-    policy = base64.b64encode(bytes_json_policy_document)
-    aws_secret_access_key = get_main_aws_secret_key()
-    aws_access_key_id = get_main_aws_access_key()
-
-    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
-
-    payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
-    ("acl" , "private"),("signature" , signature),("policy" , policy),\
-    ("Content-Type" , "text/plain"),('file', ('bar'))])
-
-    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
-    assert r.status_code == 400
-
-def test_post_object_missing_content_length_argument():
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    url = _get_post_url(bucket_name)
-    utc = pytz.utc
-    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
-
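-    # content-length-range is given only a lower bound (the max argument is missing),
-    # so the policy is malformed and the POST should be rejected with 400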
-    policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
-    "conditions": [\
-    {"bucket": bucket_name},\
-    ["starts-with", "$key", "foo"],\
-    {"acl": "private"},\
-    ["starts-with", "$Content-Type", "text/plain"],\
-    ["content-length-range", 0],\
-    ]\
-    }
-
-    json_policy_document = json.JSONEncoder().encode(policy_document)
-    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
-    policy = base64.b64encode(bytes_json_policy_document)
-    aws_secret_access_key = get_main_aws_secret_key()
-    aws_access_key_id = get_main_aws_access_key()
-
-    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
-
-    payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
-    ("acl" , "private"),("signature" , signature),("policy" , policy),\
-    ("Content-Type" , "text/plain"),('file', ('bar'))])
-
-    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
-    assert r.status_code == 400
-
-def test_post_object_invalid_content_length_argument():
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    url = _get_post_url(bucket_name)
-    utc = pytz.utc
-    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
-
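-    # a negative lower bound makes the content-length-range invalid; expect 400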
-    policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
-    "conditions": [\
-    {"bucket": bucket_name},\
-    ["starts-with", "$key", "foo"],\
-    {"acl": "private"},\
-    ["starts-with", "$Content-Type", "text/plain"],\
-    ["content-length-range", -1, 0],\
-    ]\
-    }
-
-    json_policy_document = json.JSONEncoder().encode(policy_document)
-    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
-    policy = base64.b64encode(bytes_json_policy_document)
-    aws_secret_access_key = get_main_aws_secret_key()
-    aws_access_key_id = get_main_aws_access_key()
-
-    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
-
-    payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
-    ("acl" , "private"),("signature" , signature),("policy" , policy),\
-    ("Content-Type" , "text/plain"),('file', ('bar'))])
-
-    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
-    assert r.status_code == 400
-
-def test_post_object_upload_size_below_minimum():
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    url = _get_post_url(bucket_name)
-    utc = pytz.utc
-    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
-
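-    # the 3-byte payload falls below the 512-byte minimum of content-length-range; expect 400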
-    policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
-    "conditions": [\
-    {"bucket": bucket_name},\
-    ["starts-with", "$key", "foo"],\
-    {"acl": "private"},\
-    ["starts-with", "$Content-Type", "text/plain"],\
-    ["content-length-range", 512, 1000],\
-    ]\
-    }
-
-    json_policy_document = json.JSONEncoder().encode(policy_document)
-    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
-    policy = base64.b64encode(bytes_json_policy_document)
-    aws_secret_access_key = get_main_aws_secret_key()
-    aws_access_key_id = get_main_aws_access_key()
-
-    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
-
-    payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
-    ("acl" , "private"),("signature" , signature),("policy" , policy),\
-    ("Content-Type" , "text/plain"),('file', ('bar'))])
-
-    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
-    assert r.status_code == 400
-
-def test_post_object_upload_size_rgw_chunk_size_bug():
-    # Test for https://tracker.ceph.com/issues/58627
-    # TODO: if this value is different in Teuthology runs, this would need tuning
-    # https://github.com/ceph/ceph/blob/main/qa/suites/rgw/verify/striping%24/stripe-greater-than-chunk.yaml
-    _rgw_max_chunk_size = 4 * 2**20 # 4MiB
-    min_size = _rgw_max_chunk_size
-    max_size = _rgw_max_chunk_size * 3
-    # the payload is striped as [one full rgw chunk][small remainder]
-    test_payload_size = _rgw_max_chunk_size + 200 # extra bit to push it over the chunk boundary
-    # it should be valid when we run this test!
-    assert test_payload_size > min_size
-    assert test_payload_size < max_size
-
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    url = _get_post_url(bucket_name)
-    utc = pytz.utc
-    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
-
-    policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
-    "conditions": [\
-    {"bucket": bucket_name},\
-    ["starts-with", "$key", "foo"],\
-    {"acl": "private"},\
-    ["starts-with", "$Content-Type", "text/plain"],\
-    ["content-length-range", min_size, max_size],\
-    ]\
-    }
-
-    test_payload = 'x' * test_payload_size
-
-    json_policy_document = json.JSONEncoder().encode(policy_document)
-    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
-    policy = base64.b64encode(bytes_json_policy_document)
-    aws_secret_access_key = get_main_aws_secret_key()
-    aws_access_key_id = get_main_aws_access_key()
-
-    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
-
-    payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
-    ("acl" , "private"),("signature" , signature),("policy" , policy),\
-    ("Content-Type" , "text/plain"),('file', (test_payload))])
-
-    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
-    assert r.status_code == 204
-
-def test_post_object_empty_conditions():
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    url = _get_post_url(bucket_name)
-    utc = pytz.utc
-    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
-
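-    # the conditions list holds only an empty dict, which is not a valid condition; expect 400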
-    policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
-    "conditions": [\
-    { }\
-    ]\
-    }
-
-    json_policy_document = json.JSONEncoder().encode(policy_document)
-    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
-    policy = base64.b64encode(bytes_json_policy_document)
-    aws_secret_access_key = get_main_aws_secret_key()
-    aws_access_key_id = get_main_aws_access_key()
-
-    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
-
-    payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
-    ("acl" , "private"),("signature" , signature),("policy" , policy),\
-    ("Content-Type" , "text/plain"),('file', ('bar'))])
-
-    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
-    assert r.status_code == 400
-
-def test_get_object_ifmatch_good():
-    bucket_name = get_new_bucket()
-    client = get_client()
-    response = client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
-    etag = response['ETag']
-
-    response = client.get_object(Bucket=bucket_name, Key='foo', IfMatch=etag)
-    body = _get_body(response)
-    assert body == 'bar'
-
-def test_get_object_ifmatch_failed():
-    bucket_name = get_new_bucket()
-    client = get_client()
-    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
-
-    e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key='foo', IfMatch='"ABCORZ"')
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 412
-    assert error_code == 'PreconditionFailed'
-
-def test_get_object_ifnonematch_good():
-    bucket_name = get_new_bucket()
-    client = get_client()
-    response = client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
-    etag = response['ETag']
-
-    e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key='foo', IfNoneMatch=etag)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 304
-    assert e.response['Error']['Message'] == 'Not Modified'
-    assert e.response['ResponseMetadata']['HTTPHeaders']['etag'] == etag
-
-def test_get_object_ifnonematch_failed():
-    bucket_name = get_new_bucket()
-    client = get_client()
-    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
-
-    response = client.get_object(Bucket=bucket_name, Key='foo', IfNoneMatch='ABCORZ')
-    body = _get_body(response)
-    assert body == 'bar'
-
-def test_get_object_ifmodifiedsince_good():
-    bucket_name = get_new_bucket()
-    client = get_client()
-    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
-
-    response = client.get_object(Bucket=bucket_name, Key='foo', IfModifiedSince='Sat, 29 Oct 1994 19:43:31 GMT')
-    body = _get_body(response)
-    assert body == 'bar'
-
-@pytest.mark.fails_on_dbstore
-def test_get_object_ifmodifiedsince_failed():
-    bucket_name = get_new_bucket()
-    client = get_client()
-    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
-    response = client.get_object(Bucket=bucket_name, Key='foo')
-    etag = response['ETag']
-    last_modified = str(response['LastModified'])
-
-    last_modified = last_modified.split('+')[0]
-    mtime = datetime.datetime.strptime(last_modified, '%Y-%m-%d %H:%M:%S')
-
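-    # request the object only if it was modified after its own mtime + 1s;
-    # since it has not changed, the GET should return 304 Not Modified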
-    after = mtime + datetime.timedelta(seconds=1)
-    after_str = time.strftime("%a, %d %b %Y %H:%M:%S GMT", after.timetuple())
-
-    time.sleep(1)
-
-    e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key='foo', IfModifiedSince=after_str)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 304
-    assert e.response['Error']['Message'] == 'Not Modified'
-    assert e.response['ResponseMetadata']['HTTPHeaders']['etag'] == etag
-
-@pytest.mark.fails_on_dbstore
-def test_get_object_ifunmodifiedsince_good():
-    bucket_name = get_new_bucket()
-    client = get_client()
-    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
-
-    e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key='foo', IfUnmodifiedSince='Sat, 29 Oct 1994 19:43:31 GMT')
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 412
-    assert error_code == 'PreconditionFailed'
-
-def test_get_object_ifunmodifiedsince_failed():
-    bucket_name = get_new_bucket()
-    client = get_client()
-    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
-
-    response = client.get_object(Bucket=bucket_name, Key='foo', IfUnmodifiedSince='Sat, 29 Oct 2100 19:43:31 GMT')
-    body = _get_body(response)
-    assert body == 'bar'
-
-
-@pytest.mark.fails_on_aws
-def test_put_object_ifmatch_good():
-    bucket_name = get_new_bucket()
-    client = get_client()
-    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
-
-    response = client.get_object(Bucket=bucket_name, Key='foo')
-    body = _get_body(response)
-    assert body == 'bar'
-
-    etag = response['ETag'].replace('"', '')
-
-    # pass in custom header 'If-Match' before PutObject call
-    lf = (lambda **kwargs: kwargs['params']['headers'].update({'If-Match': etag}))
-    client.meta.events.register('before-call.s3.PutObject', lf)
-    response = client.put_object(Bucket=bucket_name,Key='foo', Body='zar')
-
-    response = client.get_object(Bucket=bucket_name, Key='foo')
-    body = _get_body(response)
-    assert body == 'zar'
-
-@pytest.mark.fails_on_dbstore
-def test_put_object_ifmatch_failed():
-    bucket_name = get_new_bucket()
-    client = get_client()
-    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
-    response = client.get_object(Bucket=bucket_name, Key='foo')
-    body = _get_body(response)
-    assert body == 'bar'
-
-    # pass in custom header 'If-Match' before PutObject call
-    lf = (lambda **kwargs: kwargs['params']['headers'].update({'If-Match': '"ABCORZ"'}))
-    client.meta.events.register('before-call.s3.PutObject', lf)
-
-    e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key='foo', Body='zar')
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 412
-    assert error_code == 'PreconditionFailed'
-
-    response = client.get_object(Bucket=bucket_name, Key='foo')
-    body = _get_body(response)
-    assert body == 'bar'
-
-@pytest.mark.fails_on_aws
-def test_put_object_ifmatch_overwrite_existed_good():
-    bucket_name = get_new_bucket()
-    client = get_client()
-    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
-    response = client.get_object(Bucket=bucket_name, Key='foo')
-    body = _get_body(response)
-    assert body == 'bar'
-
-    lf = (lambda **kwargs: kwargs['params']['headers'].update({'If-Match': '*'}))
-    client.meta.events.register('before-call.s3.PutObject', lf)
-    response = client.put_object(Bucket=bucket_name,Key='foo', Body='zar')
-
-    response = client.get_object(Bucket=bucket_name, Key='foo')
-    body = _get_body(response)
-    assert body == 'zar'
-
-@pytest.mark.fails_on_aws
-@pytest.mark.fails_on_dbstore
-def test_put_object_ifmatch_nonexisted_failed():
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    lf = (lambda **kwargs: kwargs['params']['headers'].update({'If-Match': '*'}))
-    client.meta.events.register('before-call.s3.PutObject', lf)
-    e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key='foo', Body='bar')
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 412
-    assert error_code == 'PreconditionFailed'
-
-    e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key='foo')
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 404
-    assert error_code == 'NoSuchKey'
-
-@pytest.mark.fails_on_aws
-def test_put_object_ifnonmatch_good():
-    bucket_name = get_new_bucket()
-    client = get_client()
-    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
-    response = client.get_object(Bucket=bucket_name, Key='foo')
-    body = _get_body(response)
-    assert body == 'bar'
-
-    lf = (lambda **kwargs: kwargs['params']['headers'].update({'If-None-Match': 'ABCORZ'}))
-    client.meta.events.register('before-call.s3.PutObject', lf)
-    response = client.put_object(Bucket=bucket_name,Key='foo', Body='zar')
-
-    response = client.get_object(Bucket=bucket_name, Key='foo')
-    body = _get_body(response)
-    assert body == 'zar'
-
-@pytest.mark.fails_on_aws
-@pytest.mark.fails_on_dbstore
-def test_put_object_ifnonmatch_failed():
-    bucket_name = get_new_bucket()
-    client = get_client()
-    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
-
-    response = client.get_object(Bucket=bucket_name, Key='foo')
-    body = _get_body(response)
-    assert body == 'bar'
-
-    etag = response['ETag'].replace('"', '')
-
-    lf = (lambda **kwargs: kwargs['params']['headers'].update({'If-None-Match': etag}))
-    client.meta.events.register('before-call.s3.PutObject', lf)
-    e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key='foo', Body='zar')
-
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 412
-    assert error_code == 'PreconditionFailed'
-
-    response = client.get_object(Bucket=bucket_name, Key='foo')
-    body = _get_body(response)
-    assert body == 'bar'
-
-@pytest.mark.fails_on_aws
-def test_put_object_ifnonmatch_nonexisted_good():
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    lf = (lambda **kwargs: kwargs['params']['headers'].update({'If-None-Match': '*'}))
-    client.meta.events.register('before-call.s3.PutObject', lf)
-    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
-
-    response = client.get_object(Bucket=bucket_name, Key='foo')
-    body = _get_body(response)
-    assert body == 'bar'
-
-@pytest.mark.fails_on_aws
-@pytest.mark.fails_on_dbstore
-def test_put_object_ifnonmatch_overwrite_existed_failed():
-    bucket_name = get_new_bucket()
-    client = get_client()
-    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
-
-    response = client.get_object(Bucket=bucket_name, Key='foo')
-    body = _get_body(response)
-    assert body == 'bar'
-
-    lf = (lambda **kwargs: kwargs['params']['headers'].update({'If-None-Match': '*'}))
-    client.meta.events.register('before-call.s3.PutObject', lf)
-    e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key='foo', Body='zar')
-
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 412
-    assert error_code == 'PreconditionFailed'
-
-    response = client.get_object(Bucket=bucket_name, Key='foo')
-    body = _get_body(response)
-    assert body == 'bar'
-
-def _setup_bucket_object_acl(bucket_acl, object_acl, client=None):
-    """
-    Create a new bucket with the given bucket ACL, add a 'foo' key
-    with the given object ACL, and return the bucket name.
-    """
-    if client is None:
-        client = get_client()
-    bucket_name = get_new_bucket_name()
-    client.create_bucket(ACL=bucket_acl, Bucket=bucket_name)
-    client.put_object(ACL=object_acl, Bucket=bucket_name, Key='foo')
-
-    return bucket_name
-
-def _setup_bucket_acl(bucket_acl=None):
-    """
-    set up a new bucket with specified acl
-    """
-    bucket_name = get_new_bucket_name()
-    client = get_client()
-    client.create_bucket(ACL=bucket_acl, Bucket=bucket_name)
-
-    return bucket_name
-
-def test_object_raw_get():
-    bucket_name = _setup_bucket_object_acl('public-read', 'public-read')
-
-    unauthenticated_client = get_unauthenticated_client()
-    response = unauthenticated_client.get_object(Bucket=bucket_name, Key='foo')
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-def test_object_raw_get_bucket_gone():
-    bucket_name = _setup_bucket_object_acl('public-read', 'public-read')
-    client = get_client()
-
-    client.delete_object(Bucket=bucket_name, Key='foo')
-    client.delete_bucket(Bucket=bucket_name)
-
-    unauthenticated_client = get_unauthenticated_client()
-
-    e = assert_raises(ClientError, unauthenticated_client.get_object, Bucket=bucket_name, Key='foo')
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 404
-    assert error_code == 'NoSuchBucket'
-
-def test_object_delete_key_bucket_gone():
-    bucket_name = _setup_bucket_object_acl('public-read', 'public-read')
-    client = get_client()
-
-    client.delete_object(Bucket=bucket_name, Key='foo')
-    client.delete_bucket(Bucket=bucket_name)
-
-    unauthenticated_client = get_unauthenticated_client()
-
-    e = assert_raises(ClientError, unauthenticated_client.delete_object, Bucket=bucket_name, Key='foo')
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 404
-    assert error_code == 'NoSuchBucket'
-
-def test_object_raw_get_object_gone():
-    bucket_name = _setup_bucket_object_acl('public-read', 'public-read')
-    client = get_client()
-
-    client.delete_object(Bucket=bucket_name, Key='foo')
-
-    unauthenticated_client = get_unauthenticated_client()
-
-    e = assert_raises(ClientError, unauthenticated_client.get_object, Bucket=bucket_name, Key='foo')
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 404
-    assert error_code == 'NoSuchKey'
-
-def test_bucket_head():
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    response = client.head_bucket(Bucket=bucket_name)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-def test_bucket_head_notexist():
-    bucket_name = get_new_bucket_name()
-    client = get_client()
-
-    e = assert_raises(ClientError, client.head_bucket, Bucket=bucket_name)
-
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 404
-    # n.b., RGW does not send a response document for this operation,
-    # which seems consistent with
-    # https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadBucket.html
-    #assert error_code == 'NoSuchKey'
-
-@pytest.mark.fails_on_aws
-@pytest.mark.fails_on_dbstore
-def test_bucket_head_extended():
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    response = client.head_bucket(Bucket=bucket_name)
-    assert int(response['ResponseMetadata']['HTTPHeaders']['x-rgw-object-count']) == 0
-    assert int(response['ResponseMetadata']['HTTPHeaders']['x-rgw-bytes-used']) == 0
-
-    _create_objects(bucket_name=bucket_name, keys=['foo','bar','baz'])
-    response = client.head_bucket(Bucket=bucket_name)
-
-    assert int(response['ResponseMetadata']['HTTPHeaders']['x-rgw-object-count']) == 3
-    assert int(response['ResponseMetadata']['HTTPHeaders']['x-rgw-bytes-used']) == 9
-
-def test_object_raw_get_bucket_acl():
-    bucket_name = _setup_bucket_object_acl('private', 'public-read')
-
-    unauthenticated_client = get_unauthenticated_client()
-    response = unauthenticated_client.get_object(Bucket=bucket_name, Key='foo')
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-def test_object_raw_get_object_acl():
-    bucket_name = _setup_bucket_object_acl('public-read', 'private')
-
-    unauthenticated_client = get_unauthenticated_client()
-    e = assert_raises(ClientError, unauthenticated_client.get_object, Bucket=bucket_name, Key='foo')
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 403
-    assert error_code == 'AccessDenied'
-
-def test_object_put_acl_mtime():
-    key = 'foo'
-    bucket_name = get_new_bucket()
-    # Enable versioning
-    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
-    client = get_client()
-
-    content = 'foooz'
-    client.put_object(Bucket=bucket_name, Key=key, Body=content)
-
-    obj_response = client.head_object(Bucket=bucket_name, Key=key)
-    create_mtime = obj_response['LastModified']
-
-    response  = client.list_objects(Bucket=bucket_name)
-    obj_list = response['Contents'][0]
-    _compare_dates(obj_list['LastModified'],create_mtime)
-
-    response  = client.list_object_versions(Bucket=bucket_name)
-    obj_list = response['Versions'][0]
-    _compare_dates(obj_list['LastModified'],create_mtime)
-
-    # set acl
-    time.sleep(2)
-    client.put_object_acl(ACL='private',Bucket=bucket_name, Key=key)
-
-    # mtime should match with create mtime
-    obj_response = client.head_object(Bucket=bucket_name, Key=key)
-    _compare_dates(create_mtime,obj_response['LastModified'])
-
-    response  = client.list_objects(Bucket=bucket_name)
-    obj_list = response['Contents'][0]
-    _compare_dates(obj_list['LastModified'],create_mtime)
-
-    response  = client.list_object_versions(Bucket=bucket_name)
-    obj_list = response['Versions'][0]
-    _compare_dates(obj_list['LastModified'],create_mtime)
-
-def test_object_raw_authenticated():
-    bucket_name = _setup_bucket_object_acl('public-read', 'public-read')
-
-    client = get_client()
-    response = client.get_object(Bucket=bucket_name, Key='foo')
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-def test_object_raw_response_headers():
-    bucket_name = _setup_bucket_object_acl('private', 'private')
-
-    client = get_client()
-
-    response = client.get_object(Bucket=bucket_name, Key='foo', ResponseCacheControl='no-cache', ResponseContentDisposition='bla', ResponseContentEncoding='aaa', ResponseContentLanguage='esperanto', ResponseContentType='foo/bar', ResponseExpires='123')
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-    assert response['ResponseMetadata']['HTTPHeaders']['content-type'] == 'foo/bar'
-    assert response['ResponseMetadata']['HTTPHeaders']['content-disposition'] == 'bla'
-    assert response['ResponseMetadata']['HTTPHeaders']['content-language'] == 'esperanto'
-    assert response['ResponseMetadata']['HTTPHeaders']['content-encoding'] == 'aaa'
-    assert response['ResponseMetadata']['HTTPHeaders']['cache-control'] == 'no-cache'
-
-def test_object_raw_authenticated_bucket_acl():
-    bucket_name = _setup_bucket_object_acl('private', 'public-read')
-
-    client = get_client()
-    response = client.get_object(Bucket=bucket_name, Key='foo')
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-def test_object_raw_authenticated_object_acl():
-    bucket_name = _setup_bucket_object_acl('public-read', 'private')
-
-    client = get_client()
-    response = client.get_object(Bucket=bucket_name, Key='foo')
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-def test_object_raw_authenticated_bucket_gone():
-    bucket_name = _setup_bucket_object_acl('public-read', 'public-read')
-    client = get_client()
-
-    client.delete_object(Bucket=bucket_name, Key='foo')
-    client.delete_bucket(Bucket=bucket_name)
-
-    e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key='foo')
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 404
-    assert error_code == 'NoSuchBucket'
-
-def test_object_raw_authenticated_object_gone():
-    bucket_name = _setup_bucket_object_acl('public-read', 'public-read')
-    client = get_client()
-
-    client.delete_object(Bucket=bucket_name, Key='foo')
-
-    e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key='foo')
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 404
-    assert error_code == 'NoSuchKey'
-
-def _test_object_raw_get_x_amz_expires_not_expired(client):
-    bucket_name = _setup_bucket_object_acl('public-read', 'public-read', client=client)
-    params = {'Bucket': bucket_name, 'Key': 'foo'}
-
-    url = client.generate_presigned_url(ClientMethod='get_object', Params=params, ExpiresIn=100000, HttpMethod='GET')
-
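-    # an OPTIONS request (sent without CORS preflight headers) is rejected with 400,
-    # while the presigned GET succeeds within the expiry window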
-    res = requests.options(url, verify=get_config_ssl_verify()).__dict__
-    assert res['status_code'] == 400
-
-    res = requests.get(url, verify=get_config_ssl_verify()).__dict__
-    assert res['status_code'] == 200
-
-def test_object_raw_get_x_amz_expires_not_expired():
-    _test_object_raw_get_x_amz_expires_not_expired(client=get_client())
-
-def test_object_raw_get_x_amz_expires_not_expired_tenant():
-    _test_object_raw_get_x_amz_expires_not_expired(client=get_tenant_client())
-
-def test_object_raw_get_x_amz_expires_out_range_zero():
-    bucket_name = _setup_bucket_object_acl('public-read', 'public-read')
-    client = get_client()
-    params = {'Bucket': bucket_name, 'Key': 'foo'}
-
-    url = client.generate_presigned_url(ClientMethod='get_object', Params=params, ExpiresIn=0, HttpMethod='GET')
-
-    res = requests.get(url, verify=get_config_ssl_verify()).__dict__
-    assert res['status_code'] == 403
-
-def test_object_raw_get_x_amz_expires_out_max_range():
-    bucket_name = _setup_bucket_object_acl('public-read', 'public-read')
-    client = get_client()
-    params = {'Bucket': bucket_name, 'Key': 'foo'}
-
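-    # ExpiresIn of 609901 exceeds the 604800-second (7 day) maximum for presigned URLs,
-    # so the request should be denied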
-    url = client.generate_presigned_url(ClientMethod='get_object', Params=params, ExpiresIn=609901, HttpMethod='GET')
-
-    res = requests.get(url, verify=get_config_ssl_verify()).__dict__
-    assert res['status_code'] == 403
-
-def test_object_raw_get_x_amz_expires_out_positive_range():
-    bucket_name = _setup_bucket_object_acl('public-read', 'public-read')
-    client = get_client()
-    params = {'Bucket': bucket_name, 'Key': 'foo'}
-
-    url = client.generate_presigned_url(ClientMethod='get_object', Params=params, ExpiresIn=-7, HttpMethod='GET')
-
-    res = requests.get(url, verify=get_config_ssl_verify()).__dict__
-    assert res['status_code'] == 403
-
-
-def test_object_anon_put():
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    client.put_object(Bucket=bucket_name, Key='foo')
-
-    unauthenticated_client = get_unauthenticated_client()
-
-    e = assert_raises(ClientError, unauthenticated_client.put_object, Bucket=bucket_name, Key='foo', Body='foo')
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 403
-    assert error_code == 'AccessDenied'
-
-def test_object_anon_put_write_access():
-    bucket_name = _setup_bucket_acl('public-read-write')
-    client = get_client()
-    client.put_object(Bucket=bucket_name, Key='foo')
-
-    unauthenticated_client = get_unauthenticated_client()
-
-    response = unauthenticated_client.put_object(Bucket=bucket_name, Key='foo', Body='foo')
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-def test_object_put_authenticated():
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    response = client.put_object(Bucket=bucket_name, Key='foo', Body='foo')
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-def _test_object_presigned_put_object_with_acl(client=None):
-    if client is None:
-        client = get_client()
-
-    bucket_name = get_new_bucket(client)
-    key = 'foo'
-
-    params = {'Bucket': bucket_name, 'Key': key, 'ACL': 'private'}
-    url = client.generate_presigned_url(ClientMethod='put_object', Params=params, HttpMethod='PUT')
-
-    data = b'hello world'
-    headers = {'x-amz-acl': 'private'}
-    res = requests.put(url, data=data, headers=headers, verify=get_config_ssl_verify())
-    assert res.status_code == 200
-
-    params = {'Bucket': bucket_name, 'Key': key}
-    url = client.generate_presigned_url(ClientMethod='get_object', Params=params, HttpMethod='GET')
-
-    res = requests.get(url, verify=get_config_ssl_verify())
-    assert res.status_code == 200
-    assert res.text == 'hello world'
-
-def test_object_presigned_put_object_with_acl():
-    _test_object_presigned_put_object_with_acl(
-        client=get_client())
-
-def test_object_presigned_put_object_with_acl_tenant():
-    _test_object_presigned_put_object_with_acl(
-        client=get_tenant_client())
-
-def test_object_raw_put_authenticated_expired():
-    bucket_name = get_new_bucket()
-    client = get_client()
-    client.put_object(Bucket=bucket_name, Key='foo')
-
-    params = {'Bucket': bucket_name, 'Key': 'foo'}
-    url = client.generate_presigned_url(ClientMethod='put_object', Params=params, ExpiresIn=-1000, HttpMethod='PUT')
-
-    # params wouldn't take a 'Body' parameter so we're passing it in here
-    res = requests.put(url, data="foo", verify=get_config_ssl_verify()).__dict__
-    assert res['status_code'] == 403
-
-def check_bad_bucket_name(bucket_name):
-    """
-    Attempt to create a bucket with a specified name, and confirm
-    that the request fails because of an invalid bucket name.
-    """
-    client = get_client()
-    e = assert_raises(ClientError, client.create_bucket, Bucket=bucket_name)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 400
-    assert error_code == 'InvalidBucketName'
-
-
-# AWS does not enforce all documented bucket restrictions.
-# http://docs.amazonwebservices.com/AmazonS3/2006-03-01/dev/index.html?BucketRestrictions.html
-@pytest.mark.fails_on_aws
-# Breaks DNS with SubdomainCallingFormat
-def test_bucket_create_naming_bad_starts_nonalpha():
-    bucket_name = get_new_bucket_name()
-    check_bad_bucket_name('_' + bucket_name)
-
-def check_invalid_bucketname(invalid_name):
-    """
-    Send a CreateBucket request with an invalid bucket name,
-    rewriting the request URL so the name bypasses the client-side
-    ParamValidationError that would normally be raised.
-    Returns the status and error code from the resulting failure.
-    """
-    client = get_client()
-    valid_bucket_name = get_new_bucket_name()
-    def replace_bucketname_from_url(**kwargs):
-        url = kwargs['params']['url']
-        new_url = url.replace(valid_bucket_name, invalid_name)
-        kwargs['params']['url'] = new_url
-    client.meta.events.register('before-call.s3.CreateBucket', replace_bucketname_from_url)
-    e = assert_raises(ClientError, client.create_bucket, Bucket=invalid_name)
-    status, error_code = _get_status_and_error_code(e.response)
-    return (status, error_code)
-
-def test_bucket_create_naming_bad_short_one():
-    check_bad_bucket_name('a')
-
-def test_bucket_create_naming_bad_short_two():
-    check_bad_bucket_name('aa')
-
-def check_good_bucket_name(name, _prefix=None):
-    """
-    Attempt to create a bucket with the specified name
-    and (specified or default) prefix, and assert that
-    the request succeeds.
-    """
-    # tests using this with the default prefix must *not* rely on
-    # being able to set the initial character, or exceed the max len
-
-    # tests using this with a custom prefix are responsible for doing
-    # their own setup/teardown nukes, with their custom prefix; this
-    # should be very rare
-    if _prefix is None:
-        _prefix = get_prefix()
-    bucket_name = '{prefix}{name}'.format(
-            prefix=_prefix,
-            name=name,
-            )
-    client = get_client()
-    response = client.create_bucket(Bucket=bucket_name)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-def _test_bucket_create_naming_good_long(length):
-    """
-    Attempt to create a bucket whose name (including the
-    prefix) is of a specified length.
-    """
-    # tests using this with the default prefix must *not* rely on
-    # being able to set the initial character, or exceed the max len
-
-    # tests using this with a custom prefix are responsible for doing
-    # their own setup/teardown nukes, with their custom prefix; this
-    # should be very rare
-    prefix = get_new_bucket_name()
-    assert len(prefix) < 63
-    num = length - len(prefix)
-    name=num*'a'
-
-    bucket_name = '{prefix}{name}'.format(
-            prefix=prefix,
-            name=name,
-            )
-    client = get_client()
-    response = client.create_bucket(Bucket=bucket_name)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-# Breaks DNS with SubdomainCallingFormat
-@pytest.mark.fails_on_aws
-# Should now pass on AWS even though it has 'fails_on_aws' attr.
-def test_bucket_create_naming_good_long_60():
-    _test_bucket_create_naming_good_long(60)
-
-# Breaks DNS with SubdomainCallingFormat
-@pytest.mark.fails_on_aws
-# Should now pass on AWS even though it has 'fails_on_aws' attr.
-def test_bucket_create_naming_good_long_61():
-    _test_bucket_create_naming_good_long(61)
-
-# Breaks DNS with SubdomainCallingFormat
-@pytest.mark.fails_on_aws
-# Should now pass on AWS even though it has 'fails_on_aws' attr.
-def test_bucket_create_naming_good_long_62():
-    _test_bucket_create_naming_good_long(62)
-
-
-# Breaks DNS with SubdomainCallingFormat
-def test_bucket_create_naming_good_long_63():
-    _test_bucket_create_naming_good_long(63)
-
-
-# Breaks DNS with SubdomainCallingFormat
-@pytest.mark.fails_on_aws
-# Should now pass on AWS even though it has 'fails_on_aws' attr.
-def test_bucket_list_long_name():
-    prefix = get_new_bucket_name()
-    length = 61
-    num = length - len(prefix)
-    name=num*'a'
-
-    bucket_name = '{prefix}{name}'.format(
-            prefix=prefix,
-            name=name,
-            )
-    bucket = get_new_bucket_resource(name=bucket_name)
-    is_empty = _bucket_is_empty(bucket)
-    assert is_empty == True
-
-# AWS does not enforce all documented bucket restrictions.
-# http://docs.amazonwebservices.com/AmazonS3/2006-03-01/dev/index.html?BucketRestrictions.html
-@pytest.mark.fails_on_aws
-def test_bucket_create_naming_bad_ip():
-    check_bad_bucket_name('192.168.5.123')
-
-# test_bucket_create_naming_dns_* are valid but not recommended
-@pytest.mark.fails_on_aws
-# Should now pass on AWS even though it has 'fails_on_aws' attr.
-def test_bucket_create_naming_dns_underscore():
-    invalid_bucketname = 'foo_bar'
-    status, error_code = check_invalid_bucketname(invalid_bucketname)
-    assert status == 400
-    assert error_code == 'InvalidBucketName'
-
-# Breaks DNS with SubdomainCallingFormat
-@pytest.mark.fails_on_aws
-def test_bucket_create_naming_dns_long():
-    prefix = get_prefix()
-    assert len(prefix) < 50
-    num = 63 - len(prefix)
-    check_good_bucket_name(num * 'a')
-
-# Breaks DNS with SubdomainCallingFormat
-@pytest.mark.fails_on_aws
-# Should now pass on AWS even though it has 'fails_on_aws' attr.
-def test_bucket_create_naming_dns_dash_at_end():
-    invalid_bucketname = 'foo-'
-    status, error_code = check_invalid_bucketname(invalid_bucketname)
-    assert status == 400
-    assert error_code == 'InvalidBucketName'
-
-
-# Breaks DNS with SubdomainCallingFormat
-@pytest.mark.fails_on_aws
-# Should now pass on AWS even though it has 'fails_on_aws' attr.
-def test_bucket_create_naming_dns_dot_dot():
-    invalid_bucketname = 'foo..bar'
-    status, error_code = check_invalid_bucketname(invalid_bucketname)
-    assert status == 400
-    assert error_code == 'InvalidBucketName'
-
-
-# Breaks DNS with SubdomainCallingFormat
-@pytest.mark.fails_on_aws
-# Should now pass on AWS even though it has 'fails_on_aws' attr.
-def test_bucket_create_naming_dns_dot_dash():
-    invalid_bucketname = 'foo.-bar'
-    status, error_code = check_invalid_bucketname(invalid_bucketname)
-    assert status == 400
-    assert error_code == 'InvalidBucketName'
-
-
-# Breaks DNS with SubdomainCallingFormat
-@pytest.mark.fails_on_aws
-# Should now pass on AWS even though it has 'fails_on_aws' attr.
-def test_bucket_create_naming_dns_dash_dot():
-    invalid_bucketname = 'foo-.bar'
-    status, error_code = check_invalid_bucketname(invalid_bucketname)
-    assert status == 400
-    assert error_code == 'InvalidBucketName'
-
-def test_bucket_create_exists():
-    # aws-s3 default region allows recreation of buckets
-    # but all other regions fail with BucketAlreadyOwnedByYou.
-    bucket_name = get_new_bucket_name()
-    client = get_client()
-
-    client.create_bucket(Bucket=bucket_name)
-    try:
-        response = client.create_bucket(Bucket=bucket_name)
-    except ClientError as e:
-        status, error_code = _get_status_and_error_code(e.response)
-        assert status == 409
-        assert error_code == 'BucketAlreadyOwnedByYou'
-
-@pytest.mark.fails_on_dbstore
-def test_bucket_get_location():
-    location_constraint = get_main_api_name()
-    if not location_constraint:
-        pytest.skip('no api_name configured')
-    bucket_name = get_new_bucket_name()
-    client = get_client()
-
-    client.create_bucket(Bucket=bucket_name, CreateBucketConfiguration={'LocationConstraint': location_constraint})
-
-    response = client.get_bucket_location(Bucket=bucket_name)
-    if location_constraint == "":
-        location_constraint = None
-    assert response['LocationConstraint'] == location_constraint
-
-@pytest.mark.fails_on_dbstore
-def test_bucket_create_exists_nonowner():
-    # Bucket names are shared across a global namespace, so no two
-    # users can create a bucket with the same name.
-    bucket_name = get_new_bucket_name()
-    client = get_client()
-
-    alt_client = get_alt_client()
-
-    client.create_bucket(Bucket=bucket_name)
-    e = assert_raises(ClientError, alt_client.create_bucket, Bucket=bucket_name)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 409
-    assert error_code == 'BucketAlreadyExists'
-
-@pytest.mark.fails_on_dbstore
-def test_bucket_recreate_overwrite_acl():
-    bucket_name = get_new_bucket_name()
-    client = get_client()
-
-    client.create_bucket(Bucket=bucket_name, ACL='public-read')
-    e = assert_raises(ClientError, client.create_bucket, Bucket=bucket_name)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 409
-    assert error_code == 'BucketAlreadyExists'
-
-@pytest.mark.fails_on_dbstore
-def test_bucket_recreate_new_acl():
-    bucket_name = get_new_bucket_name()
-    client = get_client()
-
-    client.create_bucket(Bucket=bucket_name)
-    e = assert_raises(ClientError, client.create_bucket, Bucket=bucket_name, ACL='public-read')
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 409
-    assert error_code == 'BucketAlreadyExists'
-
-def check_access_denied(fn, *args, **kwargs):
-    e = assert_raises(ClientError, fn, *args, **kwargs)
-    status = _get_status(e.response)
-    assert status == 403
-
-
-def check_grants(got, want):
-    """
-    Check that grants list in got matches the dictionaries in want,
-    in any order.
-    """
-    assert len(got) == len(want)
-
-    # There are cases where got does not match want due to the ordering of items.
-    if got[0]["Grantee"].get("DisplayName"):
-        got.sort(key=lambda x: x["Grantee"].get("DisplayName"))
-        want.sort(key=lambda x: x["DisplayName"])
-
-    for g, w in zip(got, want):
-        w = dict(w)
-        g = dict(g)
-        assert g.pop('Permission', None) == w['Permission']
-        assert g['Grantee'].pop('DisplayName', None) == w['DisplayName']
-        assert g['Grantee'].pop('ID', None) == w['ID']
-        assert g['Grantee'].pop('Type', None) == w['Type']
-        assert g['Grantee'].pop('URI', None) == w['URI']
-        assert g['Grantee'].pop('EmailAddress', None) == w['EmailAddress']
-        assert g == {'Grantee': {}}
-
-
-def test_bucket_acl_default():
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    response = client.get_bucket_acl(Bucket=bucket_name)
-
-    display_name = get_main_display_name()
-    user_id = get_main_user_id()
-
-    assert response['Owner']['DisplayName'] == display_name
-    assert response['Owner']['ID'] == user_id
-
-    grants = response['Grants']
-    check_grants(
-        grants,
-        [
-            dict(
-                Permission='FULL_CONTROL',
-                ID=user_id,
-                DisplayName=display_name,
-                URI=None,
-                EmailAddress=None,
-                Type='CanonicalUser',
-                ),
-            ],
-        )
-
-@pytest.mark.fails_on_aws
-def test_bucket_acl_canned_during_create():
-    bucket_name = get_new_bucket_name()
-    client = get_client()
-    client.create_bucket(ACL='public-read', Bucket=bucket_name)
-    response = client.get_bucket_acl(Bucket=bucket_name)
-
-    display_name = get_main_display_name()
-    user_id = get_main_user_id()
-
-    grants = response['Grants']
-    check_grants(
-        grants,
-        [
-            dict(
-                Permission='READ',
-                ID=None,
-                DisplayName=None,
-                URI='http://acs.amazonaws.com/groups/global/AllUsers',
-                EmailAddress=None,
-                Type='Group',
-                ),
-            dict(
-                Permission='FULL_CONTROL',
-                ID=user_id,
-                DisplayName=display_name,
-                URI=None,
-                EmailAddress=None,
-                Type='CanonicalUser',
-                ),
-            ],
-        )
-
-def test_bucket_acl_canned():
-    bucket_name = get_new_bucket_name()
-    client = get_client()
-    client.create_bucket(ACL='public-read', Bucket=bucket_name)
-    response = client.get_bucket_acl(Bucket=bucket_name)
-
-    display_name = get_main_display_name()
-    user_id = get_main_user_id()
-
-    grants = response['Grants']
-    check_grants(
-        grants,
-        [
-            dict(
-                Permission='READ',
-                ID=None,
-                DisplayName=None,
-                URI='http://acs.amazonaws.com/groups/global/AllUsers',
-                EmailAddress=None,
-                Type='Group',
-                ),
-            dict(
-                Permission='FULL_CONTROL',
-                ID=user_id,
-                DisplayName=display_name,
-                URI=None,
-                EmailAddress=None,
-                Type='CanonicalUser',
-                ),
-            ],
-        )
-
-    client.put_bucket_acl(ACL='private', Bucket=bucket_name)
-    response = client.get_bucket_acl(Bucket=bucket_name)
-
-    grants = response['Grants']
-    check_grants(
-        grants,
-        [
-            dict(
-                Permission='FULL_CONTROL',
-                ID=user_id,
-                DisplayName=display_name,
-                URI=None,
-                EmailAddress=None,
-                Type='CanonicalUser',
-                ),
-            ],
-        )
-
-def test_bucket_acl_canned_publicreadwrite():
-    bucket_name = get_new_bucket_name()
-    client = get_client()
-    client.create_bucket(ACL='public-read-write', Bucket=bucket_name)
-    response = client.get_bucket_acl(Bucket=bucket_name)
-
-    display_name = get_main_display_name()
-    user_id = get_main_user_id()
-    grants = response['Grants']
-    check_grants(
-        grants,
-        [
-            dict(
-                Permission='READ',
-                ID=None,
-                DisplayName=None,
-                URI='http://acs.amazonaws.com/groups/global/AllUsers',
-                EmailAddress=None,
-                Type='Group',
-                ),
-            dict(
-                Permission='WRITE',
-                ID=None,
-                DisplayName=None,
-                URI='http://acs.amazonaws.com/groups/global/AllUsers',
-                EmailAddress=None,
-                Type='Group',
-                ),
-            dict(
-                Permission='FULL_CONTROL',
-                ID=user_id,
-                DisplayName=display_name,
-                URI=None,
-                EmailAddress=None,
-                Type='CanonicalUser',
-                ),
-            ],
-        )
-
-def test_bucket_acl_canned_authenticatedread():
-    bucket_name = get_new_bucket_name()
-    client = get_client()
-    client.create_bucket(ACL='authenticated-read', Bucket=bucket_name)
-    response = client.get_bucket_acl(Bucket=bucket_name)
-
-    display_name = get_main_display_name()
-    user_id = get_main_user_id()
-
-    grants = response['Grants']
-    check_grants(
-        grants,
-        [
-            dict(
-                Permission='READ',
-                ID=None,
-                DisplayName=None,
-                URI='http://acs.amazonaws.com/groups/global/AuthenticatedUsers',
-                EmailAddress=None,
-                Type='Group',
-                ),
-            dict(
-                Permission='FULL_CONTROL',
-                ID=user_id,
-                DisplayName=display_name,
-                URI=None,
-                EmailAddress=None,
-                Type='CanonicalUser',
-                ),
-            ],
-        )
-
-def test_put_bucket_acl_grant_group_read():
-    bucket_name = get_new_bucket()
-    client = get_client()
-    display_name = get_main_display_name()
-    user_id = get_main_user_id()
-
-    grant = {'Grantee': {'Type': 'Group', 'URI': 'http://acs.amazonaws.com/groups/global/AllUsers'}, 'Permission': 'READ'}
-    policy = add_bucket_user_grant(bucket_name, grant)
-
-    client.put_bucket_acl(Bucket=bucket_name, AccessControlPolicy=policy)
-
-    response = client.get_bucket_acl(Bucket=bucket_name)
-
-    check_grants(
-        response['Grants'],
-        [
-            dict(
-                Permission='READ',
-                ID=None,
-                DisplayName=None,
-                URI='http://acs.amazonaws.com/groups/global/AllUsers',
-                EmailAddress=None,
-                Type='Group',
-                ),
-            dict(
-                Permission='FULL_CONTROL',
-                ID=user_id,
-                DisplayName=display_name,
-                URI=None,
-                EmailAddress=None,
-                Type='CanonicalUser',
-                ),
-            ],
-        )
-
-def test_object_acl_default():
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
-    response = client.get_object_acl(Bucket=bucket_name, Key='foo')
-
-    display_name = get_main_display_name()
-    user_id = get_main_user_id()
-
-
-    grants = response['Grants']
-    check_grants(
-        grants,
-        [
-            dict(
-                Permission='FULL_CONTROL',
-                ID=user_id,
-                DisplayName=display_name,
-                URI=None,
-                EmailAddress=None,
-                Type='CanonicalUser',
-                ),
-            ],
-        )
-
-def test_object_acl_canned_during_create():
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    client.put_object(ACL='public-read', Bucket=bucket_name, Key='foo', Body='bar')
-    response = client.get_object_acl(Bucket=bucket_name, Key='foo')
-
-    display_name = get_main_display_name()
-    user_id = get_main_user_id()
-
-
-    grants = response['Grants']
-    check_grants(
-        grants,
-        [
-            dict(
-                Permission='READ',
-                ID=None,
-                DisplayName=None,
-                URI='http://acs.amazonaws.com/groups/global/AllUsers',
-                EmailAddress=None,
-                Type='Group',
-                ),
-            dict(
-                Permission='FULL_CONTROL',
-                ID=user_id,
-                DisplayName=display_name,
-                URI=None,
-                EmailAddress=None,
-                Type='CanonicalUser',
-                ),
-            ],
-        )
-
-def test_object_acl_canned():
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    # Since it defaults to private, set it public-read first
-    client.put_object(ACL='public-read', Bucket=bucket_name, Key='foo', Body='bar')
-    response = client.get_object_acl(Bucket=bucket_name, Key='foo')
-
-    display_name = get_main_display_name()
-    user_id = get_main_user_id()
-
-    grants = response['Grants']
-    check_grants(
-        grants,
-        [
-            dict(
-                Permission='READ',
-                ID=None,
-                DisplayName=None,
-                URI='http://acs.amazonaws.com/groups/global/AllUsers',
-                EmailAddress=None,
-                Type='Group',
-                ),
-            dict(
-                Permission='FULL_CONTROL',
-                ID=user_id,
-                DisplayName=display_name,
-                URI=None,
-                EmailAddress=None,
-                Type='CanonicalUser',
-                ),
-            ],
-        )
-
-    # Then back to private.
-    client.put_object_acl(ACL='private',Bucket=bucket_name, Key='foo')
-    response = client.get_object_acl(Bucket=bucket_name, Key='foo')
-    grants = response['Grants']
-
-    check_grants(
-        grants,
-        [
-            dict(
-                Permission='FULL_CONTROL',
-                ID=user_id,
-                DisplayName=display_name,
-                URI=None,
-                EmailAddress=None,
-                Type='CanonicalUser',
-                ),
-            ],
-        )
-
-def test_object_acl_canned_publicreadwrite():
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    client.put_object(ACL='public-read-write', Bucket=bucket_name, Key='foo', Body='bar')
-    response = client.get_object_acl(Bucket=bucket_name, Key='foo')
-
-    display_name = get_main_display_name()
-    user_id = get_main_user_id()
-
-    grants = response['Grants']
-    check_grants(
-        grants,
-        [
-            dict(
-                Permission='READ',
-                ID=None,
-                DisplayName=None,
-                URI='http://acs.amazonaws.com/groups/global/AllUsers',
-                EmailAddress=None,
-                Type='Group',
-                ),
-            dict(
-                Permission='WRITE',
-                ID=None,
-                DisplayName=None,
-                URI='http://acs.amazonaws.com/groups/global/AllUsers',
-                EmailAddress=None,
-                Type='Group',
-                ),
-            dict(
-                Permission='FULL_CONTROL',
-                ID=user_id,
-                DisplayName=display_name,
-                URI=None,
-                EmailAddress=None,
-                Type='CanonicalUser',
-                ),
-            ],
-        )
-
-def test_object_acl_canned_authenticatedread():
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    client.put_object(ACL='authenticated-read', Bucket=bucket_name, Key='foo', Body='bar')
-    response = client.get_object_acl(Bucket=bucket_name, Key='foo')
-
-    display_name = get_main_display_name()
-    user_id = get_main_user_id()
-
-    grants = response['Grants']
-    check_grants(
-        grants,
-        [
-            dict(
-                Permission='READ',
-                ID=None,
-                DisplayName=None,
-                URI='http://acs.amazonaws.com/groups/global/AuthenticatedUsers',
-                EmailAddress=None,
-                Type='Group',
-                ),
-            dict(
-                Permission='FULL_CONTROL',
-                ID=user_id,
-                DisplayName=display_name,
-                URI=None,
-                EmailAddress=None,
-                Type='CanonicalUser',
-                ),
-            ],
-        )
-
-def test_object_acl_canned_bucketownerread():
-    bucket_name = get_new_bucket_name()
-    main_client = get_client()
-    alt_client = get_alt_client()
-
-    main_client.create_bucket(Bucket=bucket_name, ACL='public-read-write')
-
-    alt_client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
-
-    bucket_acl_response = main_client.get_bucket_acl(Bucket=bucket_name)
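-    # assumes the owner's FULL_CONTROL grant is listed last,
-    # after the AllUsers READ and WRITE grants of the public-read-write ACL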
-    bucket_owner_id = bucket_acl_response['Grants'][2]['Grantee']['ID']
-    bucket_owner_display_name = bucket_acl_response['Grants'][2]['Grantee']['DisplayName']
-
-    alt_client.put_object(ACL='bucket-owner-read', Bucket=bucket_name, Key='foo')
-    response = alt_client.get_object_acl(Bucket=bucket_name, Key='foo')
-
-    alt_display_name = get_alt_display_name()
-    alt_user_id = get_alt_user_id()
-
-    grants = response['Grants']
-    check_grants(
-        grants,
-        [
-            dict(
-                Permission='FULL_CONTROL',
-                ID=alt_user_id,
-                DisplayName=alt_display_name,
-                URI=None,
-                EmailAddress=None,
-                Type='CanonicalUser',
-                ),
-            dict(
-                Permission='READ',
-                ID=bucket_owner_id,
-                DisplayName=bucket_owner_display_name,
-                URI=None,
-                EmailAddress=None,
-                Type='CanonicalUser',
-                ),
-            ],
-        )
-
-def test_object_acl_canned_bucketownerfullcontrol():
-    bucket_name = get_new_bucket_name()
-    main_client = get_client()
-    alt_client = get_alt_client()
-
-    main_client.create_bucket(Bucket=bucket_name, ACL='public-read-write')
-
-    alt_client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
-
-    bucket_acl_response = main_client.get_bucket_acl(Bucket=bucket_name)
-    bucket_owner_id = bucket_acl_response['Grants'][2]['Grantee']['ID']
-    bucket_owner_display_name = bucket_acl_response['Grants'][2]['Grantee']['DisplayName']
-
-    alt_client.put_object(ACL='bucket-owner-full-control', Bucket=bucket_name, Key='foo')
-    response = alt_client.get_object_acl(Bucket=bucket_name, Key='foo')
-
-    alt_display_name = get_alt_display_name()
-    alt_user_id = get_alt_user_id()
-
-    grants = response['Grants']
-    check_grants(
-        grants,
-        [
-            dict(
-                Permission='FULL_CONTROL',
-                ID=alt_user_id,
-                DisplayName=alt_display_name,
-                URI=None,
-                EmailAddress=None,
-                Type='CanonicalUser',
-                ),
-            dict(
-                Permission='FULL_CONTROL',
-                ID=bucket_owner_id,
-                DisplayName=bucket_owner_display_name,
-                URI=None,
-                EmailAddress=None,
-                Type='CanonicalUser',
-                ),
-            ],
-        )
-
-@pytest.mark.fails_on_aws
-def test_object_acl_full_control_verify_owner():
-    bucket_name = get_new_bucket_name()
-    main_client = get_client()
-    alt_client = get_alt_client()
-
-    main_client.create_bucket(Bucket=bucket_name, ACL='public-read-write')
-
-    main_client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
-
-    alt_user_id = get_alt_user_id()
-    alt_display_name = get_alt_display_name()
-
-    main_user_id = get_main_user_id()
-    main_display_name = get_main_display_name()
-
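-    # granting the alt user FULL_CONTROL must not change the object's owner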
-    grant = { 'Grants': [{'Grantee': {'ID': alt_user_id, 'Type': 'CanonicalUser' }, 'Permission': 'FULL_CONTROL'}], 'Owner': {'DisplayName': main_display_name, 'ID': main_user_id}}
-
-    main_client.put_object_acl(Bucket=bucket_name, Key='foo', AccessControlPolicy=grant)
-
-    grant = { 'Grants': [{'Grantee': {'ID': alt_user_id, 'Type': 'CanonicalUser' }, 'Permission': 'READ_ACP'}], 'Owner': {'DisplayName': main_display_name, 'ID': main_user_id}}
-
-    alt_client.put_object_acl(Bucket=bucket_name, Key='foo', AccessControlPolicy=grant)
-
-    response = alt_client.get_object_acl(Bucket=bucket_name, Key='foo')
-    assert response['Owner']['ID'] == main_user_id
-
-def add_obj_user_grant(bucket_name, key, grant):
-    """
-    Adds a grant to the existing grants meant to be passed into
-    the AccessControlPolicy argument of put_object_acls for an object
-    owned by the main user, not the alt user
-    A grant is a dictionary in the form of:
-    {u'Grantee': {u'Type': 'type', u'DisplayName': 'name', u'ID': 'id'}, u'Permission': 'PERM'}
-
-    """
-    client = get_client()
-    main_user_id = get_main_user_id()
-    main_display_name = get_main_display_name()
-
-    response = client.get_object_acl(Bucket=bucket_name, Key=key)
-
-    grants = response['Grants']
-    grants.append(grant)
-
-    grant = {'Grants': grants, 'Owner': {'DisplayName': main_display_name, 'ID': main_user_id}}
-
-    return grant
-
-def test_object_acl_full_control_verify_attributes():
-    bucket_name = get_new_bucket_name()
-    main_client = get_client()
-    alt_client = get_alt_client()
-
-    main_client.create_bucket(Bucket=bucket_name, ACL='public-read-write')
-
-    header = {'x-amz-foo': 'bar'}
-    # lambda to add any header
-    add_header = (lambda **kwargs: kwargs['params']['headers'].update(header))
-
-    main_client.meta.events.register('before-call.s3.PutObject', add_header)
-    main_client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
-
-    response = main_client.get_object(Bucket=bucket_name, Key='foo')
-    content_type = response['ContentType']
-    etag = response['ETag']
-
-    alt_user_id = get_alt_user_id()
-
-    grant = {'Grantee': {'ID': alt_user_id, 'Type': 'CanonicalUser' }, 'Permission': 'FULL_CONTROL'}
-
-    grants = add_obj_user_grant(bucket_name, 'foo', grant)
-
-    main_client.put_object_acl(Bucket=bucket_name, Key='foo', AccessControlPolicy=grants)
-
-    response = main_client.get_object(Bucket=bucket_name, Key='foo')
-    assert content_type == response['ContentType']
-    assert etag == response['ETag']
-
-def test_bucket_acl_canned_private_to_private():
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    response = client.put_bucket_acl(Bucket=bucket_name, ACL='private')
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-def add_bucket_user_grant(bucket_name, grant):
-    """
-    Adds a grant to the existing grants meant to be passed into
-    the AccessControlPolicy argument of put_object_acls for an object
-    owned by the main user, not the alt user
-    A grant is a dictionary in the form of:
-    {u'Grantee': {u'Type': 'type', u'DisplayName': 'name', u'ID': 'id'}, u'Permission': 'PERM'}
-    """
-    client = get_client()
-    main_user_id = get_main_user_id()
-    main_display_name = get_main_display_name()
-
-    response = client.get_bucket_acl(Bucket=bucket_name)
-
-    grants = response['Grants']
-    grants.append(grant)
-
-    grant = {'Grants': grants, 'Owner': {'DisplayName': main_display_name, 'ID': main_user_id}}
-
-    return grant
-
-def _check_object_acl(permission):
-    """
-    Sets the permission on an object then checks to see
-    if it was set
-    """
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
-
-    response = client.get_object_acl(Bucket=bucket_name, Key='foo')
-
-    policy = {}
-    policy['Owner'] = response['Owner']
-    policy['Grants'] = response['Grants']
-    policy['Grants'][0]['Permission'] = permission
-
-    client.put_object_acl(Bucket=bucket_name, Key='foo', AccessControlPolicy=policy)
-
-    response = client.get_object_acl(Bucket=bucket_name, Key='foo')
-    grants = response['Grants']
-
-    main_user_id = get_main_user_id()
-    main_display_name = get_main_display_name()
-
-    check_grants(
-        grants,
-        [
-            dict(
-                Permission=permission,
-                ID=main_user_id,
-                DisplayName=main_display_name,
-                URI=None,
-                EmailAddress=None,
-                Type='CanonicalUser',
-                ),
-            ],
-        )
-
-
-@pytest.mark.fails_on_aws
-def test_object_acl():
-    _check_object_acl('FULL_CONTROL')
-
-@pytest.mark.fails_on_aws
-def test_object_acl_write():
-    _check_object_acl('WRITE')
-
-@pytest.mark.fails_on_aws
-def test_object_acl_writeacp():
-    _check_object_acl('WRITE_ACP')
-
-
-@pytest.mark.fails_on_aws
-def test_object_acl_read():
-    _check_object_acl('READ')
-
-
-@pytest.mark.fails_on_aws
-def test_object_acl_readacp():
-    _check_object_acl('READ_ACP')
-
-
-def _bucket_acl_grant_userid(permission):
-    """
-    create a new bucket, grant a specific user the specified
-    permission, read back the acl and verify correct setting
-    """
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    main_user_id = get_main_user_id()
-    main_display_name = get_main_display_name()
-
-    alt_user_id = get_alt_user_id()
-    alt_display_name = get_alt_display_name()
-
-    grant = {'Grantee': {'ID': alt_user_id, 'Type': 'CanonicalUser' }, 'Permission': permission}
-
-    grant = add_bucket_user_grant(bucket_name, grant)
-
-    client.put_bucket_acl(Bucket=bucket_name, AccessControlPolicy=grant)
-
-    response = client.get_bucket_acl(Bucket=bucket_name)
-
-    grants = response['Grants']
-    check_grants(
-        grants,
-        [
-            dict(
-                Permission=permission,
-                ID=alt_user_id,
-                DisplayName=alt_display_name,
-                URI=None,
-                EmailAddress=None,
-                Type='CanonicalUser',
-                ),
-            dict(
-                Permission='FULL_CONTROL',
-                ID=main_user_id,
-                DisplayName=main_display_name,
-                URI=None,
-                EmailAddress=None,
-                Type='CanonicalUser',
-                ),
-            ],
-        )
-
-    return bucket_name
-
-def _check_bucket_acl_grant_can_read(bucket_name):
-    """
-    verify ability to read the specified bucket
-    """
-    alt_client = get_alt_client()
-    response = alt_client.head_bucket(Bucket=bucket_name)
-
-def _check_bucket_acl_grant_cant_read(bucket_name):
-    """
-    verify inability to read the specified bucket
-    """
-    alt_client = get_alt_client()
-    check_access_denied(alt_client.head_bucket, Bucket=bucket_name)
-
-def _check_bucket_acl_grant_can_readacp(bucket_name):
-    """
-    verify ability to read acls on specified bucket
-    """
-    alt_client = get_alt_client()
-    alt_client.get_bucket_acl(Bucket=bucket_name)
-
-def _check_bucket_acl_grant_cant_readacp(bucket_name):
-    """
-    verify inability to read acls on specified bucket
-    """
-    alt_client = get_alt_client()
-    check_access_denied(alt_client.get_bucket_acl, Bucket=bucket_name)
-
-def _check_bucket_acl_grant_can_write(bucket_name):
-    """
-    verify ability to write the specified bucket
-    """
-    alt_client = get_alt_client()
-    alt_client.put_object(Bucket=bucket_name, Key='foo-write', Body='bar')
-
-def _check_bucket_acl_grant_cant_write(bucket_name):
-
-    """
-    verify inability to write the specified bucket
-    """
-    alt_client = get_alt_client()
-    check_access_denied(alt_client.put_object, Bucket=bucket_name, Key='foo-write', Body='bar')
-
-def _check_bucket_acl_grant_can_writeacp(bucket_name):
-    """
-    verify ability to set acls on the specified bucket
-    """
-    alt_client = get_alt_client()
-    alt_client.put_bucket_acl(Bucket=bucket_name, ACL='public-read')
-
-def _check_bucket_acl_grant_cant_writeacp(bucket_name):
-    """
-    verify inability to set acls on the specified bucket
-    """
-    alt_client = get_alt_client()
-    check_access_denied(alt_client.put_bucket_acl,Bucket=bucket_name, ACL='public-read')
-
-@pytest.mark.fails_on_aws
-def test_bucket_acl_grant_userid_fullcontrol():
-    bucket_name = _bucket_acl_grant_userid('FULL_CONTROL')
-
-    # alt user can read
-    _check_bucket_acl_grant_can_read(bucket_name)
-    # can read acl
-    _check_bucket_acl_grant_can_readacp(bucket_name)
-    # can write
-    _check_bucket_acl_grant_can_write(bucket_name)
-    # can write acl
-    _check_bucket_acl_grant_can_writeacp(bucket_name)
-
-    client = get_client()
-
-    bucket_acl_response = client.get_bucket_acl(Bucket=bucket_name)
-    owner_id = bucket_acl_response['Owner']['ID']
-    owner_display_name = bucket_acl_response['Owner']['DisplayName']
-
-    main_display_name = get_main_display_name()
-    main_user_id = get_main_user_id()
-
-    assert owner_id == main_user_id
-    assert owner_display_name == main_display_name
-
-@pytest.mark.fails_on_aws
-def test_bucket_acl_grant_userid_read():
-    bucket_name = _bucket_acl_grant_userid('READ')
-
-    # alt user can read
-    _check_bucket_acl_grant_can_read(bucket_name)
-    # can't read acl
-    _check_bucket_acl_grant_cant_readacp(bucket_name)
-    # can't write
-    _check_bucket_acl_grant_cant_write(bucket_name)
-    # can't write acl
-    _check_bucket_acl_grant_cant_writeacp(bucket_name)
-
-@pytest.mark.fails_on_aws
-def test_bucket_acl_grant_userid_readacp():
-    bucket_name = _bucket_acl_grant_userid('READ_ACP')
-
-    # alt user can't read
-    _check_bucket_acl_grant_cant_read(bucket_name)
-    # can read acl
-    _check_bucket_acl_grant_can_readacp(bucket_name)
-    # can't write
-    _check_bucket_acl_grant_cant_write(bucket_name)
-    # can't write acp
-    #_check_bucket_acl_grant_cant_writeacp_can_readacp(bucket)
-    _check_bucket_acl_grant_cant_writeacp(bucket_name)
-
-@pytest.mark.fails_on_aws
-def test_bucket_acl_grant_userid_write():
-    bucket_name = _bucket_acl_grant_userid('WRITE')
-
-    # alt user can't read
-    _check_bucket_acl_grant_cant_read(bucket_name)
-    # can't read acl
-    _check_bucket_acl_grant_cant_readacp(bucket_name)
-    # can write
-    _check_bucket_acl_grant_can_write(bucket_name)
-    # can't write acl
-    _check_bucket_acl_grant_cant_writeacp(bucket_name)
-
-@pytest.mark.fails_on_aws
-def test_bucket_acl_grant_userid_writeacp():
-    bucket_name = _bucket_acl_grant_userid('WRITE_ACP')
-
-    # alt user can't read
-    _check_bucket_acl_grant_cant_read(bucket_name)
-    # can't read acl
-    _check_bucket_acl_grant_cant_readacp(bucket_name)
-    # can't write
-    _check_bucket_acl_grant_cant_write(bucket_name)
-    # can write acl
-    _check_bucket_acl_grant_can_writeacp(bucket_name)
-
-def test_bucket_acl_grant_nonexist_user():
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    bad_user_id = '_foo'
-
-    #response = client.get_bucket_acl(Bucket=bucket_name)
-    grant = {'Grantee': {'ID': bad_user_id, 'Type': 'CanonicalUser' }, 'Permission': 'FULL_CONTROL'}
-
-    grant = add_bucket_user_grant(bucket_name, grant)
-
-    e = assert_raises(ClientError, client.put_bucket_acl, Bucket=bucket_name, AccessControlPolicy=grant)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 400
-    assert error_code == 'InvalidArgument'
-
-def _get_acl_header(user_id=None, perms=None):
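-    # build a list of ('x-amz-grant-<perm>', 'id=<user_id>') header tuples; when perms
-    # is None, one header is generated for every canned grant permission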
-    all_headers = ["read", "write", "read-acp", "write-acp", "full-control"]
-    headers = []
-
-    if user_id == None:
-        user_id = get_alt_user_id()
-
-    if perms != None:
-        for perm in perms:
-            header = ("x-amz-grant-{perm}".format(perm=perm), "id={uid}".format(uid=user_id))
-            headers.append(header)
-
-    else:
-        for perm in all_headers:
-            header = ("x-amz-grant-{perm}".format(perm=perm), "id={uid}".format(uid=user_id))
-            headers.append(header)
-
-    return headers
-
-@pytest.mark.fails_on_dho
-@pytest.mark.fails_on_aws
-def test_object_header_acl_grants():
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    alt_user_id = get_alt_user_id()
-    alt_display_name = get_alt_display_name()
-
-    headers = _get_acl_header()
-
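-    # splice the raw x-amz-grant-* headers into the request just before it is
-    # signed, so they are covered by the request signature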
-    def add_headers_before_sign(**kwargs):
-        updated_headers = (kwargs['request'].__dict__['headers'].__dict__['_headers'] + headers)
-        kwargs['request'].__dict__['headers'].__dict__['_headers'] = updated_headers
-
-    client.meta.events.register('before-sign.s3.PutObject', add_headers_before_sign)
-
-    client.put_object(Bucket=bucket_name, Key='foo_key', Body='bar')
-
-    response = client.get_object_acl(Bucket=bucket_name, Key='foo_key')
-
-    grants = response['Grants']
-    check_grants(
-        grants,
-        [
-            dict(
-                Permission='READ',
-                ID=alt_user_id,
-                DisplayName=alt_display_name,
-                URI=None,
-                EmailAddress=None,
-                Type='CanonicalUser',
-                ),
-            dict(
-                Permission='WRITE',
-                ID=alt_user_id,
-                DisplayName=alt_display_name,
-                URI=None,
-                EmailAddress=None,
-                Type='CanonicalUser',
-                ),
-            dict(
-                Permission='READ_ACP',
-                ID=alt_user_id,
-                DisplayName=alt_display_name,
-                URI=None,
-                EmailAddress=None,
-                Type='CanonicalUser',
-                ),
-            dict(
-                Permission='WRITE_ACP',
-                ID=alt_user_id,
-                DisplayName=alt_display_name,
-                URI=None,
-                EmailAddress=None,
-                Type='CanonicalUser',
-                ),
-            dict(
-                Permission='FULL_CONTROL',
-                ID=alt_user_id,
-                DisplayName=alt_display_name,
-                URI=None,
-                EmailAddress=None,
-                Type='CanonicalUser',
-                ),
-            ],
-        )
-
-@pytest.mark.fails_on_dho
-@pytest.mark.fails_on_aws
-def test_bucket_header_acl_grants():
-    bucket_name = get_new_bucket_name()
-    client = get_client()
-
-    headers = _get_acl_header()
-
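-    # as above, inject the x-amz-grant-* headers into the raw request before signing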
-    def add_headers_before_sign(**kwargs):
-        updated_headers = (kwargs['request'].__dict__['headers'].__dict__['_headers'] + headers)
-        kwargs['request'].__dict__['headers'].__dict__['_headers'] = updated_headers
-
-    client.meta.events.register('before-sign.s3.CreateBucket', add_headers_before_sign)
-
-    client.create_bucket(Bucket=bucket_name)
-
-    response = client.get_bucket_acl(Bucket=bucket_name)
-
-    grants = response['Grants']
-    alt_user_id = get_alt_user_id()
-    alt_display_name = get_alt_display_name()
-
-    check_grants(
-        grants,
-        [
-            dict(
-                Permission='READ',
-                ID=alt_user_id,
-                DisplayName=alt_display_name,
-                URI=None,
-                EmailAddress=None,
-                Type='CanonicalUser',
-                ),
-            dict(
-                Permission='WRITE',
-                ID=alt_user_id,
-                DisplayName=alt_display_name,
-                URI=None,
-                EmailAddress=None,
-                Type='CanonicalUser',
-                ),
-            dict(
-                Permission='READ_ACP',
-                ID=alt_user_id,
-                DisplayName=alt_display_name,
-                URI=None,
-                EmailAddress=None,
-                Type='CanonicalUser',
-                ),
-            dict(
-                Permission='WRITE_ACP',
-                ID=alt_user_id,
-                DisplayName=alt_display_name,
-                URI=None,
-                EmailAddress=None,
-                Type='CanonicalUser',
-                ),
-            dict(
-                Permission='FULL_CONTROL',
-                ID=alt_user_id,
-                DisplayName=alt_display_name,
-                URI=None,
-                EmailAddress=None,
-                Type='CanonicalUser',
-                ),
-            ],
-        )
-
-    alt_client = get_alt_client()
-
-    alt_client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
-
-    # set bucket acl to public-read-write so that teardown can work
-    alt_client.put_bucket_acl(Bucket=bucket_name, ACL='public-read-write')
-
-
-# This test will fail on DH Objects. DHO allows multiple users with one account, which
-# would violate the uniqueness requirement of a user's email. As such, DHO users are
-# created without an email.
-@pytest.mark.fails_on_aws
-def test_bucket_acl_grant_email():
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    alt_user_id = get_alt_user_id()
-    alt_display_name = get_alt_display_name()
-    alt_email_address = get_alt_email()
-
-    main_user_id = get_main_user_id()
-    main_display_name = get_main_display_name()
-
-    grant = {'Grantee': {'EmailAddress': alt_email_address, 'Type': 'AmazonCustomerByEmail' }, 'Permission': 'FULL_CONTROL'}
-
-    grant = add_bucket_user_grant(bucket_name, grant)
-
-    client.put_bucket_acl(Bucket=bucket_name, AccessControlPolicy = grant)
-
-    response = client.get_bucket_acl(Bucket=bucket_name)
-
-    grants = response['Grants']
-    check_grants(
-        grants,
-        [
-            dict(
-                Permission='FULL_CONTROL',
-                ID=alt_user_id,
-                DisplayName=alt_display_name,
-                URI=None,
-                EmailAddress=None,
-                Type='CanonicalUser',
-                ),
-            dict(
-                Permission='FULL_CONTROL',
-                ID=main_user_id,
-                DisplayName=main_display_name,
-                URI=None,
-                EmailAddress=None,
-                Type='CanonicalUser',
-                ),
-        ]
-    )
-
-def test_bucket_acl_grant_email_not_exist():
-    # behavior not documented by amazon
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    alt_user_id = get_alt_user_id()
-    alt_display_name = get_alt_display_name()
-    alt_email_address = get_alt_email()
-
-    NONEXISTENT_EMAIL = 'doesnotexist@dreamhost.com.invalid'
-    grant = {'Grantee': {'EmailAddress': NONEXISTENT_EMAIL, 'Type': 'AmazonCustomerByEmail'}, 'Permission': 'FULL_CONTROL'}
-
-    grant = add_bucket_user_grant(bucket_name, grant)
-
-    e = assert_raises(ClientError, client.put_bucket_acl, Bucket=bucket_name, AccessControlPolicy = grant)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 400
-    assert error_code == 'UnresolvableGrantByEmailAddress'
-
-def test_bucket_acl_revoke_all():
-    # revoke all access, including the owner's access
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
-    response = client.get_bucket_acl(Bucket=bucket_name)
-    old_grants = response['Grants']
-    policy = {}
-    policy['Owner'] = response['Owner']
-    # clear grants
-    policy['Grants'] = []
-
-    # remove read/write permission for everyone
-    client.put_bucket_acl(Bucket=bucket_name, AccessControlPolicy=policy)
-
-    response = client.get_bucket_acl(Bucket=bucket_name)
-
-    assert len(response['Grants']) == 0
-
-    # set policy back to original so that bucket can be cleaned up
-    policy['Grants'] = old_grants
-    client.put_bucket_acl(Bucket=bucket_name, AccessControlPolicy=policy)
-
-# TODO rgw log_bucket.set_as_logging_target() gives 403 Forbidden
-# http://tracker.newdream.net/issues/984
-@pytest.mark.fails_on_rgw
-def test_logging_toggle():
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    main_display_name = get_main_display_name()
-    main_user_id = get_main_user_id()
-
-    status = {'LoggingEnabled': {'TargetBucket': bucket_name, 'TargetGrants': [{'Grantee': {'DisplayName': main_display_name, 'ID': main_user_id,'Type': 'CanonicalUser'},'Permission': 'FULL_CONTROL'}], 'TargetPrefix': 'foologgingprefix'}}
-
-    client.put_bucket_logging(Bucket=bucket_name, BucketLoggingStatus=status)
-    client.get_bucket_logging(Bucket=bucket_name)
-    status = {'LoggingEnabled': {}}
-    client.put_bucket_logging(Bucket=bucket_name, BucketLoggingStatus=status)
-    # NOTE: this does not actually test whether or not logging works
-
-def _setup_access(bucket_acl, object_acl):
-    """
-    Simple test fixture: create a bucket with given ACL, with objects:
-    - a: owning user, given ACL
-    - a2: same object accessed by some other user
-    - b: owning user, default ACL in bucket w/given ACL
-    - b2: same object accessed by a some other user
-    """
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    key1 = 'foo'
-    key2 = 'bar'
-    newkey = 'new'
-
-    client.put_bucket_acl(Bucket=bucket_name, ACL=bucket_acl)
-    client.put_object(Bucket=bucket_name, Key=key1, Body='foocontent')
-    client.put_object_acl(Bucket=bucket_name, Key=key1, ACL=object_acl)
-    client.put_object(Bucket=bucket_name, Key=key2, Body='barcontent')
-
-    return bucket_name, key1, key2, newkey
-
-def get_bucket_key_names(bucket_name):
-    objs_list = get_objects_list(bucket_name)
-    return frozenset(objs_list)
-
-def list_bucket_storage_class(client, bucket_name):
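-    # map each storage class to the list of object versions stored in it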
-    result = defaultdict(list)
-    response  = client.list_object_versions(Bucket=bucket_name)
-    for k in response['Versions']:
-        result[k['StorageClass']].append(k)
-
-    return result
-
-def list_bucket_versions(client, bucket_name):
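-    # collect all object versions of the bucket, keyed by the bucket name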
-    result = defaultdict(list)
-    response  = client.list_object_versions(Bucket=bucket_name)
-    for k in response['Versions']:
-        result[response['Name']].append(k)
-
-    return result
-
-def test_access_bucket_private_object_private():
-    # all the test_access_* tests follow this template
-    bucket_name, key1, key2, newkey = _setup_access(bucket_acl='private', object_acl='private')
-
-    alt_client = get_alt_client()
-    # acled object read fail
-    check_access_denied(alt_client.get_object, Bucket=bucket_name, Key=key1)
-    # default object read fail
-    check_access_denied(alt_client.get_object, Bucket=bucket_name, Key=key2)
-    # bucket read fail
-    check_access_denied(alt_client.list_objects, Bucket=bucket_name)
-
-    # acled object write fail
-    check_access_denied(alt_client.put_object, Bucket=bucket_name, Key=key1, Body='barcontent')
-    # NOTE: The above put causes the connection to go bad, so the client can't be used
-    # anymore. This can be solved either by:
-    # 1) putting an empty string ('') in the 'Body' field of those put_object calls
-    # 2) getting a new client, hence the creation of alt_client{2,3} for the tests below
-    # TODO: Test it from another host and on AWS; report this to Amazon if the findings are identical
-
-    alt_client2 = get_alt_client()
-    # default object write fail
-    check_access_denied(alt_client2.put_object, Bucket=bucket_name, Key=key2, Body='baroverwrite')
-    # bucket write fail
-    alt_client3 = get_alt_client()
-    check_access_denied(alt_client3.put_object, Bucket=bucket_name, Key=newkey, Body='newcontent')
-
-@pytest.mark.list_objects_v2
-def test_access_bucket_private_objectv2_private():
-    # all the test_access_* tests follow this template
-    bucket_name, key1, key2, newkey = _setup_access(bucket_acl='private', object_acl='private')
-
-    alt_client = get_alt_client()
-    # acled object read fail
-    check_access_denied(alt_client.get_object, Bucket=bucket_name, Key=key1)
-    # default object read fail
-    check_access_denied(alt_client.get_object, Bucket=bucket_name, Key=key2)
-    # bucket read fail
-    check_access_denied(alt_client.list_objects_v2, Bucket=bucket_name)
-
-    # acled object write fail
-    check_access_denied(alt_client.put_object, Bucket=bucket_name, Key=key1, Body='barcontent')
-    # NOTE: The above put causes the connection to go bad, so the client can't be used
-    # anymore. This can be solved either by:
-    # 1) putting an empty string ('') in the 'Body' field of those put_object calls
-    # 2) getting a new client, hence the creation of alt_client{2,3} for the tests below
-    # TODO: Test it from another host and on AWS; report this to Amazon if the findings are identical
-
-    alt_client2 = get_alt_client()
-    # default object write fail
-    check_access_denied(alt_client2.put_object, Bucket=bucket_name, Key=key2, Body='baroverwrite')
-    # bucket write fail
-    alt_client3 = get_alt_client()
-    check_access_denied(alt_client3.put_object, Bucket=bucket_name, Key=newkey, Body='newcontent')
-
-def test_access_bucket_private_object_publicread():
-
-    bucket_name, key1, key2, newkey = _setup_access(bucket_acl='private', object_acl='public-read')
-    alt_client = get_alt_client()
-    response = alt_client.get_object(Bucket=bucket_name, Key=key1)
-
-    body = _get_body(response)
-
-    # a should be public-read, b gets default (private)
-    assert body == 'foocontent'
-
-    check_access_denied(alt_client.put_object, Bucket=bucket_name, Key=key1, Body='foooverwrite')
-    alt_client2 = get_alt_client()
-    check_access_denied(alt_client2.get_object, Bucket=bucket_name, Key=key2)
-    check_access_denied(alt_client2.put_object, Bucket=bucket_name, Key=key2, Body='baroverwrite')
-
-    alt_client3 = get_alt_client()
-    check_access_denied(alt_client3.list_objects, Bucket=bucket_name)
-    check_access_denied(alt_client3.put_object, Bucket=bucket_name, Key=newkey, Body='newcontent')
-
-@pytest.mark.list_objects_v2
-def test_access_bucket_private_objectv2_publicread():
-
-    bucket_name, key1, key2, newkey = _setup_access(bucket_acl='private', object_acl='public-read')
-    alt_client = get_alt_client()
-    response = alt_client.get_object(Bucket=bucket_name, Key=key1)
-
-    body = _get_body(response)
-
-    # a should be public-read, b gets default (private)
-    assert body == 'foocontent'
-
-    check_access_denied(alt_client.put_object, Bucket=bucket_name, Key=key1, Body='foooverwrite')
-    alt_client2 = get_alt_client()
-    check_access_denied(alt_client2.get_object, Bucket=bucket_name, Key=key2)
-    check_access_denied(alt_client2.put_object, Bucket=bucket_name, Key=key2, Body='baroverwrite')
-
-    alt_client3 = get_alt_client()
-    check_access_denied(alt_client3.list_objects_v2, Bucket=bucket_name)
-    check_access_denied(alt_client3.put_object, Bucket=bucket_name, Key=newkey, Body='newcontent')
-
-def test_access_bucket_private_object_publicreadwrite():
-    bucket_name, key1, key2, newkey = _setup_access(bucket_acl='private', object_acl='public-read-write')
-    alt_client = get_alt_client()
-    response = alt_client.get_object(Bucket=bucket_name, Key=key1)
-
-    body = _get_body(response)
-
-    # a should be public-read-only ... because it is in a private bucket
-    # b gets default (private)
-    assert body == 'foocontent'
-
-    check_access_denied(alt_client.put_object, Bucket=bucket_name, Key=key1, Body='foooverwrite')
-    alt_client2 = get_alt_client()
-    check_access_denied(alt_client2.get_object, Bucket=bucket_name, Key=key2)
-    check_access_denied(alt_client2.put_object, Bucket=bucket_name, Key=key2, Body='baroverwrite')
-
-    alt_client3 = get_alt_client()
-    check_access_denied(alt_client3.list_objects, Bucket=bucket_name)
-    check_access_denied(alt_client3.put_object, Bucket=bucket_name, Key=newkey, Body='newcontent')
-
-@pytest.mark.list_objects_v2
-def test_access_bucket_private_objectv2_publicreadwrite():
-    bucket_name, key1, key2, newkey = _setup_access(bucket_acl='private', object_acl='public-read-write')
-    alt_client = get_alt_client()
-    response = alt_client.get_object(Bucket=bucket_name, Key=key1)
-
-    body = _get_body(response)
-
-    # a should be public-read-only ... because it is in a private bucket
-    # b gets default (private)
-    assert body == 'foocontent'
-
-    check_access_denied(alt_client.put_object, Bucket=bucket_name, Key=key1, Body='foooverwrite')
-    alt_client2 = get_alt_client()
-    check_access_denied(alt_client2.get_object, Bucket=bucket_name, Key=key2)
-    check_access_denied(alt_client2.put_object, Bucket=bucket_name, Key=key2, Body='baroverwrite')
-
-    alt_client3 = get_alt_client()
-    check_access_denied(alt_client3.list_objects_v2, Bucket=bucket_name)
-    check_access_denied(alt_client3.put_object, Bucket=bucket_name, Key=newkey, Body='newcontent')
-
-def test_access_bucket_publicread_object_private():
-    bucket_name, key1, key2, newkey = _setup_access(bucket_acl='public-read', object_acl='private')
-    alt_client = get_alt_client()
-
-    # a should be private, b gets default (private)
-    check_access_denied(alt_client.get_object, Bucket=bucket_name, Key=key1)
-    check_access_denied(alt_client.put_object, Bucket=bucket_name, Key=key1, Body='barcontent')
-
-    alt_client2 = get_alt_client()
-    check_access_denied(alt_client2.get_object, Bucket=bucket_name, Key=key2)
-    check_access_denied(alt_client2.put_object, Bucket=bucket_name, Key=key2, Body='baroverwrite')
-
-    alt_client3 = get_alt_client()
-
-    objs = get_objects_list(bucket=bucket_name, client=alt_client3)
-
-    assert objs == ['bar', 'foo']
-    check_access_denied(alt_client3.put_object, Bucket=bucket_name, Key=newkey, Body='newcontent')
-
-def test_access_bucket_publicread_object_publicread():
-    bucket_name, key1, key2, newkey = _setup_access(bucket_acl='public-read', object_acl='public-read')
-    alt_client = get_alt_client()
-
-    response = alt_client.get_object(Bucket=bucket_name, Key=key1)
-
-    # a should be public-read, b gets default (private)
-    body = _get_body(response)
-    assert body == 'foocontent'
-
-    check_access_denied(alt_client.put_object, Bucket=bucket_name, Key=key1, Body='foooverwrite')
-
-    alt_client2 = get_alt_client()
-    check_access_denied(alt_client2.get_object, Bucket=bucket_name, Key=key2)
-    check_access_denied(alt_client2.put_object, Bucket=bucket_name, Key=key2, Body='baroverwrite')
-
-    alt_client3 = get_alt_client()
-
-    objs = get_objects_list(bucket=bucket_name, client=alt_client3)
-
-    assert objs == ['bar', 'foo']
-    check_access_denied(alt_client3.put_object, Bucket=bucket_name, Key=newkey, Body='newcontent')
-
-
-def test_access_bucket_publicread_object_publicreadwrite():
-    bucket_name, key1, key2, newkey = _setup_access(bucket_acl='public-read', object_acl='public-read-write')
-    alt_client = get_alt_client()
-
-    response = alt_client.get_object(Bucket=bucket_name, Key=key1)
-
-    body = _get_body(response)
-
-    # a should be public-read-only ... because it is in a r/o bucket
-    # b gets default (private)
-    assert body == 'foocontent'
-
-    check_access_denied(alt_client.put_object, Bucket=bucket_name, Key=key1, Body='foooverwrite')
-
-    alt_client2 = get_alt_client()
-    check_access_denied(alt_client2.get_object, Bucket=bucket_name, Key=key2)
-    check_access_denied(alt_client2.put_object, Bucket=bucket_name, Key=key2, Body='baroverwrite')
-
-    alt_client3 = get_alt_client()
-
-    objs = get_objects_list(bucket=bucket_name, client=alt_client3)
-
-    assert objs == ['bar', 'foo']
-    check_access_denied(alt_client3.put_object, Bucket=bucket_name, Key=newkey, Body='newcontent')
-
-
-def test_access_bucket_publicreadwrite_object_private():
-    bucket_name, key1, key2, newkey = _setup_access(bucket_acl='public-read-write', object_acl='private')
-    alt_client = get_alt_client()
-
-    # a should be private, b gets default (private)
-    check_access_denied(alt_client.get_object, Bucket=bucket_name, Key=key1)
-    alt_client.put_object(Bucket=bucket_name, Key=key1, Body='barcontent')
-
-    check_access_denied(alt_client.get_object, Bucket=bucket_name, Key=key2)
-    alt_client.put_object(Bucket=bucket_name, Key=key2, Body='baroverwrite')
-
-    objs = get_objects_list(bucket=bucket_name, client=alt_client)
-    assert objs == ['bar', 'foo']
-    alt_client.put_object(Bucket=bucket_name, Key=newkey, Body='newcontent')
-
-def test_access_bucket_publicreadwrite_object_publicread():
-    bucket_name, key1, key2, newkey = _setup_access(bucket_acl='public-read-write', object_acl='public-read')
-    alt_client = get_alt_client()
-
-    # a should be public-read, b gets default (private)
-    response = alt_client.get_object(Bucket=bucket_name, Key=key1)
-
-    body = _get_body(response)
-    assert body == 'foocontent'
-    alt_client.put_object(Bucket=bucket_name, Key=key1, Body='barcontent')
-
-    check_access_denied(alt_client.get_object, Bucket=bucket_name, Key=key2)
-    alt_client.put_object(Bucket=bucket_name, Key=key2, Body='baroverwrite')
-
-    objs = get_objects_list(bucket=bucket_name, client=alt_client)
-    assert objs == ['bar', 'foo']
-    alt_client.put_object(Bucket=bucket_name, Key=newkey, Body='newcontent')
-
-def test_access_bucket_publicreadwrite_object_publicreadwrite():
-    bucket_name, key1, key2, newkey = _setup_access(bucket_acl='public-read-write', object_acl='public-read-write')
-    alt_client = get_alt_client()
-    response = alt_client.get_object(Bucket=bucket_name, Key=key1)
-    body = _get_body(response)
-
-    # a should be public-read-write, b gets default (private)
-    assert body == 'foocontent'
-    alt_client.put_object(Bucket=bucket_name, Key=key1, Body='foooverwrite')
-    check_access_denied(alt_client.get_object, Bucket=bucket_name, Key=key2)
-    alt_client.put_object(Bucket=bucket_name, Key=key2, Body='baroverwrite')
-    objs = get_objects_list(bucket=bucket_name, client=alt_client)
-    assert objs == ['bar', 'foo']
-    alt_client.put_object(Bucket=bucket_name, Key=newkey, Body='newcontent')
-
-def test_buckets_create_then_list():
-    client = get_client()
-    bucket_names = []
-    for i in range(5):
-        bucket_name = get_new_bucket_name()
-        bucket_names.append(bucket_name)
-
-    for name in bucket_names:
-        client.create_bucket(Bucket=name)
-
-    response = client.list_buckets()
-    bucket_dicts = response['Buckets']
-
-    buckets_list = get_buckets_list()
-
-    for name in bucket_names:
-        if name not in buckets_list:
-            raise RuntimeError("S3 implementation's GET on Service did not return bucket we created: %r" % name)
-
-def test_buckets_list_ctime():
-    # check that creation times are within a day
-    before = datetime.datetime.now(datetime.timezone.utc) - datetime.timedelta(days=1)
-
-    client = get_client()
-    for i in range(5):
-        client.create_bucket(Bucket=get_new_bucket_name())
-
-    response = client.list_buckets()
-    for bucket in response['Buckets']:
-        ctime = bucket['CreationDate']
-        assert before <= ctime, '%r > %r' % (before, ctime)
-
-@pytest.mark.fails_on_aws
-def test_list_buckets_anonymous():
-    # Get a connection with bad authorization, then change it to use our anonymous auth mechanism,
-    # emulating standard unauthenticated HTTP access.
-    #
-    # While it may have been possible to use httplib directly, doing it this way also
-    # allows us to vary the calling format in testing.
-    unauthenticated_client = get_unauthenticated_client()
-    response = unauthenticated_client.list_buckets()
-    assert len(response['Buckets']) == 0
-
-def test_list_buckets_invalid_auth():
-    bad_auth_client = get_bad_auth_client()
-    e = assert_raises(ClientError, bad_auth_client.list_buckets)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 403
-
-def test_list_buckets_bad_auth():
-    main_access_key = get_main_aws_access_key()
-    bad_auth_client = get_bad_auth_client(aws_access_key_id=main_access_key)
-    e = assert_raises(ClientError, bad_auth_client.list_buckets)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 403
-
-@pytest.fixture
-def override_prefix_a():
-    nuke_prefixed_buckets(prefix='a'+get_prefix())
-    yield
-    nuke_prefixed_buckets(prefix='a'+get_prefix())
-
-# this test goes outside the user-configured prefix because it needs to
-# control the initial character of the bucket name
-def test_bucket_create_naming_good_starts_alpha(override_prefix_a):
-    check_good_bucket_name('foo', _prefix='a'+get_prefix())
-
-@pytest.fixture
-def override_prefix_0():
-    nuke_prefixed_buckets(prefix='0'+get_prefix())
-    yield
-    nuke_prefixed_buckets(prefix='0'+get_prefix())
-
-# this test goes outside the user-configured prefix because it needs to
-# control the initial character of the bucket name
-def test_bucket_create_naming_good_starts_digit(override_prefix_0):
-    check_good_bucket_name('foo', _prefix='0'+get_prefix())
-
-def test_bucket_create_naming_good_contains_period():
-    check_good_bucket_name('aaa.111')
-
-def test_bucket_create_naming_good_contains_hyphen():
-    check_good_bucket_name('aaa-111')
-
-def test_bucket_recreate_not_overriding():
-    key_names = ['mykey1', 'mykey2']
-    bucket_name = _create_objects(keys=key_names)
-
-    objs_list = get_objects_list(bucket_name)
-    assert key_names == objs_list
-
-    client = get_client()
-    client.create_bucket(Bucket=bucket_name)
-
-    objs_list = get_objects_list(bucket_name)
-    assert key_names == objs_list
-
-@pytest.mark.fails_on_dbstore
-def test_bucket_create_special_key_names():
-    key_names = [
-        ' ',
-        '"',
-        '$',
-        '%',
-        '&',
-        '\'',
-        '<',
-        '>',
-        '_',
-        '_ ',
-        '_ _',
-        '__',
-    ]
-
-    bucket_name = _create_objects(keys=key_names)
-
-    objs_list = get_objects_list(bucket_name)
-    assert key_names == objs_list
-
-    client = get_client()
-
-    for name in key_names:
-        assert name in objs_list
-        response = client.get_object(Bucket=bucket_name, Key=name)
-        body = _get_body(response)
-        assert name == body
-        client.put_object_acl(Bucket=bucket_name, Key=name, ACL='private')
-
-def test_bucket_list_special_prefix():
-    key_names = ['_bla/1', '_bla/2', '_bla/3', '_bla/4', 'abcd']
-    bucket_name = _create_objects(keys=key_names)
-
-    objs_list = get_objects_list(bucket_name)
-
-    assert len(objs_list) == 5
-
-    objs_list = get_objects_list(bucket_name, prefix='_bla/')
-    assert len(objs_list) == 4
-
-@pytest.mark.fails_on_dbstore
-def test_object_copy_zero_size():
-    key = 'foo123bar'
-    bucket_name = _create_objects(keys=[key])
-    fp_a = FakeWriteFile(0, '')
-    client = get_client()
-    client.put_object(Bucket=bucket_name, Key=key, Body=fp_a)
-
-    copy_source = {'Bucket': bucket_name, 'Key': key}
-
-    client.copy(copy_source, bucket_name, 'bar321foo')
-    response = client.get_object(Bucket=bucket_name, Key='bar321foo')
-    assert response['ContentLength'] == 0
-
-@pytest.mark.fails_on_dbstore
-def test_object_copy_16m():
-    bucket_name = get_new_bucket()
-    key1 = 'obj1'
-    client = get_client()
-    client.put_object(Bucket=bucket_name, Key=key1, Body=bytearray(16*1024*1024))
-
-    copy_source = {'Bucket': bucket_name, 'Key': key1}
-    key2 = 'obj2'
-    client.copy_object(Bucket=bucket_name, Key=key2, CopySource=copy_source)
-    response = client.get_object(Bucket=bucket_name, Key=key2)
-    assert response['ContentLength'] == 16*1024*1024
-
-@pytest.mark.fails_on_dbstore
-def test_object_copy_same_bucket():
-    bucket_name = get_new_bucket()
-    client = get_client()
-    client.put_object(Bucket=bucket_name, Key='foo123bar', Body='foo')
-
-    copy_source = {'Bucket': bucket_name, 'Key': 'foo123bar'}
-
-    client.copy(copy_source, bucket_name, 'bar321foo')
-
-    response = client.get_object(Bucket=bucket_name, Key='bar321foo')
-    body = _get_body(response)
-    assert 'foo' == body
-
-@pytest.mark.fails_on_dbstore
-def test_object_copy_verify_contenttype():
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    content_type = 'text/bla'
-    client.put_object(Bucket=bucket_name, ContentType=content_type, Key='foo123bar', Body='foo')
-
-    copy_source = {'Bucket': bucket_name, 'Key': 'foo123bar'}
-
-    client.copy(copy_source, bucket_name, 'bar321foo')
-
-    response = client.get_object(Bucket=bucket_name, Key='bar321foo')
-    body = _get_body(response)
-    assert 'foo' == body
-    response_content_type = response['ContentType']
-    assert response_content_type == content_type
-
-def test_object_copy_to_itself():
-    bucket_name = get_new_bucket()
-    client = get_client()
-    client.put_object(Bucket=bucket_name, Key='foo123bar', Body='foo')
-
-    copy_source = {'Bucket': bucket_name, 'Key': 'foo123bar'}
-
-    e = assert_raises(ClientError, client.copy, copy_source, bucket_name, 'foo123bar')
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 400
-    assert error_code == 'InvalidRequest'
-
-@pytest.mark.fails_on_dbstore
-def test_object_copy_to_itself_with_metadata():
-    bucket_name = get_new_bucket()
-    client = get_client()
-    client.put_object(Bucket=bucket_name, Key='foo123bar', Body='foo')
-    copy_source = {'Bucket': bucket_name, 'Key': 'foo123bar'}
-    metadata = {'foo': 'bar'}
-
-    client.copy_object(Bucket=bucket_name, CopySource=copy_source, Key='foo123bar', Metadata=metadata, MetadataDirective='REPLACE')
-    response = client.get_object(Bucket=bucket_name, Key='foo123bar')
-    assert response['Metadata'] == metadata
-
-@pytest.mark.fails_on_dbstore
-def test_object_copy_diff_bucket():
-    bucket_name1 = get_new_bucket()
-    bucket_name2 = get_new_bucket()
-
-    client = get_client()
-    client.put_object(Bucket=bucket_name1, Key='foo123bar', Body='foo')
-
-    copy_source = {'Bucket': bucket_name1, 'Key': 'foo123bar'}
-
-    client.copy(copy_source, bucket_name2, 'bar321foo')
-
-    response = client.get_object(Bucket=bucket_name2, Key='bar321foo')
-    body = _get_body(response)
-    assert 'foo' == body
-
-def test_object_copy_not_owned_bucket():
-    client = get_client()
-    alt_client = get_alt_client()
-    bucket_name1 = get_new_bucket_name()
-    bucket_name2 = get_new_bucket_name()
-    client.create_bucket(Bucket=bucket_name1)
-    alt_client.create_bucket(Bucket=bucket_name2)
-
-    client.put_object(Bucket=bucket_name1, Key='foo123bar', Body='foo')
-
-    copy_source = {'Bucket': bucket_name1, 'Key': 'foo123bar'}
-
-    e = assert_raises(ClientError, alt_client.copy, copy_source, bucket_name2, 'bar321foo')
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 403
-
-def test_object_copy_not_owned_object_bucket():
-    client = get_client()
-    alt_client = get_alt_client()
-    bucket_name = get_new_bucket_name()
-    client.create_bucket(Bucket=bucket_name)
-    client.put_object(Bucket=bucket_name, Key='foo123bar', Body='foo')
-
-    alt_user_id = get_alt_user_id()
-
-    grant = {'Grantee': {'ID': alt_user_id, 'Type': 'CanonicalUser' }, 'Permission': 'FULL_CONTROL'}
-    grants = add_obj_user_grant(bucket_name, 'foo123bar', grant)
-    client.put_object_acl(Bucket=bucket_name, Key='foo123bar', AccessControlPolicy=grants)
-
-    grant = add_bucket_user_grant(bucket_name, grant)
-    client.put_bucket_acl(Bucket=bucket_name, AccessControlPolicy=grant)
-
-    alt_client.get_object(Bucket=bucket_name, Key='foo123bar')
-
-    copy_source = {'Bucket': bucket_name, 'Key': 'foo123bar'}
-    alt_client.copy(copy_source, bucket_name, 'bar321foo')
-
-@pytest.mark.fails_on_dbstore
-def test_object_copy_canned_acl():
-    bucket_name = get_new_bucket()
-    client = get_client()
-    alt_client = get_alt_client()
-    client.put_object(Bucket=bucket_name, Key='foo123bar', Body='foo')
-
-    copy_source = {'Bucket': bucket_name, 'Key': 'foo123bar'}
-    client.copy_object(Bucket=bucket_name, CopySource=copy_source, Key='bar321foo', ACL='public-read')
-    # check ACL is applied by doing GET from another user
-    alt_client.get_object(Bucket=bucket_name, Key='bar321foo')
-
-
-    metadata={'abc': 'def'}
-    copy_source = {'Bucket': bucket_name, 'Key': 'bar321foo'}
-    client.copy_object(ACL='public-read', Bucket=bucket_name, CopySource=copy_source, Key='foo123bar', Metadata=metadata, MetadataDirective='REPLACE')
-
-    # check ACL is applied by doing GET from another user
-    alt_client.get_object(Bucket=bucket_name, Key='foo123bar')
-
-@pytest.mark.fails_on_dbstore
-def test_object_copy_retaining_metadata():
-    for size in [3, 1024 * 1024]:
-        bucket_name = get_new_bucket()
-        client = get_client()
-        content_type = 'audio/ogg'
-
-        metadata = {'key1': 'value1', 'key2': 'value2'}
-        client.put_object(Bucket=bucket_name, Key='foo123bar', Metadata=metadata, ContentType=content_type, Body=bytearray(size))
-
-        copy_source = {'Bucket': bucket_name, 'Key': 'foo123bar'}
-        client.copy_object(Bucket=bucket_name, CopySource=copy_source, Key='bar321foo')
-
-        response = client.get_object(Bucket=bucket_name, Key='bar321foo')
-        assert content_type == response['ContentType']
-        assert metadata == response['Metadata']
-        body = _get_body(response)
-        assert size == response['ContentLength']
-
-@pytest.mark.fails_on_dbstore
-def test_object_copy_replacing_metadata():
-    for size in [3, 1024 * 1024]:
-        bucket_name = get_new_bucket()
-        client = get_client()
-        content_type = 'audio/ogg'
-
-        metadata = {'key1': 'value1', 'key2': 'value2'}
-        client.put_object(Bucket=bucket_name, Key='foo123bar', Metadata=metadata, ContentType=content_type, Body=bytearray(size))
-
-        metadata = {'key3': 'value3', 'key2': 'value2'}
-        content_type = 'audio/mpeg'
-
-        copy_source = {'Bucket': bucket_name, 'Key': 'foo123bar'}
-        client.copy_object(Bucket=bucket_name, CopySource=copy_source, Key='bar321foo', Metadata=metadata, MetadataDirective='REPLACE', ContentType=content_type)
-
-        response = client.get_object(Bucket=bucket_name, Key='bar321foo')
-        assert content_type == response['ContentType']
-        assert metadata == response['Metadata']
-        assert size == response['ContentLength']
-
-def test_object_copy_bucket_not_found():
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    copy_source = {'Bucket': bucket_name + "-fake", 'Key': 'foo123bar'}
-    e = assert_raises(ClientError, client.copy, copy_source, bucket_name, 'bar321foo')
-    status = _get_status(e.response)
-    assert status == 404
-
-def test_object_copy_key_not_found():
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    copy_source = {'Bucket': bucket_name, 'Key': 'foo123bar'}
-    e = assert_raises(ClientError, client.copy, copy_source, bucket_name, 'bar321foo')
-    status = _get_status(e.response)
-    assert status == 404
-
-@pytest.mark.fails_on_dbstore
-def test_object_copy_versioned_bucket():
-    bucket_name = get_new_bucket()
-    client = get_client()
-    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
-    size = 1*5
-    data = bytearray(size)
-    data_str = data.decode()
-    key1 = 'foo123bar'
-    client.put_object(Bucket=bucket_name, Key=key1, Body=data)
-
-    response = client.get_object(Bucket=bucket_name, Key=key1)
-    version_id = response['VersionId']
-
-    # copy object in the same bucket
-    copy_source = {'Bucket': bucket_name, 'Key': key1, 'VersionId': version_id}
-    key2 = 'bar321foo'
-    client.copy_object(Bucket=bucket_name, CopySource=copy_source, Key=key2)
-    response = client.get_object(Bucket=bucket_name, Key=key2)
-    body = _get_body(response)
-    assert data_str == body
-    assert size == response['ContentLength']
-
-
-    # second copy
-    version_id2 = response['VersionId']
-    copy_source = {'Bucket': bucket_name, 'Key': key2, 'VersionId': version_id2}
-    key3 = 'bar321foo2'
-    client.copy_object(Bucket=bucket_name, CopySource=copy_source, Key=key3)
-    response = client.get_object(Bucket=bucket_name, Key=key3)
-    body = _get_body(response)
-    assert data_str == body
-    assert size == response['ContentLength']
-
-    # copy to another versioned bucket
-    bucket_name2 = get_new_bucket()
-    check_configure_versioning_retry(bucket_name2, "Enabled", "Enabled")
-    copy_source = {'Bucket': bucket_name, 'Key': key1, 'VersionId': version_id}
-    key4 = 'bar321foo3'
-    client.copy_object(Bucket=bucket_name2, CopySource=copy_source, Key=key4)
-    response = client.get_object(Bucket=bucket_name2, Key=key4)
-    body = _get_body(response)
-    assert data_str == body
-    assert size == response['ContentLength']
-
-    # copy to another non versioned bucket
-    bucket_name3 = get_new_bucket()
-    copy_source = {'Bucket': bucket_name, 'Key': key1, 'VersionId': version_id}
-    key5 = 'bar321foo4'
-    client.copy_object(Bucket=bucket_name3, CopySource=copy_source, Key=key5)
-    response = client.get_object(Bucket=bucket_name3, Key=key5)
-    body = _get_body(response)
-    assert data_str == body
-    assert size == response['ContentLength']
-
-    # copy from a non versioned bucket
-    copy_source = {'Bucket': bucket_name3, 'Key': key5}
-    key6 = 'foo123bar2'
-    client.copy_object(Bucket=bucket_name, CopySource=copy_source, Key=key6)
-    response = client.get_object(Bucket=bucket_name, Key=key6)
-    body = _get_body(response)
-    assert data_str == body
-    assert size == response['ContentLength']
-
-@pytest.mark.fails_on_dbstore
-def test_object_copy_versioned_url_encoding():
-    bucket = get_new_bucket_resource()
-    check_configure_versioning_retry(bucket.name, "Enabled", "Enabled")
-    src_key = 'foo?bar'
-    src = bucket.put_object(Key=src_key)
-    src.load() # HEAD request tests that the key exists
-
-    # copy object in the same bucket
-    dst_key = 'bar&foo'
-    dst = bucket.Object(dst_key)
-    dst.copy_from(CopySource={'Bucket': src.bucket_name, 'Key': src.key, 'VersionId': src.version_id})
-    dst.load() # HEAD request tests that the key exists
-
-def generate_random(size, part_size=5*1024*1024):
-    """
-    Generate the specified number random data.
-    (actually each MB is a repetition of the first KB)
-    """
-    chunk = 1024
-    allowed = string.ascii_letters
-    for x in range(0, size, part_size):
-        strpart = ''.join([allowed[random.randint(0, len(allowed) - 1)] for _ in range(chunk)])
-        s = ''
-        left = size - x
-        this_part_size = min(left, part_size)
-        for y in range(this_part_size // chunk):
-            s = s + strpart
-        if this_part_size > len(s):
-            s = s + strpart[0:this_part_size - len(s)]
-        yield s
-        if (x == size):
-            return
-
-def _multipart_upload(bucket_name, key, size, part_size=5*1024*1024, client=None, content_type=None, metadata=None, resend_parts=[]):
-    """
-    generate a multi-part upload for a random file of specifed size,
-    if requested, generate a list of the parts
-    return the upload descriptor
-    """
-    if client == None:
-        client = get_client()
-
-
-    if content_type == None and metadata == None:
-        response = client.create_multipart_upload(Bucket=bucket_name, Key=key)
-    else:
-        response = client.create_multipart_upload(Bucket=bucket_name, Key=key, Metadata=metadata, ContentType=content_type)
-
-    upload_id = response['UploadId']
-    s = ''
-    parts = []
-    for i, part in enumerate(generate_random(size, part_size)):
-        # part_num is necessary because PartNumber for upload_part and in parts must start at 1 and i starts at 0
-        part_num = i+1
-        s += part
-        response = client.upload_part(UploadId=upload_id, Bucket=bucket_name, Key=key, PartNumber=part_num, Body=part)
-        parts.append({'ETag': response['ETag'].strip('"'), 'PartNumber': part_num})
-        if i in resend_parts:
-            client.upload_part(UploadId=upload_id, Bucket=bucket_name, Key=key, PartNumber=part_num, Body=part)
-
-    return (upload_id, s, parts)
-
-@pytest.mark.fails_on_dbstore
-def test_object_copy_versioning_multipart_upload():
-    bucket_name = get_new_bucket()
-    client = get_client()
-    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
-
-    key1 = "srcmultipart"
-    key1_metadata = {'foo': 'bar'}
-    content_type = 'text/bla'
-    objlen = 30 * 1024 * 1024
-    (upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key1, size=objlen, content_type=content_type, metadata=key1_metadata)
-    client.complete_multipart_upload(Bucket=bucket_name, Key=key1, UploadId=upload_id, MultipartUpload={'Parts': parts})
-
-    response = client.get_object(Bucket=bucket_name, Key=key1)
-    key1_size = response['ContentLength']
-    version_id = response['VersionId']
-
-    # copy object in the same bucket
-    copy_source = {'Bucket': bucket_name, 'Key': key1, 'VersionId': version_id}
-    key2 = 'dstmultipart'
-    client.copy_object(Bucket=bucket_name, CopySource=copy_source, Key=key2)
-    response = client.get_object(Bucket=bucket_name, Key=key2)
-    version_id2 = response['VersionId']
-    body = _get_body(response)
-    assert data == body
-    assert key1_size == response['ContentLength']
-    assert key1_metadata == response['Metadata']
-    assert content_type == response['ContentType']
-
-    # second copy
-    copy_source = {'Bucket': bucket_name, 'Key': key2, 'VersionId': version_id2}
-    key3 = 'dstmultipart2'
-    client.copy_object(Bucket=bucket_name, CopySource=copy_source, Key=key3)
-    response = client.get_object(Bucket=bucket_name, Key=key3)
-    body = _get_body(response)
-    assert data == body
-    assert key1_size == response['ContentLength']
-    assert key1_metadata == response['Metadata']
-    assert content_type == response['ContentType']
-
-    # copy to another versioned bucket
-    bucket_name2 = get_new_bucket()
-    check_configure_versioning_retry(bucket_name2, "Enabled", "Enabled")
-
-    copy_source = {'Bucket': bucket_name, 'Key': key1, 'VersionId': version_id}
-    key4 = 'dstmultipart3'
-    client.copy_object(Bucket=bucket_name2, CopySource=copy_source, Key=key4)
-    response = client.get_object(Bucket=bucket_name2, Key=key4)
-    body = _get_body(response)
-    assert data == body
-    assert key1_size == response['ContentLength']
-    assert key1_metadata == response['Metadata']
-    assert content_type == response['ContentType']
-
-    # copy to another non versioned bucket
-    bucket_name3 = get_new_bucket()
-    copy_source = {'Bucket': bucket_name, 'Key': key1, 'VersionId': version_id}
-    key5 = 'dstmultipart4'
-    client.copy_object(Bucket=bucket_name3, CopySource=copy_source, Key=key5)
-    response = client.get_object(Bucket=bucket_name3, Key=key5)
-    body = _get_body(response)
-    assert data == body
-    assert key1_size == response['ContentLength']
-    assert key1_metadata == response['Metadata']
-    assert content_type == response['ContentType']
-
-    # copy from a non versioned bucket
-    copy_source = {'Bucket': bucket_name3, 'Key': key5}
-    key6 = 'dstmultipart5'
-    client.copy_object(Bucket=bucket_name3, CopySource=copy_source, Key=key6)
-    response = client.get_object(Bucket=bucket_name3, Key=key6)
-    body = _get_body(response)
-    assert data == body
-    assert key1_size == response['ContentLength']
-    assert key1_metadata == response['Metadata']
-    assert content_type == response['ContentType']
-
-def test_multipart_upload_empty():
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    key1 = "mymultipart"
-    objlen = 0
-    (upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key1, size=objlen)
-    e = assert_raises(ClientError, client.complete_multipart_upload,Bucket=bucket_name, Key=key1, UploadId=upload_id)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 400
-    assert error_code == 'MalformedXML'
-
-@pytest.mark.fails_on_dbstore
-def test_multipart_upload_small():
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    key1 = "mymultipart"
-    objlen = 1
-    (upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key1, size=objlen)
-    response = client.complete_multipart_upload(Bucket=bucket_name, Key=key1, UploadId=upload_id, MultipartUpload={'Parts': parts})
-    response = client.get_object(Bucket=bucket_name, Key=key1)
-    assert response['ContentLength'] == objlen
-    # verify that resending complete_multipart_upload after completion also succeeds
-    response = client.complete_multipart_upload(Bucket=bucket_name, Key=key1, UploadId=upload_id, MultipartUpload={'Parts': parts})
-
-def _create_key_with_random_content(keyname, size=7*1024*1024, bucket_name=None, client=None):
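-    """
-    Store an object of the requested size filled with random data,
-    creating a new bucket when none is given; returns the bucket name.
-    """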
-    if bucket_name is None:
-        bucket_name = get_new_bucket()
-
-    if client is None:
-        client = get_client()
-
-    data_str = str(next(generate_random(size, size)))
-    data = bytes(data_str, 'utf-8')
-    client.put_object(Bucket=bucket_name, Key=keyname, Body=data)
-
-    return bucket_name
-
-def _multipart_copy(src_bucket_name, src_key, dest_bucket_name, dest_key, size, client=None, part_size=5*1024*1024, version_id=None):
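-    """
-    Start a multipart upload of dest_key and copy src_key into it with
-    UploadPartCopy in ranges of part_size bytes; returns (upload_id, parts)
-    without completing the upload.
-    """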
-
-    if client is None:
-        client = get_client()
-
-    response = client.create_multipart_upload(Bucket=dest_bucket_name, Key=dest_key)
-    upload_id = response['UploadId']
-
-    if version_id is None:
-        copy_source = {'Bucket': src_bucket_name, 'Key': src_key}
-    else:
-        copy_source = {'Bucket': src_bucket_name, 'Key': src_key, 'VersionId': version_id}
-
-    parts = []
-
-    i = 0
-    for start_offset in range(0, size, part_size):
-        end_offset = min(start_offset + part_size - 1, size - 1)
-        part_num = i+1
-        copy_source_range = 'bytes={start}-{end}'.format(start=start_offset, end=end_offset)
-        response = client.upload_part_copy(Bucket=dest_bucket_name, Key=dest_key, CopySource=copy_source, PartNumber=part_num, UploadId=upload_id, CopySourceRange=copy_source_range)
-        parts.append({'ETag': response['CopyPartResult']['ETag'], 'PartNumber': part_num})
-        i = i+1
-
-    return (upload_id, parts)
-
-def _check_key_content(src_key, src_bucket_name, dest_key, dest_bucket_name, version_id=None):
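-    """
-    Assert that dest_key's contents match the corresponding leading
-    bytes of src_key (optionally at a specific source version).
-    """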
-    client = get_client()
-
-    if version_id is None:
-        response = client.get_object(Bucket=src_bucket_name, Key=src_key)
-    else:
-        response = client.get_object(Bucket=src_bucket_name, Key=src_key, VersionId=version_id)
-    src_size = response['ContentLength']
-
-    response = client.get_object(Bucket=dest_bucket_name, Key=dest_key)
-    dest_size = response['ContentLength']
-    dest_data = _get_body(response)
-    assert src_size >= dest_size
-
-    r = 'bytes={s}-{e}'.format(s=0, e=dest_size-1)
-    if version_id is None:
-        response = client.get_object(Bucket=src_bucket_name, Key=src_key, Range=r)
-    else:
-        response = client.get_object(Bucket=src_bucket_name, Key=src_key, Range=r, VersionId=version_id)
-    src_data = _get_body(response)
-    assert src_data == dest_data
-
-@pytest.mark.fails_on_dbstore
-def test_multipart_copy_small():
-    src_key = 'foo'
-    src_bucket_name = _create_key_with_random_content(src_key)
-
-    dest_bucket_name = get_new_bucket()
-    dest_key = "mymultipart"
-    size = 1
-    client = get_client()
-
-    (upload_id, parts) = _multipart_copy(src_bucket_name, src_key, dest_bucket_name, dest_key, size)
-    client.complete_multipart_upload(Bucket=dest_bucket_name, Key=dest_key, UploadId=upload_id, MultipartUpload={'Parts': parts})
-
-    response = client.get_object(Bucket=dest_bucket_name, Key=dest_key)
-    assert size == response['ContentLength']
-    _check_key_content(src_key, src_bucket_name, dest_key, dest_bucket_name)
-
-def test_multipart_copy_invalid_range():
-    client = get_client()
-    src_key = 'source'
-    src_bucket_name = _create_key_with_random_content(src_key, size=5)
-
-    response = client.create_multipart_upload(Bucket=src_bucket_name, Key='dest')
-    upload_id = response['UploadId']
-
-    copy_source = {'Bucket': src_bucket_name, 'Key': src_key}
-    copy_source_range = 'bytes={start}-{end}'.format(start=0, end=21)
-
-    e = assert_raises(ClientError, client.upload_part_copy,Bucket=src_bucket_name, Key='dest', UploadId=upload_id, CopySource=copy_source, CopySourceRange=copy_source_range, PartNumber=1)
-    status, error_code = _get_status_and_error_code(e.response)
-    valid_status = [400, 416]
-    if status not in valid_status:
-        raise AssertionError("Invalid response " + str(status))
-    assert error_code == 'InvalidRange'
-
-
-# TODO: remove fails_on_rgw when https://tracker.ceph.com/issues/40795 is resolved
-@pytest.mark.fails_on_rgw
-def test_multipart_copy_improper_range():
-    client = get_client()
-    src_key = 'source'
-    src_bucket_name = _create_key_with_random_content(src_key, size=5)
-
-    response = client.create_multipart_upload(
-        Bucket=src_bucket_name, Key='dest')
-    upload_id = response['UploadId']
-
-    copy_source = {'Bucket': src_bucket_name, 'Key': src_key}
-    test_ranges = ['{start}-{end}'.format(start=0, end=2),
-                   'bytes={start}'.format(start=0),
-                   'bytes=hello-world',
-                   'bytes=0-bar',
-                   'bytes=hello-',
-                   'bytes=0-2,3-5']
-
-    for test_range in test_ranges:
-        e = assert_raises(ClientError, client.upload_part_copy,
-                          Bucket=src_bucket_name, Key='dest',
-                          UploadId=upload_id,
-                          CopySource=copy_source,
-                          CopySourceRange=test_range,
-                          PartNumber=1)
-        status, error_code = _get_status_and_error_code(e.response)
-        assert status == 400
-        assert error_code == 'InvalidArgument'
-
-
-def test_multipart_copy_without_range():
-    client = get_client()
-    src_key = 'source'
-    src_bucket_name = _create_key_with_random_content(src_key, size=10)
-    dest_bucket_name = get_new_bucket_name()
-    get_new_bucket(name=dest_bucket_name)
-    dest_key = "mymultipartcopy"
-
-    response = client.create_multipart_upload(Bucket=dest_bucket_name, Key=dest_key)
-    upload_id = response['UploadId']
-    parts = []
-
-    copy_source = {'Bucket': src_bucket_name, 'Key': src_key}
-    part_num = 1
-    # no CopySourceRange is passed below, so the whole source object is copied
-
-    response = client.upload_part_copy(Bucket=dest_bucket_name, Key=dest_key, CopySource=copy_source, PartNumber=part_num, UploadId=upload_id)
-
-    parts.append({'ETag': response['CopyPartResult']['ETag'], 'PartNumber': part_num})
-    client.complete_multipart_upload(Bucket=dest_bucket_name, Key=dest_key, UploadId=upload_id, MultipartUpload={'Parts': parts})
-
-    response = client.get_object(Bucket=dest_bucket_name, Key=dest_key)
-    assert response['ContentLength'] == 10
-    _check_key_content(src_key, src_bucket_name, dest_key, dest_bucket_name)
-
-@pytest.mark.fails_on_dbstore
-def test_multipart_copy_special_names():
-    src_bucket_name = get_new_bucket()
-
-    dest_bucket_name = get_new_bucket()
-
-    dest_key = "mymultipart"
-    size = 1
-    client = get_client()
-
-    for src_key in (' ', '_', '__', '?versionId'):
-        _create_key_with_random_content(src_key, bucket_name=src_bucket_name)
-        (upload_id, parts) = _multipart_copy(src_bucket_name, src_key, dest_bucket_name, dest_key, size)
-        response = client.complete_multipart_upload(Bucket=dest_bucket_name, Key=dest_key, UploadId=upload_id, MultipartUpload={'Parts': parts})
-        response = client.get_object(Bucket=dest_bucket_name, Key=dest_key)
-        assert size == response['ContentLength']
-        _check_key_content(src_key, src_bucket_name, dest_key, dest_bucket_name)
-
-def _check_content_using_range(key, bucket_name, data, step):
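-    """
-    Read the object back in ranged GETs of at most 'step' bytes and
-    compare each range against the expected data.
-    """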
-    client = get_client()
-    response = client.get_object(Bucket=bucket_name, Key=key)
-    size = response['ContentLength']
-
-    for ofs in range(0, size, step):
-        toread = size - ofs
-        if toread > step:
-            toread = step
-        end = ofs + toread - 1
-        r = 'bytes={s}-{e}'.format(s=ofs, e=end)
-        response = client.get_object(Bucket=bucket_name, Key=key, Range=r)
-        assert response['ContentLength'] == toread
-        body = _get_body(response)
-        assert body == data[ofs:end+1]
-
-@pytest.mark.fails_on_dbstore
-def test_multipart_upload():
-    bucket_name = get_new_bucket()
-    key="mymultipart"
-    content_type='text/bla'
-    objlen = 30 * 1024 * 1024
-    metadata = {'foo': 'bar'}
-    client = get_client()
-
-    (upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key, size=objlen, content_type=content_type, metadata=metadata)
-    client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
-    # verify that resending complete_multipart_upload after completion also succeeds
-    client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
-
-    response = client.list_objects_v2(Bucket=bucket_name, Prefix=key)
-    assert len(response['Contents']) == 1
-    assert response['Contents'][0]['Size'] == objlen
-
-    response = client.get_object(Bucket=bucket_name, Key=key)
-    assert response['ContentType'] == content_type
-    assert response['Metadata'] == metadata
-    body = _get_body(response)
-    assert len(body) == response['ContentLength']
-    assert body == data
-
-    _check_content_using_range(key, bucket_name, data, 1000000)
-    _check_content_using_range(key, bucket_name, data, 10000000)
-
-def check_versioning(bucket_name, status):
-    client = get_client()
-
-    try:
-        response = client.get_bucket_versioning(Bucket=bucket_name)
-        assert response['Status'] == status
-    except KeyError:
-        assert status is None
-
-# Amazon is eventually consistent; retry a bit if the check fails
-def check_configure_versioning_retry(bucket_name, status, expected_string):
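-    """
-    Set the bucket's versioning status, then poll GetBucketVersioning
-    (up to 5 times) until it reports the expected value.
-    """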
-    client = get_client()
-    client.put_bucket_versioning(Bucket=bucket_name, VersioningConfiguration={'Status': status})
-
-    read_status = None
-
-    for i in range(5):
-        try:
-            response = client.get_bucket_versioning(Bucket=bucket_name)
-            read_status = response['Status']
-        except KeyError:
-            read_status = None
-
-        if read_status == expected_string:
-            break
-
-        time.sleep(1)
-
-    assert expected_string == read_status
-
-@pytest.mark.fails_on_dbstore
-def test_multipart_copy_versioned():
-    src_bucket_name = get_new_bucket()
-    dest_bucket_name = get_new_bucket()
-
-    dest_key = "mymultipart"
-    check_versioning(src_bucket_name, None)
-
-    src_key = 'foo'
-    check_configure_versioning_retry(src_bucket_name, "Enabled", "Enabled")
-
-    size = 15 * 1024 * 1024
-    _create_key_with_random_content(src_key, size=size, bucket_name=src_bucket_name)
-    _create_key_with_random_content(src_key, size=size, bucket_name=src_bucket_name)
-    _create_key_with_random_content(src_key, size=size, bucket_name=src_bucket_name)
-
-    version_id = []
-    client = get_client()
-    response = client.list_object_versions(Bucket=src_bucket_name)
-    for ver in response['Versions']:
-        version_id.append(ver['VersionId'])
-
-    for vid in version_id:
-        (upload_id, parts) = _multipart_copy(src_bucket_name, src_key, dest_bucket_name, dest_key, size, version_id=vid)
-        response = client.complete_multipart_upload(Bucket=dest_bucket_name, Key=dest_key, UploadId=upload_id, MultipartUpload={'Parts': parts})
-        response = client.get_object(Bucket=dest_bucket_name, Key=dest_key)
-        assert size == response['ContentLength']
-        _check_key_content(src_key, src_bucket_name, dest_key, dest_bucket_name, version_id=vid)
-
-def _check_upload_multipart_resend(bucket_name, key, objlen, resend_parts):
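-    """
-    Perform a multipart upload, re-sending the part numbers listed in
-    resend_parts, then verify the completed object's metadata and contents.
-    """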
-    content_type = 'text/bla'
-    metadata = {'foo': 'bar'}
-    client = get_client()
-    (upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key, size=objlen, content_type=content_type, metadata=metadata, resend_parts=resend_parts)
-    client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
-
-    response = client.get_object(Bucket=bucket_name, Key=key)
-    assert response['ContentType'] == content_type
-    assert response['Metadata'] == metadata
-    body = _get_body(response)
-    assert len(body) == response['ContentLength']
-    assert body == data
-
-    _check_content_using_range(key, bucket_name, data, 1000000)
-    _check_content_using_range(key, bucket_name, data, 10000000)
-
-@pytest.mark.fails_on_dbstore
-def test_multipart_upload_resend_part():
-    bucket_name = get_new_bucket()
-    key="mymultipart"
-    objlen = 30 * 1024 * 1024
-
-    _check_upload_multipart_resend(bucket_name, key, objlen, [0])
-    _check_upload_multipart_resend(bucket_name, key, objlen, [1])
-    _check_upload_multipart_resend(bucket_name, key, objlen, [2])
-    _check_upload_multipart_resend(bucket_name, key, objlen, [1,2])
-    _check_upload_multipart_resend(bucket_name, key, objlen, [0,1,2,3,4,5])
-
-def test_multipart_upload_multiple_sizes():
-    bucket_name = get_new_bucket()
-    key="mymultipart"
-    client = get_client()
-
-    objlen = 5*1024*1024
-    (upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key, size=objlen)
-    client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
-
-    objlen = 5*1024*1024+100*1024
-    (upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key, size=objlen)
-    client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
-
-    objlen = 5*1024*1024+600*1024
-    (upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key, size=objlen)
-    client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
-
-    objlen = 10*1024*1024+100*1024
-    (upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key, size=objlen)
-    client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
-
-    objlen = 10*1024*1024+600*1024
-    (upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key, size=objlen)
-    client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
-
-    objlen = 10*1024*1024
-    (upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key, size=objlen)
-    client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
-
-@pytest.mark.fails_on_dbstore
-def test_multipart_copy_multiple_sizes():
-    src_key = 'foo'
-    src_bucket_name = _create_key_with_random_content(src_key, 12*1024*1024)
-
-    dest_bucket_name = get_new_bucket()
-    dest_key="mymultipart"
-    client = get_client()
-
-    size = 5*1024*1024
-    (upload_id, parts) = _multipart_copy(src_bucket_name, src_key, dest_bucket_name, dest_key, size)
-    client.complete_multipart_upload(Bucket=dest_bucket_name, Key=dest_key, UploadId=upload_id, MultipartUpload={'Parts': parts})
-    _check_key_content(src_key, src_bucket_name, dest_key, dest_bucket_name)
-
-    size = 5*1024*1024+100*1024
-    (upload_id, parts) = _multipart_copy(src_bucket_name, src_key, dest_bucket_name, dest_key, size)
-    client.complete_multipart_upload(Bucket=dest_bucket_name, Key=dest_key, UploadId=upload_id, MultipartUpload={'Parts': parts})
-    _check_key_content(src_key, src_bucket_name, dest_key, dest_bucket_name)
-
-    size = 5*1024*1024+600*1024
-    (upload_id, parts) = _multipart_copy(src_bucket_name, src_key, dest_bucket_name, dest_key, size)
-    client.complete_multipart_upload(Bucket=dest_bucket_name, Key=dest_key, UploadId=upload_id, MultipartUpload={'Parts': parts})
-    _check_key_content(src_key, src_bucket_name, dest_key, dest_bucket_name)
-
-    size = 10*1024*1024+100*1024
-    (upload_id, parts) = _multipart_copy(src_bucket_name, src_key, dest_bucket_name, dest_key, size)
-    client.complete_multipart_upload(Bucket=dest_bucket_name, Key=dest_key, UploadId=upload_id, MultipartUpload={'Parts': parts})
-    _check_key_content(src_key, src_bucket_name, dest_key, dest_bucket_name)
-
-    size = 10*1024*1024+600*1024
-    (upload_id, parts) = _multipart_copy(src_bucket_name, src_key, dest_bucket_name, dest_key, size)
-    client.complete_multipart_upload(Bucket=dest_bucket_name, Key=dest_key, UploadId=upload_id, MultipartUpload={'Parts': parts})
-    _check_key_content(src_key, src_bucket_name, dest_key, dest_bucket_name)
-
-    size = 10*1024*1024
-    (upload_id, parts) = _multipart_copy(src_bucket_name, src_key, dest_bucket_name, dest_key, size)
-    client.complete_multipart_upload(Bucket=dest_bucket_name, Key=dest_key, UploadId=upload_id, MultipartUpload={'Parts': parts})
-    _check_key_content(src_key, src_bucket_name, dest_key, dest_bucket_name)
-
-def test_multipart_upload_size_too_small():
-    bucket_name = get_new_bucket()
-    key="mymultipart"
-    client = get_client()
-
-    size = 100*1024
-    (upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key, size=size, part_size=10*1024)
-    e = assert_raises(ClientError, client.complete_multipart_upload, Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 400
-    assert error_code == 'EntityTooSmall'
-
-def gen_rand_string(size, chars=string.ascii_uppercase + string.digits):
-    return ''.join(random.choice(chars) for _ in range(size))
-
-def _do_test_multipart_upload_contents(bucket_name, key, num_parts):
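-    """
-    Upload num_parts identical 5 MiB parts plus a smaller final part,
-    complete the upload, and verify the object matches the concatenated
-    payloads; returns the expected full payload.
-    """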
-    payload=gen_rand_string(5)*1024*1024
-    client = get_client()
-
-    response = client.create_multipart_upload(Bucket=bucket_name, Key=key)
-    upload_id = response['UploadId']
-
-    parts = []
-
-    for part_num in range(0, num_parts):
-        part = bytes(payload, 'utf-8')
-        response = client.upload_part(UploadId=upload_id, Bucket=bucket_name, Key=key, PartNumber=part_num+1, Body=part)
-        parts.append({'ETag': response['ETag'].strip('"'), 'PartNumber': part_num+1})
-
-    last_payload = '123'*1024*1024
-    last_part = bytes(last_payload, 'utf-8')
-    response = client.upload_part(UploadId=upload_id, Bucket=bucket_name, Key=key, PartNumber=num_parts+1, Body=last_part)
-    parts.append({'ETag': response['ETag'].strip('"'), 'PartNumber': num_parts+1})
-
-    res = client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
-    assert res['ETag'] != ''
-
-    response = client.get_object(Bucket=bucket_name, Key=key)
-    test_string = _get_body(response)
-
-    all_payload = payload*num_parts + last_payload
-
-    assert test_string == all_payload
-
-    return all_payload
-
-@pytest.mark.fails_on_dbstore
-def test_multipart_upload_contents():
-    bucket_name = get_new_bucket()
-    _do_test_multipart_upload_contents(bucket_name, 'mymultipart', 3)
-
-def test_multipart_upload_overwrite_existing_object():
-    bucket_name = get_new_bucket()
-    client = get_client()
-    key = 'mymultipart'
-    payload='12345'*1024*1024
-    num_parts=2
-    client.put_object(Bucket=bucket_name, Key=key, Body=payload)
-
-
-    response = client.create_multipart_upload(Bucket=bucket_name, Key=key)
-    upload_id = response['UploadId']
-
-    parts = []
-
-    for part_num in range(0, num_parts):
-        response = client.upload_part(UploadId=upload_id, Bucket=bucket_name, Key=key, PartNumber=part_num+1, Body=payload)
-        parts.append({'ETag': response['ETag'].strip('"'), 'PartNumber': part_num+1})
-
-    client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
-
-    response = client.get_object(Bucket=bucket_name, Key=key)
-    test_string = _get_body(response)
-
-    assert test_string == payload*num_parts
-
-def test_abort_multipart_upload():
-    bucket_name = get_new_bucket()
-    key="mymultipart"
-    objlen = 10 * 1024 * 1024
-    client = get_client()
-
-    (upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key, size=objlen)
-    client.abort_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id)
-
-    response = client.list_objects_v2(Bucket=bucket_name, Prefix=key)
-    assert 'Contents' not in response
-
-def test_abort_multipart_upload_not_found():
-    bucket_name = get_new_bucket()
-    client = get_client()
-    key="mymultipart"
-    client.put_object(Bucket=bucket_name, Key=key)
-
-    e = assert_raises(ClientError, client.abort_multipart_upload, Bucket=bucket_name, Key=key, UploadId='56788')
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 404
-    assert error_code == 'NoSuchUpload'
-
-@pytest.mark.fails_on_dbstore
-def test_list_multipart_upload():
-    bucket_name = get_new_bucket()
-    client = get_client()
-    key="mymultipart"
-    mb = 1024 * 1024
-
-    upload_ids = []
-    (upload_id1, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key, size=5*mb)
-    upload_ids.append(upload_id1)
-    (upload_id2, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key, size=6*mb)
-    upload_ids.append(upload_id2)
-
-    key2="mymultipart2"
-    (upload_id3, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key2, size=5*mb)
-    upload_ids.append(upload_id3)
-
-    response = client.list_multipart_uploads(Bucket=bucket_name)
-    uploads = response['Uploads']
-    resp_uploadids = []
-
-    for upload in uploads:
-        resp_uploadids.append(upload['UploadId'])
-
-    for upload_id in upload_ids:
-        assert upload_id in resp_uploadids
-
-    client.abort_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id1)
-    client.abort_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id2)
-    client.abort_multipart_upload(Bucket=bucket_name, Key=key2, UploadId=upload_id3)
-
-@pytest.mark.fails_on_dbstore
-def test_list_multipart_upload_owner():
-    bucket_name = get_new_bucket()
-
-    client1 = get_client()
-    user1 = get_main_user_id()
-    name1 = get_main_display_name()
-
-    client2 = get_alt_client()
-    user2  = get_alt_user_id()
-    name2 = get_alt_display_name()
-
-    # add bucket acl for public read/write access
-    client1.put_bucket_acl(Bucket=bucket_name, ACL='public-read-write')
-
-    key1 = 'multipart1'
-    key2 = 'multipart2'
-    upload1 = client1.create_multipart_upload(Bucket=bucket_name, Key=key1)['UploadId']
-    try:
-        upload2 = client2.create_multipart_upload(Bucket=bucket_name, Key=key2)['UploadId']
-        try:
-            # match fields of an Upload from ListMultipartUploadsResult
-            def match(upload, key, uploadid, userid, username):
-                assert upload['Key'] == key
-                assert upload['UploadId'] == uploadid
-                assert upload['Initiator']['ID'] == userid
-                assert upload['Initiator']['DisplayName'] == username
-                assert upload['Owner']['ID'] == userid
-                assert upload['Owner']['DisplayName'] == username
-
-            # list uploads with client1
-            uploads1 = client1.list_multipart_uploads(Bucket=bucket_name)['Uploads']
-            assert len(uploads1) == 2
-            match(uploads1[0], key1, upload1, user1, name1)
-            match(uploads1[1], key2, upload2, user2, name2)
-
-            # list uploads with client2
-            uploads2 = client2.list_multipart_uploads(Bucket=bucket_name)['Uploads']
-            assert len(uploads2) == 2
-            match(uploads2[0], key1, upload1, user1, name1)
-            match(uploads2[1], key2, upload2, user2, name2)
-        finally:
-            client2.abort_multipart_upload(Bucket=bucket_name, Key=key2, UploadId=upload2)
-    finally:
-        client1.abort_multipart_upload(Bucket=bucket_name, Key=key1, UploadId=upload1)
-
-def test_multipart_upload_missing_part():
-    bucket_name = get_new_bucket()
-    client = get_client()
-    key="mymultipart"
-    size = 1
-
-    response = client.create_multipart_upload(Bucket=bucket_name, Key=key)
-    upload_id = response['UploadId']
-
-    parts = []
-    response = client.upload_part(UploadId=upload_id, Bucket=bucket_name, Key=key, PartNumber=1, Body=bytes('\x00', 'utf-8'))
-    # the part was uploaded as PartNumber 1; reference a missing PartNumber instead
-    parts.append({'ETag': response['ETag'].strip('"'), 'PartNumber': 9999})
-
-    e = assert_raises(ClientError, client.complete_multipart_upload, Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 400
-    assert error_code == 'InvalidPart'
-
-def test_multipart_upload_incorrect_etag():
-    bucket_name = get_new_bucket()
-    client = get_client()
-    key="mymultipart"
-    size = 1
-
-    response = client.create_multipart_upload(Bucket=bucket_name, Key=key)
-    upload_id = response['UploadId']
-
-    parts = []
-    response = client.upload_part(UploadId=upload_id, Bucket=bucket_name, Key=key, PartNumber=1, Body=bytes('\x00', 'utf-8'))
-    # 'ETag' should be "93b885adfe0da089cdf634904fd59f71"
-    parts.append({'ETag': "ffffffffffffffffffffffffffffffff", 'PartNumber': 1})
-
-    e = assert_raises(ClientError, client.complete_multipart_upload, Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 400
-    assert error_code == 'InvalidPart'
-
-@pytest.mark.fails_on_dbstore
-def test_multipart_get_part():
-    bucket_name = get_new_bucket()
-    client = get_client()
-    key = "mymultipart"
-
-    part_size = 5*1024*1024
-    part_sizes = 3 * [part_size] + [1*1024*1024]
-    part_count = len(part_sizes)
-    total_size = sum(part_sizes)
-
-    (upload_id, data, parts) = _multipart_upload(bucket_name, key, total_size, part_size, resend_parts=[2])
-
-    # request part before complete
-    e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key=key, PartNumber=1)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 404
-    assert error_code == 'NoSuchKey'
-
-    client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
-    assert len(parts) == part_count
-
-    for part, size in zip(parts, part_sizes):
-        response = client.head_object(Bucket=bucket_name, Key=key, PartNumber=part['PartNumber'])
-        assert response['PartsCount'] == part_count
-        assert response['ETag'] == '"{}"'.format(part['ETag'])
-
-        response = client.get_object(Bucket=bucket_name, Key=key, PartNumber=part['PartNumber'])
-        assert response['PartsCount'] == part_count
-        assert response['ETag'] == '"{}"'.format(part['ETag'])
-        assert response['ContentLength'] == size
-        # compare contents
-        for chunk in response['Body'].iter_chunks():
-            assert chunk.decode() == data[0:len(chunk)]
-            data = data[len(chunk):]
-
-    # request PartNumber out of range
-    e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key=key, PartNumber=5)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 400
-    assert error_code == 'InvalidPart'
-
-@pytest.mark.fails_on_dbstore
-def test_multipart_single_get_part():
-    bucket_name = get_new_bucket()
-    client = get_client()
-    key = "mymultipart"
-
-    part_size = 5*1024*1024
-    part_sizes = [part_size] # just one part
-    part_count = len(part_sizes)
-    total_size = sum(part_sizes)
-
-    (upload_id, data, parts) = _multipart_upload(bucket_name, key, total_size, part_size)
-
-    # request part before complete
-    e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key=key, PartNumber=1)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 404
-    assert error_code == 'NoSuchKey'
-
-    client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
-    assert len(parts) == part_count
-
-    for part, size in zip(parts, part_sizes):
-        response = client.head_object(Bucket=bucket_name, Key=key, PartNumber=part['PartNumber'])
-        assert response['PartsCount'] == part_count
-        assert response['ETag'] == '"{}"'.format(part['ETag'])
-
-        response = client.get_object(Bucket=bucket_name, Key=key, PartNumber=part['PartNumber'])
-        assert response['PartsCount'] == part_count
-        assert response['ETag'] == '"{}"'.format(part['ETag'])
-        assert response['ContentLength'] == size
-        # compare contents
-        for chunk in response['Body'].iter_chunks():
-            assert chunk.decode() == data[0:len(chunk)]
-            data = data[len(chunk):]
-
-    # request PartNumber out of range
-    e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key=key, PartNumber=5)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 400
-    assert error_code == 'InvalidPart'
-
-@pytest.mark.fails_on_dbstore
-def test_non_multipart_get_part():
-    bucket_name = get_new_bucket()
-    client = get_client()
-    key = "singlepart"
-
-    response = client.put_object(Bucket=bucket_name, Key=key, Body='body')
-    etag = response['ETag']
-
-    # request for PartNumber > 1 results in InvalidPart
-    e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key=key, PartNumber=2)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 400
-    assert error_code == 'InvalidPart'
-
-    # request for PartNumber = 1 gives back the entire object
-    response = client.get_object(Bucket=bucket_name, Key=key, PartNumber=1)
-    assert response['ETag'] == etag
-    assert _get_body(response) == 'body'
-
-
-def _simple_http_req_100_cont(host, port, is_secure, method, resource):
-    """
-    Send the specified request w/expect 100-continue
-    and await confirmation.
-    """
-    req_str = '{method} {resource} HTTP/1.1\r\nHost: {host}\r\nAccept-Encoding: identity\r\nContent-Length: 123\r\nExpect: 100-continue\r\n\r\n'.format(
-            method=method,
-            resource=resource,
-            host=host,
-            )
-
-    req = bytes(req_str, 'utf-8')
-
-    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
-    if is_secure:
-        s = ssl.wrap_socket(s)
-    s.settimeout(5)
-    s.connect((host, port))
-    s.send(req)
-
-    data = b''
-    try:
-        data = s.recv(1024)
-    except socket.error as msg:
-        print('got response: ', msg)
-        print('most likely the server doesn\'t support 100-continue')
-
-    s.close()
-    data_str = data.decode()
-    fields = data_str.split(' ')
-
-    assert fields[0].startswith('HTTP')
-
-    return fields[1]
-
-def test_100_continue():
-    bucket_name = get_new_bucket_name()
-    client = get_client()
-    client.create_bucket(Bucket=bucket_name)
-    objname='testobj'
-    resource = '/{bucket}/{obj}'.format(bucket=bucket_name, obj=objname)
-
-    host = get_config_host()
-    port = get_config_port()
-    is_secure = get_config_is_secure()
-
-    # NOTE: this test should also be exercised with is_secure set to True
-    status = _simple_http_req_100_cont(host, port, is_secure, 'PUT', resource)
-    assert status == '403'
-
-    client.put_bucket_acl(Bucket=bucket_name, ACL='public-read-write')
-
-    status = _simple_http_req_100_cont(host, port, is_secure, 'PUT', resource)
-    assert status == '100'
-
-def test_set_cors():
-    bucket_name = get_new_bucket()
-    client = get_client()
-    allowed_methods = ['GET', 'PUT']
-    allowed_origins = ['*.get', '*.put']
-
-    cors_config ={
-        'CORSRules': [
-            {'AllowedMethods': allowed_methods,
-             'AllowedOrigins': allowed_origins,
-            },
-        ]
-    }
-
-    e = assert_raises(ClientError, client.get_bucket_cors, Bucket=bucket_name)
-    status = _get_status(e.response)
-    assert status == 404
-
-    client.put_bucket_cors(Bucket=bucket_name, CORSConfiguration=cors_config)
-    response = client.get_bucket_cors(Bucket=bucket_name)
-    assert response['CORSRules'][0]['AllowedMethods'] == allowed_methods
-    assert response['CORSRules'][0]['AllowedOrigins'] == allowed_origins
-
-    client.delete_bucket_cors(Bucket=bucket_name)
-    e = assert_raises(ClientError, client.get_bucket_cors, Bucket=bucket_name)
-    status = _get_status(e.response)
-    assert status == 404
-
-def _cors_request_and_check(func, url, headers, expect_status, expect_allow_origin, expect_allow_methods):
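-    """
-    Issue the request and assert on the response status and the
-    Access-Control-Allow-Origin/-Methods response headers.
-    """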
-    r = func(url, headers=headers, verify=get_config_ssl_verify())
-    assert r.status_code == expect_status
-
-    assert r.headers.get('access-control-allow-origin', None) == expect_allow_origin
-    assert r.headers.get('access-control-allow-methods', None) == expect_allow_methods
-
-def test_cors_origin_response():
-    bucket_name = _setup_bucket_acl(bucket_acl='public-read')
-    client = get_client()
-
-    cors_config ={
-        'CORSRules': [
-            {'AllowedMethods': ['GET'],
-             'AllowedOrigins': ['*suffix'],
-            },
-            {'AllowedMethods': ['GET'],
-             'AllowedOrigins': ['start*end'],
-            },
-            {'AllowedMethods': ['GET'],
-             'AllowedOrigins': ['prefix*'],
-            },
-            {'AllowedMethods': ['PUT'],
-             'AllowedOrigins': ['*.put'],
-            }
-        ]
-    }
-
-    e = assert_raises(ClientError, client.get_bucket_cors, Bucket=bucket_name)
-    status = _get_status(e.response)
-    assert status == 404
-
-    client.put_bucket_cors(Bucket=bucket_name, CORSConfiguration=cors_config)
-
-    time.sleep(3)
-
-    url = _get_post_url(bucket_name)
-
-    _cors_request_and_check(requests.get, url, None, 200, None, None)
-    _cors_request_and_check(requests.get, url, {'Origin': 'foo.suffix'}, 200, 'foo.suffix', 'GET')
-    _cors_request_and_check(requests.get, url, {'Origin': 'foo.bar'}, 200, None, None)
-    _cors_request_and_check(requests.get, url, {'Origin': 'foo.suffix.get'}, 200, None, None)
-    _cors_request_and_check(requests.get, url, {'Origin': 'startend'}, 200, 'startend', 'GET')
-    _cors_request_and_check(requests.get, url, {'Origin': 'start1end'}, 200, 'start1end', 'GET')
-    _cors_request_and_check(requests.get, url, {'Origin': 'start12end'}, 200, 'start12end', 'GET')
-    _cors_request_and_check(requests.get, url, {'Origin': '0start12end'}, 200, None, None)
-    _cors_request_and_check(requests.get, url, {'Origin': 'prefix'}, 200, 'prefix', 'GET')
-    _cors_request_and_check(requests.get, url, {'Origin': 'prefix.suffix'}, 200, 'prefix.suffix', 'GET')
-    _cors_request_and_check(requests.get, url, {'Origin': 'bla.prefix'}, 200, None, None)
-
-    obj_url = '{u}/{o}'.format(u=url, o='bar')
-    _cors_request_and_check(requests.get, obj_url, {'Origin': 'foo.suffix'}, 404, 'foo.suffix', 'GET')
-    _cors_request_and_check(requests.put, obj_url, {'Origin': 'foo.suffix', 'Access-Control-Request-Method': 'GET',
-                                                    'content-length': '0'}, 403, 'foo.suffix', 'GET')
-    _cors_request_and_check(requests.put, obj_url, {'Origin': 'foo.suffix', 'Access-Control-Request-Method': 'PUT',
-                                                    'content-length': '0'}, 403, None, None)
-
-    _cors_request_and_check(requests.put, obj_url, {'Origin': 'foo.suffix', 'Access-Control-Request-Method': 'DELETE',
-                                                    'content-length': '0'}, 403, None, None)
-    _cors_request_and_check(requests.put, obj_url, {'Origin': 'foo.suffix', 'content-length': '0'}, 403, None, None)
-
-    _cors_request_and_check(requests.put, obj_url, {'Origin': 'foo.put', 'content-length': '0'}, 403, 'foo.put', 'PUT')
-
-    _cors_request_and_check(requests.get, obj_url, {'Origin': 'foo.suffix'}, 404, 'foo.suffix', 'GET')
-
-    _cors_request_and_check(requests.options, url, None, 400, None, None)
-    _cors_request_and_check(requests.options, url, {'Origin': 'foo.suffix'}, 400, None, None)
-    _cors_request_and_check(requests.options, url, {'Origin': 'bla'}, 400, None, None)
-    _cors_request_and_check(requests.options, obj_url, {'Origin': 'foo.suffix', 'Access-Control-Request-Method': 'GET',
-                                                    'content-length': '0'}, 200, 'foo.suffix', 'GET')
-    _cors_request_and_check(requests.options, url, {'Origin': 'foo.bar', 'Access-Control-Request-Method': 'GET'}, 403, None, None)
-    _cors_request_and_check(requests.options, url, {'Origin': 'foo.suffix.get', 'Access-Control-Request-Method': 'GET'}, 403, None, None)
-    _cors_request_and_check(requests.options, url, {'Origin': 'startend', 'Access-Control-Request-Method': 'GET'}, 200, 'startend', 'GET')
-    _cors_request_and_check(requests.options, url, {'Origin': 'start1end', 'Access-Control-Request-Method': 'GET'}, 200, 'start1end', 'GET')
-    _cors_request_and_check(requests.options, url, {'Origin': 'start12end', 'Access-Control-Request-Method': 'GET'}, 200, 'start12end', 'GET')
-    _cors_request_and_check(requests.options, url, {'Origin': '0start12end', 'Access-Control-Request-Method': 'GET'}, 403, None, None)
-    _cors_request_and_check(requests.options, url, {'Origin': 'prefix', 'Access-Control-Request-Method': 'GET'}, 200, 'prefix', 'GET')
-    _cors_request_and_check(requests.options, url, {'Origin': 'prefix.suffix', 'Access-Control-Request-Method': 'GET'}, 200, 'prefix.suffix', 'GET')
-    _cors_request_and_check(requests.options, url, {'Origin': 'bla.prefix', 'Access-Control-Request-Method': 'GET'}, 403, None, None)
-    _cors_request_and_check(requests.options, url, {'Origin': 'foo.put', 'Access-Control-Request-Method': 'GET'}, 403, None, None)
-    _cors_request_and_check(requests.options, url, {'Origin': 'foo.put', 'Access-Control-Request-Method': 'PUT'}, 200, 'foo.put', 'PUT')
-
-def test_cors_origin_wildcard():
-    bucket_name = _setup_bucket_acl(bucket_acl='public-read')
-    client = get_client()
-
-    cors_config ={
-        'CORSRules': [
-            {'AllowedMethods': ['GET'],
-             'AllowedOrigins': ['*'],
-            },
-        ]
-    }
-
-    e = assert_raises(ClientError, client.get_bucket_cors, Bucket=bucket_name)
-    status = _get_status(e.response)
-    assert status == 404
-
-    client.put_bucket_cors(Bucket=bucket_name, CORSConfiguration=cors_config)
-
-    time.sleep(3)
-
-    url = _get_post_url(bucket_name)
-
-    _cors_request_and_check(requests.get, url, None, 200, None, None)
-    _cors_request_and_check(requests.get, url, {'Origin': 'example.origin'}, 200, '*', 'GET')
-
-def test_cors_header_option():
-    bucket_name = _setup_bucket_acl(bucket_acl='public-read')
-    client = get_client()
-
-    cors_config ={
-        'CORSRules': [
-            {'AllowedMethods': ['GET'],
-             'AllowedOrigins': ['*'],
-             'ExposeHeaders': ['x-amz-meta-header1'],
-            },
-        ]
-    }
-
-    e = assert_raises(ClientError, client.get_bucket_cors, Bucket=bucket_name)
-    status = _get_status(e.response)
-    assert status == 404
-
-    client.put_bucket_cors(Bucket=bucket_name, CORSConfiguration=cors_config)
-
-    time.sleep(3)
-
-    url = _get_post_url(bucket_name)
-    obj_url = '{u}/{o}'.format(u=url, o='bar')
-
-    _cors_request_and_check(requests.options, obj_url, {'Origin': 'example.origin','Access-Control-Request-Headers':'x-amz-meta-header2','Access-Control-Request-Method':'GET'}, 403, None, None)
-
-def _test_cors_options_presigned_method(client, method, cannedACL=None):
-    bucket_name = _setup_bucket_object_acl('public-read', 'public-read', client=client)
-    params = {'Bucket': bucket_name, 'Key': 'foo'}
-
-    if cannedACL is not None:
-        params['ACL'] = cannedACL
-
-    if method == 'get_object':
-        httpMethod = 'GET'
-    elif method == 'put_object':
-        httpMethod = 'PUT'
-    else:
-        raise ValueError('invalid method')
-
-    url = client.generate_presigned_url(ClientMethod=method, Params=params, ExpiresIn=100000, HttpMethod=httpMethod)
-
-    res = requests.options(url, verify=get_config_ssl_verify())
-    assert res.status_code == 400
-
-    allowed_methods = [httpMethod]
-    allowed_origins = ['example']
-
-    cors_config ={
-        'CORSRules': [
-            {'AllowedMethods': allowed_methods,
-             'AllowedOrigins': allowed_origins,
-            },
-        ]
-    }
-
-    client.put_bucket_cors(Bucket=bucket_name, CORSConfiguration=cors_config)
-
-    headers = {
-        'Origin': 'example',
-        'Access-Control-Request-Method': httpMethod,
-    }
-    _cors_request_and_check(requests.options, url, headers,
-                            200, 'example', httpMethod)
-
-def test_cors_presigned_get_object():
-    _test_cors_options_presigned_method(
-        client=get_client(),
-        method='get_object',
-    )
-
-def test_cors_presigned_get_object_tenant():
-    _test_cors_options_presigned_method(
-        client=get_tenant_client(),
-        method='get_object',
-    )
-
-def test_cors_presigned_put_object():
-    _test_cors_options_presigned_method(
-        client=get_client(),
-        method='put_object',
-    )
-
-def test_cors_presigned_put_object_with_acl():
-    _test_cors_options_presigned_method(
-        client=get_client(),
-        method='put_object',
-        cannedACL='private',
-    )
-
-def test_cors_presigned_put_object_tenant():
-    _test_cors_options_presigned_method(
-        client=get_tenant_client(),
-        method='put_object',
-    )
-
-def test_cors_presigned_put_object_tenant_with_acl():
-    _test_cors_options_presigned_method(
-        client=get_tenant_client(),
-        method='put_object',
-        cannedACL='private',
-    )
-
-@pytest.mark.tagging
-def test_set_bucket_tagging():
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    tags={
-        'TagSet': [
-            {
-                'Key': 'Hello',
-                'Value': 'World'
-            },
-        ]
-    }
-
-    e = assert_raises(ClientError, client.get_bucket_tagging, Bucket=bucket_name)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 404
-    assert error_code == 'NoSuchTagSet'
-
-    client.put_bucket_tagging(Bucket=bucket_name, Tagging=tags)
-
-    response = client.get_bucket_tagging(Bucket=bucket_name)
-    assert len(response['TagSet']) == 1
-    assert response['TagSet'][0]['Key'] == 'Hello'
-    assert response['TagSet'][0]['Value'] == 'World'
-
-    response = client.delete_bucket_tagging(Bucket=bucket_name)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 204
-
-    e = assert_raises(ClientError, client.get_bucket_tagging, Bucket=bucket_name)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 404
-    assert error_code == 'NoSuchTagSet'
-
-
-class FakeFile(object):
-    """
-    file that simulates seek, tell, and current character
-    """
-    def __init__(self, char='A', interrupt=None):
-        self.offset = 0
-        self.char = bytes(char, 'utf-8')
-        self.interrupt = interrupt
-
-    def seek(self, offset, whence=os.SEEK_SET):
-        if whence == os.SEEK_SET:
-            self.offset = offset
-        elif whence == os.SEEK_END:
-            self.offset = self.size + offset
-        elif whence == os.SEEK_CUR:
-            self.offset += offset
-
-    def tell(self):
-        return self.offset
-
-class FakeWriteFile(FakeFile):
-    """
-    file that simulates interruptable reads of constant data
-    """
-    def __init__(self, size, char='A', interrupt=None):
-        FakeFile.__init__(self, char, interrupt)
-        self.size = size
-
-    def read(self, size=-1):
-        if size < 0:
-            size = self.size - self.offset
-        count = min(size, self.size - self.offset)
-        self.offset += count
-
-        # Sneaky! do stuff before we return (the last time)
-        if self.interrupt is not None and self.offset == self.size and count > 0:
-            self.interrupt()
-
-        return self.char*count
-
-class FakeReadFile(FakeFile):
-    """
-    file that accepts simulated writes, triggering the interrupt once data has started arriving
-    """
-    def __init__(self, size, char='A', interrupt=None):
-        FakeFile.__init__(self, char, interrupt)
-        self.interrupted = False
-        self.size = 0
-        self.expected_size = size
-
-    def write(self, chars):
-        assert chars == self.char*len(chars)
-        self.offset += len(chars)
-        self.size += len(chars)
-
-        # Sneaky! trigger the interrupt once, after data has been written
-        if not self.interrupted and self.interrupt is not None \
-                and self.offset > 0:
-            self.interrupt()
-            self.interrupted = True
-
-    def close(self):
-        assert self.size == self.expected_size
-
-class FakeFileVerifier(object):
-    """
-    file that verifies expected data has been written
-    """
-    def __init__(self, char=None):
-        self.char = char
-        self.size = 0
-
-    def write(self, data):
-        size = len(data)
-        if self.char is None:
-            self.char = data[0]
-        self.size += size
-        assert data.decode() == self.char*size
-
-def _verify_atomic_key_data(bucket_name, key, size=-1, char=None):
-    """
-    Make sure file is of the expected size and (simulated) content
-    """
-    fp_verify = FakeFileVerifier(char)
-    client = get_client()
-    client.download_fileobj(bucket_name, key, fp_verify)
-    if size >= 0:
-        assert fp_verify.size == size
-
-def _test_atomic_read(file_size):
-    """
-    Upload an object of A's, then start reading it back; while the
-    read is in progress, overwrite the object with B's.
-    Re-read the contents and confirm we get B's.
-    """
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-
-    fp_a = FakeWriteFile(file_size, 'A')
-    client.put_object(Bucket=bucket_name, Key='testobj', Body=fp_a)
-
-    fp_b = FakeWriteFile(file_size, 'B')
-    fp_a2 = FakeReadFile(file_size, 'A',
-        lambda: client.put_object(Bucket=bucket_name, Key='testobj', Body=fp_b)
-        )
-
-    read_client = get_client()
-
-    read_client.download_fileobj(bucket_name, 'testobj', fp_a2)
-    fp_a2.close()
-
-    _verify_atomic_key_data(bucket_name, 'testobj', file_size, 'B')
-
-def test_atomic_read_1mb():
-    _test_atomic_read(1024*1024)
-
-def test_atomic_read_4mb():
-    _test_atomic_read(1024*1024*4)
-
-def test_atomic_read_8mb():
-    _test_atomic_read(1024*1024*8)
-
-def _test_atomic_write(file_size):
-    """
-    Upload an object of A's and verify the contents are all A's.
-    Overwrite it with B's; while that write is still in progress,
-    verify the contents are still all A's.
-    Re-read the contents and confirm we get B's.
-    """
-    bucket_name = get_new_bucket()
-    client = get_client()
-    objname = 'testobj'
-
-
-    # create <file_size> file of A's
-    fp_a = FakeWriteFile(file_size, 'A')
-    client.put_object(Bucket=bucket_name, Key=objname, Body=fp_a)
-
-
-    # verify A's
-    _verify_atomic_key_data(bucket_name, objname, file_size, 'A')
-
-    # create <file_size> file of B's
-    # but try to verify the file before we finish writing all the B's
-    fp_b = FakeWriteFile(file_size, 'B',
-        lambda: _verify_atomic_key_data(bucket_name, objname, file_size, 'A')
-        )
-
-    client.put_object(Bucket=bucket_name, Key=objname, Body=fp_b)
-
-    # verify B's
-    _verify_atomic_key_data(bucket_name, objname, file_size, 'B')
-
-def test_atomic_write_1mb():
-    _test_atomic_write(1024*1024)
-
-def test_atomic_write_4mb():
-    _test_atomic_write(1024*1024*4)
-
-def test_atomic_write_8mb():
-    _test_atomic_write(1024*1024*8)
-
-def _test_atomic_dual_write(file_size):
-    """
-    Create an object, then issue two overlapping writes with different
-    contents and confirm the result is entirely one or the other.
-    """
-    bucket_name = get_new_bucket()
-    objname = 'testobj'
-    client = get_client()
-    client.put_object(Bucket=bucket_name, Key=objname)
-
-    # write <file_size> file of B's
-    # but before we're done, try to write all A's
-    fp_a = FakeWriteFile(file_size, 'A')
-
-    def rewind_put_fp_a():
-        fp_a.seek(0)
-        client.put_object(Bucket=bucket_name, Key=objname, Body=fp_a)
-
-    fp_b = FakeWriteFile(file_size, 'B', rewind_put_fp_a)
-    client.put_object(Bucket=bucket_name, Key=objname, Body=fp_b)
-
-    # verify the file
-    _verify_atomic_key_data(bucket_name, objname, file_size, 'B')
-
-def test_atomic_dual_write_1mb():
-    _test_atomic_dual_write(1024*1024)
-
-def test_atomic_dual_write_4mb():
-    _test_atomic_dual_write(1024*1024*4)
-
-def test_atomic_dual_write_8mb():
-    _test_atomic_dual_write(1024*1024*8)
-
-def _test_atomic_conditional_write(file_size):
-    """
-    Upload an object of A's and verify the contents are all A's.
-    Overwrite it with B's using an 'If-Match: *' conditional write;
-    while that write is still in progress, verify the contents are
-    still all A's. Re-read the contents and confirm we get B's.
-    """
-    bucket_name = get_new_bucket()
-    objname = 'testobj'
-    client = get_client()
-
-    # create <file_size> file of A's
-    fp_a = FakeWriteFile(file_size, 'A')
-    client.put_object(Bucket=bucket_name, Key=objname, Body=fp_a)
-
-    fp_b = FakeWriteFile(file_size, 'B',
-        lambda: _verify_atomic_key_data(bucket_name, objname, file_size, 'A')
-        )
-
-    # create <file_size> file of B's
-    # but try to verify the file before we finish writing all the B's
-    lf = (lambda **kwargs: kwargs['params']['headers'].update({'If-Match': '*'}))
-    client.meta.events.register('before-call.s3.PutObject', lf)
-    client.put_object(Bucket=bucket_name, Key=objname, Body=fp_b)
-
-    # verify B's
-    _verify_atomic_key_data(bucket_name, objname, file_size, 'B')
-
-@pytest.mark.fails_on_aws
-def test_atomic_conditional_write_1mb():
-    _test_atomic_conditional_write(1024*1024)
-
-def _test_atomic_dual_conditional_write(file_size):
-    """
-    Upload an object of A's, then overwrite it with C's using an
-    If-Match condition on the A's ETag. A competing write of B's lands
-    first, so the conditional write fails with PreconditionFailed and
-    the object ends up all B's.
-    """
-    bucket_name = get_new_bucket()
-    objname = 'testobj'
-    client = get_client()
-
-    fp_a = FakeWriteFile(file_size, 'A')
-    response = client.put_object(Bucket=bucket_name, Key=objname, Body=fp_a)
-    _verify_atomic_key_data(bucket_name, objname, file_size, 'A')
-    etag_fp_a = response['ETag'].replace('"', '')
-
-    # write <file_size> file of C's
-    # but before we're done, try to write all B's
-    fp_b = FakeWriteFile(file_size, 'B')
-    lf = (lambda **kwargs: kwargs['params']['headers'].update({'If-Match': etag_fp_a}))
-    client.meta.events.register('before-call.s3.PutObject', lf)
-    def rewind_put_fp_b():
-        fp_b.seek(0)
-        client.put_object(Bucket=bucket_name, Key=objname, Body=fp_b)
-
-    fp_c = FakeWriteFile(file_size, 'C', rewind_put_fp_b)
-
-    e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key=objname, Body=fp_c)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 412
-    assert error_code == 'PreconditionFailed'
-
-    # verify the file
-    _verify_atomic_key_data(bucket_name, objname, file_size, 'B')
-
-@pytest.mark.fails_on_aws
-# TODO: test not passing with SSL, fix this
-@pytest.mark.fails_on_rgw
-def test_atomic_dual_conditional_write_1mb():
-    _test_atomic_dual_conditional_write(1024*1024)
-
-@pytest.mark.fails_on_aws
-# TODO: test not passing with SSL, fix this
-@pytest.mark.fails_on_rgw
-def test_atomic_write_bucket_gone():
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    def remove_bucket():
-        client.delete_bucket(Bucket=bucket_name)
-
-    objname = 'foo'
-    fp_a = FakeWriteFile(1024*1024, 'A', remove_bucket)
-
-    e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key=objname, Body=fp_a)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 404
-    assert error_code == 'NoSuchBucket'
-
-def test_atomic_multipart_upload_write():
-    bucket_name = get_new_bucket()
-    client = get_client()
-    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
-
-    response = client.create_multipart_upload(Bucket=bucket_name, Key='foo')
-    upload_id = response['UploadId']
-
-    response = client.get_object(Bucket=bucket_name, Key='foo')
-    body = _get_body(response)
-    assert body == 'bar'
-
-    client.abort_multipart_upload(Bucket=bucket_name, Key='foo', UploadId=upload_id)
-
-    response = client.get_object(Bucket=bucket_name, Key='foo')
-    body = _get_body(response)
-    assert body == 'bar'
-
-class Counter:
-    def __init__(self, default_val):
-        self.val = default_val
-
-    def inc(self):
-        self.val = self.val + 1
-
-class ActionOnCount:
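-    """
-    Invoke 'action' (and store its result) once trigger() has been
-    called trigger_count times.
-    """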
-    def __init__(self, trigger_count, action):
-        self.count = 0
-        self.trigger_count = trigger_count
-        self.action = action
-        self.result = 0
-
-    def trigger(self):
-        self.count = self.count + 1
-
-        if self.count == self.trigger_count:
-            self.result = self.action()
-
-def test_multipart_resend_first_finishes_last():
-    bucket_name = get_new_bucket()
-    client = get_client()
-    key_name = "mymultipart"
-
-    response = client.create_multipart_upload(Bucket=bucket_name, Key=key_name)
-    upload_id = response['UploadId']
-
-    #file_size = 8*1024*1024
-    file_size = 8
-
-    counter = Counter(0)
-    # upload_part might read multiple times from the object
-    # first time when it calculates md5, second time when it writes data
-    # out. We want to interject only on the last time, but we can't be
-    # sure how many times it's going to read, so let's have a test run
-    # and count the number of reads
-
-    fp_dry_run = FakeWriteFile(file_size, 'C',
-        lambda: counter.inc()
-        )
-
-    parts = []
-
-    response = client.upload_part(UploadId=upload_id, Bucket=bucket_name, Key=key_name, PartNumber=1, Body=fp_dry_run)
-
-    parts.append({'ETag': response['ETag'].strip('"'), 'PartNumber': 1})
-    client.complete_multipart_upload(Bucket=bucket_name, Key=key_name, UploadId=upload_id, MultipartUpload={'Parts': parts})
-
-    client.delete_object(Bucket=bucket_name, Key=key_name)
-
-    # clear parts
-    parts[:] = []
-
-    # ok, now for the actual test
-    fp_b = FakeWriteFile(file_size, 'B')
-    def upload_fp_b():
-        response = client.upload_part(UploadId=upload_id, Bucket=bucket_name, Key=key_name, Body=fp_b, PartNumber=1)
-        parts.append({'ETag': response['ETag'].strip('"'), 'PartNumber': 1})
-
-    action = ActionOnCount(counter.val, lambda: upload_fp_b())
-
-    response = client.create_multipart_upload(Bucket=bucket_name, Key=key_name)
-    upload_id = response['UploadId']
-
-    fp_a = FakeWriteFile(file_size, 'A',
-        lambda: action.trigger()
-        )
-
-    response = client.upload_part(UploadId=upload_id, Bucket=bucket_name, Key=key_name, PartNumber=1, Body=fp_a)
-
-    parts.append({'ETag': response['ETag'].strip('"'), 'PartNumber': 1})
-    client.complete_multipart_upload(Bucket=bucket_name, Key=key_name, UploadId=upload_id, MultipartUpload={'Parts': parts})
-
-    _verify_atomic_key_data(bucket_name, key_name, file_size, 'A')
-
-@pytest.mark.fails_on_dbstore
-def test_ranged_request_response_code():
-    content = 'testcontent'
-
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    client.put_object(Bucket=bucket_name, Key='testobj', Body=content)
-    response = client.get_object(Bucket=bucket_name, Key='testobj', Range='bytes=4-7')
-
-    fetched_content = _get_body(response)
-    assert fetched_content == content[4:8]
-    assert response['ResponseMetadata']['HTTPHeaders']['content-range'] == 'bytes 4-7/11'
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 206
-
-def _generate_random_string(size):
-    return ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(size))
-
-@pytest.mark.fails_on_dbstore
-def test_ranged_big_request_response_code():
-    content = _generate_random_string(8*1024*1024)
-
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    client.put_object(Bucket=bucket_name, Key='testobj', Body=content)
-    response = client.get_object(Bucket=bucket_name, Key='testobj', Range='bytes=3145728-5242880')
-
-    fetched_content = _get_body(response)
-    assert fetched_content == content[3145728:5242881]
-    assert response['ResponseMetadata']['HTTPHeaders']['content-range'] == 'bytes 3145728-5242880/8388608'
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 206
-
-@pytest.mark.fails_on_dbstore
-def test_ranged_request_skip_leading_bytes_response_code():
-    content = 'testcontent'
-
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    client.put_object(Bucket=bucket_name, Key='testobj', Body=content)
-    response = client.get_object(Bucket=bucket_name, Key='testobj', Range='bytes=4-')
-
-    fetched_content = _get_body(response)
-    assert fetched_content == content[4:]
-    assert response['ResponseMetadata']['HTTPHeaders']['content-range'] == 'bytes 4-10/11'
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 206
-
-@pytest.mark.fails_on_dbstore
-def test_ranged_request_return_trailing_bytes_response_code():
-    content = 'testcontent'
-
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    client.put_object(Bucket=bucket_name, Key='testobj', Body=content)
-    response = client.get_object(Bucket=bucket_name, Key='testobj', Range='bytes=-7')
-
-    fetched_content = _get_body(response)
-    assert fetched_content == content[-7:]
-    assert response['ResponseMetadata']['HTTPHeaders']['content-range'] == 'bytes 4-10/11'
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 206
-
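-# Range arithmetic for the 11-byte body 'testcontent' used above (for reference):
-#   bytes=4-7  -> offsets 4..7 (inclusive)        -> Content-Range 'bytes 4-7/11'
-#   bytes=4-   -> offsets 4..10                   -> Content-Range 'bytes 4-10/11'
-#   bytes=-7   -> the last 7 bytes, offsets 4..10 -> Content-Range 'bytes 4-10/11'
-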
-def test_ranged_request_invalid_range():
-    content = 'testcontent'
-
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    client.put_object(Bucket=bucket_name, Key='testobj', Body=content)
-
-    # test invalid range
-    e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key='testobj', Range='bytes=40-50')
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 416
-    assert error_code == 'InvalidRange'
-
-def test_ranged_request_empty_object():
-    content = ''
-
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    client.put_object(Bucket=bucket_name, Key='testobj', Body=content)
-
-    # test invalid range
-    e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key='testobj', Range='bytes=40-50')
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 416
-    assert error_code == 'InvalidRange'
-
-def test_versioning_bucket_create_suspend():
-    bucket_name = get_new_bucket()
-    check_versioning(bucket_name, None)
-
-    check_configure_versioning_retry(bucket_name, "Suspended", "Suspended")
-    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
-    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
-    check_configure_versioning_retry(bucket_name, "Suspended", "Suspended")
-
-def check_obj_content(client, bucket_name, key, version_id, content):
-    response = client.get_object(Bucket=bucket_name, Key=key, VersionId=version_id)
-    if content is not None:
-        body = _get_body(response)
-        assert body == content
-    else:
-        assert response['DeleteMarker'] == True
-
-def check_obj_versions(client, bucket_name, key, version_ids, contents):
-    # check that each listed version points at the correct version id and content
-
-    response = client.list_object_versions(Bucket=bucket_name)
-    versions = response['Versions']
-    # list_object_versions() returns versions newest-first, while version_ids & contents are ordered oldest-first
-    versions.reverse()
-    i = 0
-
-    for version in versions:
-        assert version['VersionId'] == version_ids[i]
-        assert version['Key'] == key
-        check_obj_content(client, bucket_name, key, version['VersionId'], contents[i])
-        i += 1
-
-def create_multiple_versions(client, bucket_name, key, num_versions, version_ids = None, contents = None, check_versions = True):
-    contents = contents or []
-    version_ids = version_ids or []
-
-    for i in range(num_versions):
-        body = 'content-{i}'.format(i=i)
-        response = client.put_object(Bucket=bucket_name, Key=key, Body=body)
-        version_id = response['VersionId']
-
-        contents.append(body)
-        version_ids.append(version_id)
-
-#    if check_versions:
-#        check_obj_versions(client, bucket_name, key, version_ids, contents)
-
-    return (version_ids, contents)
-
-def remove_obj_version(client, bucket_name, key, version_ids, contents, index):
-    assert len(version_ids) == len(contents)
-    index = index % len(version_ids)
-    rm_version_id = version_ids.pop(index)
-    rm_content = contents.pop(index)
-
-    check_obj_content(client, bucket_name, key, rm_version_id, rm_content)
-
-    client.delete_object(Bucket=bucket_name, Key=key, VersionId=rm_version_id)
-
-    if len(version_ids) != 0:
-        check_obj_versions(client, bucket_name, key, version_ids, contents)
-
-def clean_up_bucket(client, bucket_name, key, version_ids):
-    for version_id in version_ids:
-        client.delete_object(Bucket=bucket_name, Key=key, VersionId=version_id)
-
-    client.delete_bucket(Bucket=bucket_name)
-
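-# Illustrative composition of the versioning helpers above (not itself a test):
-def _sketch_version_roundtrip(client, bucket_name, key):
-    version_ids, contents = create_multiple_versions(client, bucket_name, key, 3)
-    check_obj_versions(client, bucket_name, key, version_ids, contents)
-    clean_up_bucket(client, bucket_name, key, version_ids)
-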
-def _do_test_create_remove_versions(client, bucket_name, key, num_versions, remove_start_idx, idx_inc):
-    (version_ids, contents) = create_multiple_versions(client, bucket_name, key, num_versions)
-
-    idx = remove_start_idx
-
-    for j in range(num_versions):
-        remove_obj_version(client, bucket_name, key, version_ids, contents, idx)
-        idx += idx_inc
-
-    response = client.list_object_versions(Bucket=bucket_name)
-    if 'Versions' in response:
-        print(response['Versions'])
-
-
-def test_versioning_obj_create_read_remove():
-    bucket_name = get_new_bucket()
-    client = get_client()
-    client.put_bucket_versioning(Bucket=bucket_name, VersioningConfiguration={'MFADelete': 'Disabled', 'Status': 'Enabled'})
-    key = 'testobj'
-    num_versions = 5
-
-    _do_test_create_remove_versions(client, bucket_name, key, num_versions, -1, 0)
-    _do_test_create_remove_versions(client, bucket_name, key, num_versions, -1, 0)
-    _do_test_create_remove_versions(client, bucket_name, key, num_versions, 0, 0)
-    _do_test_create_remove_versions(client, bucket_name, key, num_versions, 1, 0)
-    _do_test_create_remove_versions(client, bucket_name, key, num_versions, 4, -1)
-    _do_test_create_remove_versions(client, bucket_name, key, num_versions, 3, 3)
-
-def test_versioning_obj_create_read_remove_head():
-    bucket_name = get_new_bucket()
-
-    client = get_client()
-    client.put_bucket_versioning(Bucket=bucket_name, VersioningConfiguration={'MFADelete': 'Disabled', 'Status': 'Enabled'})
-    key = 'testobj'
-    num_versions = 5
-
-    (version_ids, contents) = create_multiple_versions(client, bucket_name, key, num_versions)
-
-    # remove the current (head) version, then check the new head
-    removed_version_id = version_ids.pop()
-    contents.pop()
-    num_versions = num_versions-1
-
-    response = client.delete_object(Bucket=bucket_name, Key=key, VersionId=removed_version_id)
-    response = client.get_object(Bucket=bucket_name, Key=key)
-    body = _get_body(response)
-    assert body == contents[-1]
-
-    # add a delete marker
-    response = client.delete_object(Bucket=bucket_name, Key=key)
-    assert response['DeleteMarker'] == True
-
-    delete_marker_version_id = response['VersionId']
-    version_ids.append(delete_marker_version_id)
-
-    response = client.list_object_versions(Bucket=bucket_name)
-    assert len(response['Versions']) == num_versions
-    assert len(response['DeleteMarkers']) == 1
-    assert response['DeleteMarkers'][0]['VersionId'] == delete_marker_version_id
-
-    clean_up_bucket(client, bucket_name, key, version_ids)
-
-def test_versioning_obj_plain_null_version_removal():
-    bucket_name = get_new_bucket()
-    check_versioning(bucket_name, None)
-
-    client = get_client()
-    key = 'testobjfoo'
-    content = 'fooz'
-    client.put_object(Bucket=bucket_name, Key=key, Body=content)
-
-    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
-    client.delete_object(Bucket=bucket_name, Key=key, VersionId='null')
-
-    e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key=key)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 404
-    assert error_code == 'NoSuchKey'
-
-    response = client.list_object_versions(Bucket=bucket_name)
-    assert not 'Versions' in response
-
-def test_versioning_obj_plain_null_version_overwrite():
-    bucket_name = get_new_bucket()
-    check_versioning(bucket_name, None)
-
-    client = get_client()
-    key = 'testobjfoo'
-    content = 'fooz'
-    client.put_object(Bucket=bucket_name, Key=key, Body=content)
-
-    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
-
-    content2 = 'zzz'
-    response = client.put_object(Bucket=bucket_name, Key=key, Body=content2)
-    response = client.get_object(Bucket=bucket_name, Key=key)
-    body = _get_body(response)
-    assert body == content2
-
-    version_id = response['VersionId']
-    client.delete_object(Bucket=bucket_name, Key=key, VersionId=version_id)
-    response = client.get_object(Bucket=bucket_name, Key=key)
-    body = _get_body(response)
-    assert body == content
-
-    client.delete_object(Bucket=bucket_name, Key=key, VersionId='null')
-
-    e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key=key)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 404
-    assert error_code == 'NoSuchKey'
-
-    response = client.list_object_versions(Bucket=bucket_name)
-    assert not 'Versions' in response
-
-def test_versioning_obj_plain_null_version_overwrite_suspended():
-    bucket_name = get_new_bucket()
-    check_versioning(bucket_name, None)
-
-    client = get_client()
-    key = 'testobjbar'
-    content = 'foooz'
-    client.put_object(Bucket=bucket_name, Key=key, Body=content)
-
-    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
-    check_configure_versioning_retry(bucket_name, "Suspended", "Suspended")
-
-    content2 = 'zzz'
-    response = client.put_object(Bucket=bucket_name, Key=key, Body=content2)
-    response = client.get_object(Bucket=bucket_name, Key=key)
-    body = _get_body(response)
-    assert body == content2
-
-    response = client.list_object_versions(Bucket=bucket_name)
-    # original object with 'null' version id still counts as a version
-    assert len(response['Versions']) == 1
-
-    client.delete_object(Bucket=bucket_name, Key=key, VersionId='null')
-
-    e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key=key)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 404
-    assert error_code == 'NoSuchKey'
-
-    response = client.list_object_versions(Bucket=bucket_name)
-    assert not 'Versions' in response
-
-def delete_suspended_versioning_obj(client, bucket_name, key, version_ids, contents):
-    client.delete_object(Bucket=bucket_name, Key=key)
-
-    # with versioning suspended, the delete writes to the single 'null' version,
-    # so drop any old 'null' entry from the tracking lists
-    assert len(version_ids) == len(contents)
-    for i in reversed(range(len(version_ids))):
-        if version_ids[i] == 'null':
-            version_ids.pop(i)
-            contents.pop(i)
-
-    return (version_ids, contents)
-
-def overwrite_suspended_versioning_obj(client, bucket_name, key, version_ids, contents, content):
-    client.put_object(Bucket=bucket_name, Key=key, Body=content)
-
-    # with versioning suspended, the overwrite writes to the single 'null' version,
-    # so drop any old 'null' entry from the tracking lists
-    assert len(version_ids) == len(contents)
-    for i in reversed(range(len(version_ids))):
-        if version_ids[i] == 'null':
-            version_ids.pop(i)
-            contents.pop(i)
-
-    # add new content with 'null' version id to the end
-    contents.append(content)
-    version_ids.append('null')
-
-    return (version_ids, contents)
-
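-# Illustrative sketch of the behavior the two helpers above model: with versioning
-# suspended, an overwrite or delete targets the single 'null' version instead of
-# creating a new version id, e.g.:
-#
-#   client.put_object(Bucket=bucket_name, Key=key, Body='v1')   # writes 'null'
-#   client.put_object(Bucket=bucket_name, Key=key, Body='v2')   # overwrites 'null'
-#   client.delete_object(Bucket=bucket_name, Key=key)           # 'null' delete marker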
-
-def test_versioning_obj_suspend_versions():
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
-
-    key = 'testobj'
-    num_versions = 5
-
-    (version_ids, contents) = create_multiple_versions(client, bucket_name, key, num_versions)
-
-    check_configure_versioning_retry(bucket_name, "Suspended", "Suspended")
-
-    delete_suspended_versioning_obj(client, bucket_name, key, version_ids, contents)
-    delete_suspended_versioning_obj(client, bucket_name, key, version_ids, contents)
-
-    overwrite_suspended_versioning_obj(client, bucket_name, key, version_ids, contents, 'null content 1')
-    overwrite_suspended_versioning_obj(client, bucket_name, key, version_ids, contents, 'null content 2')
-    delete_suspended_versioning_obj(client, bucket_name, key, version_ids, contents)
-    overwrite_suspended_versioning_obj(client, bucket_name, key, version_ids, contents, 'null content 3')
-    delete_suspended_versioning_obj(client, bucket_name, key, version_ids, contents)
-
-    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
-    (version_ids, contents) = create_multiple_versions(client, bucket_name, key, 3, version_ids, contents)
-    num_versions += 3
-
-    for idx in range(num_versions):
-        remove_obj_version(client, bucket_name, key, version_ids, contents, idx)
-
-    assert len(version_ids) == 0
-    assert len(version_ids) == len(contents)
-
-@pytest.mark.fails_on_dbstore
-def test_versioning_obj_suspended_copy():
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
-
-    key1 = 'testobj1'
-    num_versions = 1
-    (version_ids, contents) = create_multiple_versions(client, bucket_name, key1, num_versions)
-
-    check_configure_versioning_retry(bucket_name, "Suspended", "Suspended")
-
-    content = 'null content'
-    overwrite_suspended_versioning_obj(client, bucket_name, key1, version_ids, contents, content)
-
-    # copy to another object
-    key2 = 'testobj2'
-    copy_source = {'Bucket': bucket_name, 'Key': key1}
-    client.copy_object(Bucket=bucket_name, Key=key2, CopySource=copy_source)
-
-    # delete the source object. keep the 'null' entry in version_ids
-    client.delete_object(Bucket=bucket_name, Key=key1)
-
-    # get the target object
-    response = client.get_object(Bucket=bucket_name, Key=key2)
-    body = _get_body(response)
-    assert body == content
-
-    # cleaning up
-    client.delete_object(Bucket=bucket_name, Key=key2)
-    client.delete_object(Bucket=bucket_name, Key=key2, VersionId='null')
-
-    clean_up_bucket(client, bucket_name, key1, version_ids)
-
-def test_versioning_obj_create_versions_remove_all():
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
-
-    key = 'testobj'
-    num_versions = 10
-
-    (version_ids, contents) = create_multiple_versions(client, bucket_name, key, num_versions)
-    for idx in range(num_versions):
-        remove_obj_version(client, bucket_name, key, version_ids, contents, idx)
-
-    assert len(version_ids) == 0
-    assert len(version_ids) == len(contents)
-
-def test_versioning_obj_create_versions_remove_special_names():
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
-
-    keys = ['_testobj', '_', ':', ' ']
-    num_versions = 10
-
-    for key in keys:
-        (version_ids, contents) = create_multiple_versions(client, bucket_name, key, num_versions)
-        for idx in range(num_versions):
-            remove_obj_version(client, bucket_name, key, version_ids, contents, idx)
-
-        assert len(version_ids) == 0
-        assert len(version_ids) == len(contents)
-
-@pytest.mark.fails_on_dbstore
-def test_versioning_obj_create_overwrite_multipart():
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
-
-    key = 'testobj'
-    num_versions = 3
-    contents = []
-    version_ids = []
-
-    for i in range(num_versions):
-        ret =  _do_test_multipart_upload_contents(bucket_name, key, 3)
-        contents.append(ret)
-
-    response = client.list_object_versions(Bucket=bucket_name)
-    for version in response['Versions']:
-        version_ids.append(version['VersionId'])
-
-    version_ids.reverse()
-    check_obj_versions(client, bucket_name, key, version_ids, contents)
-
-    for idx in range(num_versions):
-        remove_obj_version(client, bucket_name, key, version_ids, contents, idx)
-
-    assert len(version_ids) == 0
-    assert len(version_ids) == len(contents)
-
-def test_versioning_obj_list_marker():
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
-
-    key = 'testobj'
-    key2 = 'testobj-1'
-    num_versions = 5
-
-    contents = []
-    version_ids = []
-    contents2 = []
-    version_ids2 = []
-
-    # for key #1
-    for i in range(num_versions):
-        body = 'content-{i}'.format(i=i)
-        response = client.put_object(Bucket=bucket_name, Key=key, Body=body)
-        version_id = response['VersionId']
-
-        contents.append(body)
-        version_ids.append(version_id)
-
-    # for key #2
-    for i in range(num_versions):
-        body = 'content-{i}'.format(i=i)
-        response = client.put_object(Bucket=bucket_name, Key=key2, Body=body)
-        version_id = response['VersionId']
-
-        contents2.append(body)
-        version_ids2.append(version_id)
-
-    response = client.list_object_versions(Bucket=bucket_name)
-    versions = response['Versions']
-    # list_object_versions() returns versions newest-first, while version_ids & contents are ordered oldest-first
-    versions.reverse()
-
-    # after the reverse, versions[0:5] correspond to key2 and versions[5:10] to key
-    for i in range(5):
-        version = versions[i]
-        assert version['VersionId'] == version_ids2[i]
-        assert version['Key'] == key2
-        check_obj_content(client, bucket_name, key2, version['VersionId'], contents2[i])
-
-    for j in range(5):
-        version = versions[5 + j]
-        assert version['VersionId'] == version_ids[j]
-        assert version['Key'] == key
-        check_obj_content(client, bucket_name, key, version['VersionId'], contents[j])
-
-@pytest.mark.fails_on_dbstore
-def test_versioning_copy_obj_version():
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
-
-    key = 'testobj'
-    num_versions = 3
-
-    (version_ids, contents) = create_multiple_versions(client, bucket_name, key, num_versions)
-
-    for i in range(num_versions):
-        new_key_name = 'key_{i}'.format(i=i)
-        copy_source = {'Bucket': bucket_name, 'Key': key, 'VersionId': version_ids[i]}
-        client.copy_object(Bucket=bucket_name, CopySource=copy_source, Key=new_key_name)
-        response = client.get_object(Bucket=bucket_name, Key=new_key_name)
-        body = _get_body(response)
-        assert body == contents[i]
-
-    another_bucket_name = get_new_bucket()
-
-    for i in range(num_versions):
-        new_key_name = 'key_{i}'.format(i=i)
-        copy_source = {'Bucket': bucket_name, 'Key': key, 'VersionId': version_ids[i]}
-        client.copy_object(Bucket=another_bucket_name, CopySource=copy_source, Key=new_key_name)
-        response = client.get_object(Bucket=another_bucket_name, Key=new_key_name)
-        body = _get_body(response)
-        assert body == contents[i]
-
-    new_key_name = 'new_key'
-    copy_source = {'Bucket': bucket_name, 'Key': key}
-    client.copy_object(Bucket=another_bucket_name, CopySource=copy_source, Key=new_key_name)
-
-    response = client.get_object(Bucket=another_bucket_name, Key=new_key_name)
-    body = _get_body(response)
-    assert body == contents[-1]
-
-def test_versioning_multi_object_delete():
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
-
-    key = 'key'
-    num_versions = 2
-
-    (version_ids, contents) = create_multiple_versions(client, bucket_name, key, num_versions)
-    assert len(version_ids) == 2
-
-    # delete both versions
-    objects = [{'Key': key, 'VersionId': v} for v in version_ids]
-    client.delete_objects(Bucket=bucket_name, Delete={'Objects': objects})
-
-    response = client.list_object_versions(Bucket=bucket_name)
-    assert not 'Versions' in response
-
-    # now remove again, should all succeed due to idempotency
-    client.delete_objects(Bucket=bucket_name, Delete={'Objects': objects})
-
-    response = client.list_object_versions(Bucket=bucket_name)
-    assert not 'Versions' in response
-
-def test_versioning_multi_object_delete_with_marker():
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
-
-    key = 'key'
-    num_versions = 2
-
-    (version_ids, contents) = create_multiple_versions(client, bucket_name, key, num_versions)
-    assert len(version_ids) == num_versions
-    objects = [{'Key': key, 'VersionId': v} for v in version_ids]
-
-    # create a delete marker
-    response = client.delete_object(Bucket=bucket_name, Key=key)
-    assert response['DeleteMarker']
-    objects += [{'Key': key, 'VersionId': response['VersionId']}]
-
-    # delete all versions
-    client.delete_objects(Bucket=bucket_name, Delete={'Objects': objects})
-
-    response = client.list_object_versions(Bucket=bucket_name)
-    assert not 'Versions' in response
-    assert not 'DeleteMarkers' in response
-
-    # now remove again, should all succeed due to idempotency
-    client.delete_objects(Bucket=bucket_name, Delete={'Objects': objects})
-
-    response = client.list_object_versions(Bucket=bucket_name)
-    assert not 'Versions' in response
-    assert not 'DeleteMarkers' in response
-
-@pytest.mark.fails_on_dbstore
-def test_versioning_multi_object_delete_with_marker_create():
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
-
-    key = 'key'
-
-    # use delete_objects() to create a delete marker
-    response = client.delete_objects(Bucket=bucket_name, Delete={'Objects': [{'Key': key}]})
-    assert len(response['Deleted']) == 1
-    assert response['Deleted'][0]['DeleteMarker']
-    delete_marker_version_id = response['Deleted'][0]['DeleteMarkerVersionId']
-
-    response = client.list_object_versions(Bucket=bucket_name)
-    delete_markers = response['DeleteMarkers']
-
-    assert len(delete_markers) == 1
-    assert delete_marker_version_id == delete_markers[0]['VersionId']
-    assert key == delete_markers[0]['Key']
-
-def test_versioned_object_acl():
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
-
-    key = 'xyz'
-    num_versions = 3
-
-    (version_ids, contents) = create_multiple_versions(client, bucket_name, key, num_versions)
-
-    version_id = version_ids[1]
-
-    response = client.get_object_acl(Bucket=bucket_name, Key=key, VersionId=version_id)
-
-    display_name = get_main_display_name()
-    user_id = get_main_user_id()
-
-    assert response['Owner']['DisplayName'] == display_name
-    assert response['Owner']['ID'] == user_id
-
-    grants = response['Grants']
-    default_policy = [
-            dict(
-                Permission='FULL_CONTROL',
-                ID=user_id,
-                DisplayName=display_name,
-                URI=None,
-                EmailAddress=None,
-                Type='CanonicalUser',
-                ),
-            ]
-
-    check_grants(grants, default_policy)
-
-    client.put_object_acl(ACL='public-read',Bucket=bucket_name, Key=key, VersionId=version_id)
-
-    response = client.get_object_acl(Bucket=bucket_name, Key=key, VersionId=version_id)
-    grants = response['Grants']
-    check_grants(
-        grants,
-        [
-            dict(
-                Permission='READ',
-                ID=None,
-                DisplayName=None,
-                URI='http://acs.amazonaws.com/groups/global/AllUsers',
-                EmailAddress=None,
-                Type='Group',
-                ),
-            dict(
-                Permission='FULL_CONTROL',
-                ID=user_id,
-                DisplayName=display_name,
-                URI=None,
-                EmailAddress=None,
-                Type='CanonicalUser',
-                ),
-            ],
-        )
-
-    client.put_object(Bucket=bucket_name, Key=key)
-
-    response = client.get_object_acl(Bucket=bucket_name, Key=key)
-    grants = response['Grants']
-    check_grants(grants, default_policy)
-
-@pytest.mark.fails_on_dbstore
-def test_versioned_object_acl_no_version_specified():
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
-
-    key = 'xyz'
-    num_versions = 3
-
-    (version_ids, contents) = create_multiple_versions(client, bucket_name, key, num_versions)
-
-    response = client.get_object(Bucket=bucket_name, Key=key)
-    version_id = response['VersionId']
-
-    response = client.get_object_acl(Bucket=bucket_name, Key=key, VersionId=version_id)
-
-    display_name = get_main_display_name()
-    user_id = get_main_user_id()
-
-    assert response['Owner']['DisplayName'] == display_name
-    assert response['Owner']['ID'] == user_id
-
-    grants = response['Grants']
-    default_policy = [
-            dict(
-                Permission='FULL_CONTROL',
-                ID=user_id,
-                DisplayName=display_name,
-                URI=None,
-                EmailAddress=None,
-                Type='CanonicalUser',
-                ),
-            ]
-
-    check_grants(grants, default_policy)
-
-    client.put_object_acl(ACL='public-read',Bucket=bucket_name, Key=key)
-
-    response = client.get_object_acl(Bucket=bucket_name, Key=key, VersionId=version_id)
-    grants = response['Grants']
-    check_grants(
-        grants,
-        [
-            dict(
-                Permission='READ',
-                ID=None,
-                DisplayName=None,
-                URI='http://acs.amazonaws.com/groups/global/AllUsers',
-                EmailAddress=None,
-                Type='Group',
-                ),
-            dict(
-                Permission='FULL_CONTROL',
-                ID=user_id,
-                DisplayName=display_name,
-                URI=None,
-                EmailAddress=None,
-                Type='CanonicalUser',
-                ),
-            ],
-        )
-
-def _do_create_object(client, bucket_name, key, i):
-    body = 'data {i}'.format(i=i)
-    client.put_object(Bucket=bucket_name, Key=key, Body=body)
-
-def _do_remove_ver(client, bucket_name, key, version_id):
-    client.delete_object(Bucket=bucket_name, Key=key, VersionId=version_id)
-
-def _do_create_versioned_obj_concurrent(client, bucket_name, key, num):
-    t = []
-    for i in range(num):
-        thr = threading.Thread(target = _do_create_object, args=(client, bucket_name, key, i))
-        thr.start()
-        t.append(thr)
-    return t
-
-def _do_clear_versioned_bucket_concurrent(client, bucket_name):
-    t = []
-    response = client.list_object_versions(Bucket=bucket_name)
-    for version in response.get('Versions', []):
-        thr = threading.Thread(target = _do_remove_ver, args=(client, bucket_name, version['Key'], version['VersionId']))
-        thr.start()
-        t.append(thr)
-    return t
-
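-# _do_wait_completion() is assumed to be defined earlier in this file; a minimal
-# equivalent for the threads returned by the two helpers above would be:
-#
-#   def _do_wait_completion(threads):
-#       for thr in threads:
-#           thr.join()
-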
-# TODO: remove fails_on_rgw when https://tracker.ceph.com/issues/39142 is resolved
-@pytest.mark.fails_on_rgw
-def test_versioned_concurrent_object_create_concurrent_remove():
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
-
-    key = 'myobj'
-    num_versions = 5
-
-    for i in range(5):
-        t = _do_create_versioned_obj_concurrent(client, bucket_name, key, num_versions)
-        _do_wait_completion(t)
-
-        response = client.list_object_versions(Bucket=bucket_name)
-        versions = response['Versions']
-
-        assert len(versions) == num_versions
-
-        t = _do_clear_versioned_bucket_concurrent(client, bucket_name)
-        _do_wait_completion(t)
-
-        response = client.list_object_versions(Bucket=bucket_name)
-        assert not 'Versions' in response
-
-def test_versioned_concurrent_object_create_and_remove():
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
-
-    key = 'myobj'
-    num_versions = 3
-
-    all_threads = []
-
-    for i in range(3):
-
-        t = _do_create_versioned_obj_concurrent(client, bucket_name, key, num_versions)
-        all_threads.append(t)
-
-        t = _do_clear_versioned_bucket_concurrent(client, bucket_name)
-        all_threads.append(t)
-
-    for t in all_threads:
-        _do_wait_completion(t)
-
-    t = _do_clear_versioned_bucket_concurrent(client, bucket_name)
-    _do_wait_completion(t)
-
-    response = client.list_object_versions(Bucket=bucket_name)
-    assert not 'Versions' in response
-
-@pytest.mark.lifecycle
-def test_lifecycle_set():
-    bucket_name = get_new_bucket()
-    client = get_client()
-    rules=[{'ID': 'rule1', 'Expiration': {'Days': 1}, 'Prefix': 'test1/', 'Status':'Enabled'},
-           {'ID': 'rule2', 'Expiration': {'Days': 2}, 'Prefix': 'test2/', 'Status':'Disabled'}]
-    lifecycle = {'Rules': rules}
-    response = client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-@pytest.mark.lifecycle
-def test_lifecycle_get():
-    bucket_name = get_new_bucket()
-    client = get_client()
-    rules=[{'ID': 'test1/', 'Expiration': {'Days': 31}, 'Prefix': 'test1/', 'Status':'Enabled'},
-           {'ID': 'test2/', 'Expiration': {'Days': 120}, 'Prefix': 'test2/', 'Status':'Enabled'}]
-    lifecycle = {'Rules': rules}
-    client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
-    response = client.get_bucket_lifecycle_configuration(Bucket=bucket_name)
-    assert response['Rules'] == rules
-
-@pytest.mark.lifecycle
-def test_lifecycle_get_no_id():
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    rules=[{'Expiration': {'Days': 31}, 'Prefix': 'test1/', 'Status':'Enabled'},
-           {'Expiration': {'Days': 120}, 'Prefix': 'test2/', 'Status':'Enabled'}]
-    lifecycle = {'Rules': rules}
-    client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
-    response = client.get_bucket_lifecycle_configuration(Bucket=bucket_name)
-    current_lc = response['Rules']
-
-    Rule = namedtuple('Rule',['prefix','status','days'])
-    rules = {'rule1' : Rule('test1/','Enabled',31),
-             'rule2' : Rule('test2/','Enabled',120)}
-
-    for lc_rule in current_lc:
-        if lc_rule['Prefix'] == rules['rule1'].prefix:
-            assert lc_rule['Expiration']['Days'] == rules['rule1'].days
-            assert lc_rule['Status'] == rules['rule1'].status
-            assert 'ID' in lc_rule
-        elif lc_rule['Prefix'] == rules['rule2'].prefix:
-            assert lc_rule['Expiration']['Days'] == rules['rule2'].days
-            assert lc_rule['Status'] == rules['rule2'].status
-            assert 'ID' in lc_rule
-        else:
-            # the returned rule matches neither of the rules we supplied; something is wrong
-            print("rules not right")
-            assert False
-
-# The test harness for lifecycle is configured to treat days as 10 second intervals.
-@pytest.mark.lifecycle
-@pytest.mark.lifecycle_expiration
-@pytest.mark.fails_on_aws
-@pytest.mark.fails_on_dbstore
-def test_lifecycle_expiration():
-    bucket_name = _create_objects(keys=['expire1/foo', 'expire1/bar', 'keep2/foo',
-                                        'keep2/bar', 'expire3/foo', 'expire3/bar'])
-    client = get_client()
-    rules=[{'ID': 'rule1', 'Expiration': {'Days': 1}, 'Prefix': 'expire1/', 'Status':'Enabled'},
-           {'ID': 'rule2', 'Expiration': {'Days': 5}, 'Prefix': 'expire3/', 'Status':'Enabled'}]
-    lifecycle = {'Rules': rules}
-    client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
-    response = client.list_objects(Bucket=bucket_name)
-    init_objects = response['Contents']
-
-    lc_interval = get_lc_debug_interval()
-
-    time.sleep(3*lc_interval)
-    response = client.list_objects(Bucket=bucket_name)
-    expire1_objects = response['Contents']
-
-    time.sleep(lc_interval)
-    response = client.list_objects(Bucket=bucket_name)
-    keep2_objects = response['Contents']
-
-    time.sleep(3*lc_interval)
-    response = client.list_objects(Bucket=bucket_name)
-    expire3_objects = response['Contents']
-
-    assert len(init_objects) == 6
-    assert len(expire1_objects) == 4
-    assert len(keep2_objects) == 4
-    assert len(expire3_objects) == 2
-
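-# Timing sketch for the expiration checks above, assuming get_lc_debug_interval()
-# returns the 10-second "day" noted earlier: rule1 (Days: 1) fires at ~10s and
-# rule2 (Days: 5) at ~50s, so the checkpoints after 3, 3+1 and 3+1+3 intervals
-# (~30s, ~40s, ~70s) should see expire1/ gone, keep2/ untouched, and expire3/ gone.
-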
-@pytest.mark.lifecycle
-@pytest.mark.lifecycle_expiration
-@pytest.mark.fails_on_aws
-@pytest.mark.list_objects_v2
-@pytest.mark.fails_on_dbstore
-def test_lifecyclev2_expiration():
-    bucket_name = _create_objects(keys=['expire1/foo', 'expire1/bar', 'keep2/foo',
-                                        'keep2/bar', 'expire3/foo', 'expire3/bar'])
-    client = get_client()
-    rules=[{'ID': 'rule1', 'Expiration': {'Days': 1}, 'Prefix': 'expire1/', 'Status':'Enabled'},
-           {'ID': 'rule2', 'Expiration': {'Days': 5}, 'Prefix': 'expire3/', 'Status':'Enabled'}]
-    lifecycle = {'Rules': rules}
-    client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
-    response = client.list_objects_v2(Bucket=bucket_name)
-    init_objects = response['Contents']
-
-    lc_interval = get_lc_debug_interval()
-
-    time.sleep(3*lc_interval)
-    response = client.list_objects_v2(Bucket=bucket_name)
-    expire1_objects = response['Contents']
-
-    time.sleep(lc_interval)
-    response = client.list_objects_v2(Bucket=bucket_name)
-    keep2_objects = response['Contents']
-
-    time.sleep(3*lc_interval)
-    response = client.list_objects_v2(Bucket=bucket_name)
-    expire3_objects = response['Contents']
-
-    assert len(init_objects) == 6
-    assert len(expire1_objects) == 4
-    assert len(keep2_objects) == 4
-    assert len(expire3_objects) == 2
-
-@pytest.mark.lifecycle
-@pytest.mark.lifecycle_expiration
-@pytest.mark.fails_on_aws
-def test_lifecycle_expiration_versioning_enabled():
-    bucket_name = get_new_bucket()
-    client = get_client()
-    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
-    create_multiple_versions(client, bucket_name, "test1/a", 1)
-    client.delete_object(Bucket=bucket_name, Key="test1/a")
-
-    rules=[{'ID': 'rule1', 'Expiration': {'Days': 1}, 'Prefix': 'test1/', 'Status':'Enabled'}]
-    lifecycle = {'Rules': rules}
-    client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
-
-    lc_interval = get_lc_debug_interval()
-
-    time.sleep(3*lc_interval)
-
-    response  = client.list_object_versions(Bucket=bucket_name)
-    versions = response['Versions']
-    delete_markers = response['DeleteMarkers']
-    assert len(versions) == 1
-    assert len(delete_markers) == 1
-
-@pytest.mark.lifecycle
-@pytest.mark.lifecycle_expiration
-@pytest.mark.fails_on_aws
-def test_lifecycle_expiration_tags1():
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    tom_key = 'days1/tom'
-    tom_tagset = {'TagSet':
-                  [{'Key': 'tom', 'Value': 'sawyer'}]}
-
-    client.put_object(Bucket=bucket_name, Key=tom_key, Body='tom_body')
-
-    response = client.put_object_tagging(Bucket=bucket_name, Key=tom_key,
-                                         Tagging=tom_tagset)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    lifecycle_config = {
-        'Rules': [
-            {
-                'Expiration': {
-                    'Days': 1,
-                },
-                'ID': 'rule_tag1',
-                'Filter': {
-                    'Prefix': 'days1/',
-                    'Tag': {
-                        'Key': 'tom',
-                        'Value': 'sawyer'
-                    },
-                },
-                'Status': 'Enabled',
-            },
-        ]
-    }
-
-    response = client.put_bucket_lifecycle_configuration(
-        Bucket=bucket_name, LifecycleConfiguration=lifecycle_config)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    lc_interval = get_lc_debug_interval()
-
-    time.sleep(3*lc_interval)
-
-    # list the bucket; the object tagged tom=sawyer under days1/ should have expired
-    response = client.list_objects(Bucket=bucket_name)
-    try:
-        expire_objects = response['Contents']
-    except KeyError:
-        expire_objects = []
-
-    assert len(expire_objects) == 0
-
-# factor out common setup code
-def setup_lifecycle_tags2(client, bucket_name):
-    tom_key = 'days1/tom'
-    tom_tagset = {'TagSet':
-                  [{'Key': 'tom', 'Value': 'sawyer'}]}
-
-    client.put_object(Bucket=bucket_name, Key=tom_key, Body='tom_body')
-
-    response = client.put_object_tagging(Bucket=bucket_name, Key=tom_key,
-                                         Tagging=tom_tagset)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    huck_key = 'days1/huck'
-    huck_tagset = {
-        'TagSet':
-        [{'Key': 'tom', 'Value': 'sawyer'},
-         {'Key': 'huck', 'Value': 'finn'}]}
-
-    client.put_object(Bucket=bucket_name, Key=huck_key, Body='huck_body')
-
-    response = client.put_object_tagging(Bucket=bucket_name, Key=huck_key,
-                                         Tagging=huck_tagset)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    lifecycle_config = {
-        'Rules': [
-            {
-                'Expiration': {
-                    'Days': 1,
-                },
-                'ID': 'rule_tag1',
-                'Filter': {
-                    'Prefix': 'days1/',
-                    'Tag': {
-                        'Key': 'tom',
-                        'Value': 'sawyer'
-                    },
-                    'And': {
-                        'Prefix': 'days1',
-                        'Tags': [
-                            {
-                                'Key': 'huck',
-                                'Value': 'finn'
-                            },
-                        ]
-                    }
-                },
-                'Status': 'Enabled',
-            },
-        ]
-    }
-
-    response = client.put_bucket_lifecycle_configuration(
-        Bucket=bucket_name, LifecycleConfiguration=lifecycle_config)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-    return response
-
-@pytest.mark.lifecycle
-@pytest.mark.lifecycle_expiration
-@pytest.mark.fails_on_aws
-@pytest.mark.fails_on_dbstore
-def test_lifecycle_expiration_tags2():
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    response = setup_lifecycle_tags2(client, bucket_name)
-
-    lc_interval = get_lc_debug_interval()
-
-    time.sleep(3*lc_interval)
-    response = client.list_objects(Bucket=bucket_name)
-    expire1_objects = response['Contents']
-
-    assert len(expire1_objects) == 1
-
-@pytest.mark.lifecycle
-@pytest.mark.lifecycle_expiration
-@pytest.mark.fails_on_aws
-@pytest.mark.fails_on_dbstore
-def test_lifecycle_expiration_versioned_tags2():
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    # mix in versioning
-    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
-
-    response = setup_lifecycle_tags2(client, bucket_name)
-
-    lc_interval = get_lc_debug_interval()
-
-    time.sleep(3*lc_interval)
-    response = client.list_objects(Bucket=bucket_name)
-    expire1_objects = response['Contents']
-
-    assert len(expire1_objects) == 1
-
-# setup for a scenario based on Vidushi Mishra's report in rhbz#1877737
-def setup_lifecycle_noncur_tags(client, bucket_name, days):
-
-    # first create and tag the objects (10 versions of 1)
-    key = "myobject_"
-    tagset = {'TagSet':
-              [{'Key': 'vidushi', 'Value': 'mishra'}]}
-
-    for ix in range(10):
-        body = "%s v%d" % (key, ix)
-        response = client.put_object(Bucket=bucket_name, Key=key, Body=body)
-        assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-        response = client.put_object_tagging(Bucket=bucket_name, Key=key,
-                                             Tagging=tagset)
-        assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    lifecycle_config = {
-        'Rules': [
-            {
-                'NoncurrentVersionExpiration': {
-                    'NoncurrentDays': days,
-                },
-                'ID': 'rule_tag1',
-                'Filter': {
-                    'Prefix': '',
-                    'Tag': {
-                        'Key': 'vidushi',
-                        'Value': 'mishra'
-                    },
-                },
-                'Status': 'Enabled',
-            },
-        ]
-    }
-
-    response = client.put_bucket_lifecycle_configuration(
-        Bucket=bucket_name, LifecycleConfiguration=lifecycle_config)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-    return response
-
-def verify_lifecycle_expiration_noncur_tags(client, bucket_name, secs):
-    time.sleep(secs)
-    try:
-        response = client.list_object_versions(Bucket=bucket_name)
-        objs_list = response['Versions']
-    except KeyError:
-        objs_list = []
-    return len(objs_list)
-
-@pytest.mark.lifecycle
-@pytest.mark.lifecycle_expiration
-@pytest.mark.fails_on_aws
-@pytest.mark.fails_on_dbstore
-def test_lifecycle_expiration_noncur_tags1():
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
-
-    # create 10 object versions (9 noncurrent) and a tag-filter
-    # noncurrent version expiration at 4 "days"
-    response = setup_lifecycle_noncur_tags(client, bucket_name, 4)
-
-    lc_interval = get_lc_debug_interval()
-
-    num_objs = verify_lifecycle_expiration_noncur_tags(
-        client, bucket_name, 2*lc_interval)
-
-    # at T+20, 10 objects should exist
-    assert num_objs == 10
-
-    num_objs = verify_lifecycle_expiration_noncur_tags(
-        client, bucket_name, 5*lc_interval)
-
-    # at T+70, only the current object version should exist
-    assert num_objs == 1
-
-def wait_interval_list_object_versions(client, bucket_name, secs):
-    time.sleep(secs)
-    try:
-        response = client.list_object_versions(Bucket=bucket_name)
-        objs_list = response['Versions']
-    except KeyError:
-        objs_list = []
-    return len(objs_list)
-
-@pytest.mark.lifecycle
-@pytest.mark.lifecycle_expiration
-@pytest.mark.fails_on_aws
-@pytest.mark.fails_on_dbstore
-def test_lifecycle_expiration_newer_noncurrent():
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
-
-    # create 10 object versions (9 noncurrent)
-    key = "myobject_"
-
-    for ix in range(10):
-        body = "%s v%d" % (key, ix)
-        response = client.put_object(Bucket=bucket_name, Key=key, Body=body)
-        assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    # add a lifecycle rule which sets newer-noncurrent-versions to 5
-    days = 1
-    lifecycle_config = {
-        'Rules': [
-            {
-                'NoncurrentVersionExpiration': {
-                    'NoncurrentDays': days,
-                    'NewerNoncurrentVersions': 5,
-                },
-                'ID': 'newer_noncurrent1',
-                'Filter': {
-                    'Prefix': '',
-                },
-                'Status': 'Enabled',
-            },
-        ]
-    }
-
-    response = client.put_bucket_lifecycle_configuration(
-        Bucket=bucket_name, LifecycleConfiguration=lifecycle_config)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    lc_interval = get_lc_debug_interval()
-
-    num_objs = wait_interval_list_object_versions(
-        client, bucket_name, 2*lc_interval)
-
-    # at T+20, 6 objects should exist (1 current and the 5 newest noncurrent)
-    assert num_objs == 6
-
-def get_byte_buffer(nbytes):
-    # build an in-memory buffer of nbytes 'b' characters, positioned at the start
-    return BytesIO(b"b" * nbytes)
-
-@pytest.mark.lifecycle
-@pytest.mark.lifecycle_expiration
-@pytest.mark.fails_on_aws
-@pytest.mark.fails_on_dbstore
-def test_lifecycle_expiration_size_gt():
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
-
-    # create one object lt and one object gt 2000 bytes
-    key = "myobject_small"
-    body = get_byte_buffer(1000)
-    response = client.put_object(Bucket=bucket_name, Key=key, Body=body)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    key = "myobject_big"
-    body = get_byte_buffer(3000)
-    response = client.put_object(Bucket=bucket_name, Key=key, Body=body)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    # add a lifecycle rule which expires objects greater than 2000 bytes
-    days = 1
-    lifecycle_config = {
-        'Rules': [
-            {
-                'Expiration': {
-                    'Days': days
-                },
-                'ID': 'object_gt1',
-                'Filter': {
-                    'Prefix': '',
-                    'ObjectSizeGreaterThan': 2000
-                },
-                'Status': 'Enabled',
-            },
-        ]
-    }
-
-    response = client.put_bucket_lifecycle_configuration(
-        Bucket=bucket_name, LifecycleConfiguration=lifecycle_config)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    lc_interval = get_lc_debug_interval()
-    time.sleep(10*lc_interval)
-
-    # we should find only the small object present
-    response = client.list_objects(Bucket=bucket_name)
-    objects = response['Contents']
-
-    assert len(objects) == 1
-    assert objects[0]['Key'] == "myobject_small"
-
-@pytest.mark.lifecycle
-@pytest.mark.lifecycle_expiration
-@pytest.mark.fails_on_aws
-@pytest.mark.fails_on_dbstore
-def test_lifecycle_expiration_size_lt():
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
-
-    # create one object lt and one object gt 2000 bytes
-    key = "myobject_small"
-    body = get_byte_buffer(1000)
-    response = client.put_object(Bucket=bucket_name, Key=key, Body=body)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    key = "myobject_big"
-    body = get_byte_buffer(3000)
-    response = client.put_object(Bucket=bucket_name, Key=key, Body=body)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    # add a lifecycle rule which expires objects smaller than 2000 bytes
-    days = 1
-    lifecycle_config = {
-        'Rules': [
-            {
-                'Expiration': {
-                    'Days': days
-                },
-                'ID': 'object_lt1',
-                'Filter': {
-                    'Prefix': '',
-                    'ObjectSizeLessThan': 2000
-                },
-                'Status': 'Enabled',
-            },
-        ]
-    }
-
-    response = client.put_bucket_lifecycle_configuration(
-        Bucket=bucket_name, LifecycleConfiguration=lifecycle_config)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    lc_interval = get_lc_debug_interval()
-    time.sleep(2*lc_interval)
-
-    # we should find only the large object present
-    response = client.list_objects(Bucket=bucket_name)
-    objects = response['Contents']
-
-    assert len(objects) == 1
-    assert objects[0]['Key'] == "myobject_big"
-
-@pytest.mark.lifecycle
-def test_lifecycle_id_too_long():
-    bucket_name = get_new_bucket()
-    client = get_client()
-    rules=[{'ID': 256*'a', 'Expiration': {'Days': 2}, 'Prefix': 'test1/', 'Status':'Enabled'}]
-    lifecycle = {'Rules': rules}
-
-    e = assert_raises(ClientError, client.put_bucket_lifecycle_configuration, Bucket=bucket_name, LifecycleConfiguration=lifecycle)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 400
-    assert error_code == 'InvalidArgument'
-
-@pytest.mark.lifecycle
-def test_lifecycle_same_id():
-    bucket_name = get_new_bucket()
-    client = get_client()
-    rules=[{'ID': 'rule1', 'Expiration': {'Days': 1}, 'Prefix': 'test1/', 'Status':'Enabled'},
-           {'ID': 'rule1', 'Expiration': {'Days': 2}, 'Prefix': 'test2/', 'Status':'Enabled'}]
-    lifecycle = {'Rules': rules}
-
-    e = assert_raises(ClientError, client.put_bucket_lifecycle_configuration, Bucket=bucket_name, LifecycleConfiguration=lifecycle)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 400
-    assert error_code == 'InvalidArgument'
-
-@pytest.mark.lifecycle
-def test_lifecycle_invalid_status():
-    bucket_name = get_new_bucket()
-    client = get_client()
-    rules=[{'ID': 'rule1', 'Expiration': {'Days': 2}, 'Prefix': 'test1/', 'Status':'enabled'}]
-    lifecycle = {'Rules': rules}
-
-    e = assert_raises(ClientError, client.put_bucket_lifecycle_configuration, Bucket=bucket_name, LifecycleConfiguration=lifecycle)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 400
-    assert error_code == 'MalformedXML'
-
-    rules=[{'ID': 'rule1', 'Expiration': {'Days': 2}, 'Prefix': 'test1/', 'Status':'disabled'}]
-    lifecycle = {'Rules': rules}
-
-    e = assert_raises(ClientError, client.put_bucket_lifecycle, Bucket=bucket_name, LifecycleConfiguration=lifecycle)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 400
-    assert error_code == 'MalformedXML'
-
-    rules=[{'ID': 'rule1', 'Expiration': {'Days': 2}, 'Prefix': 'test1/', 'Status':'invalid'}]
-    lifecycle = {'Rules': rules}
-
-    e = assert_raises(ClientError, client.put_bucket_lifecycle_configuration, Bucket=bucket_name, LifecycleConfiguration=lifecycle)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 400
-    assert error_code == 'MalformedXML'
-
-@pytest.mark.lifecycle
-def test_lifecycle_set_date():
-    bucket_name = get_new_bucket()
-    client = get_client()
-    rules=[{'ID': 'rule1', 'Expiration': {'Date': '2017-09-27'}, 'Prefix': 'test1/', 'Status':'Enabled'}]
-    lifecycle = {'Rules': rules}
-
-    response = client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-@pytest.mark.lifecycle
-def test_lifecycle_set_invalid_date():
-    bucket_name = get_new_bucket()
-    client = get_client()
-    rules=[{'ID': 'rule1', 'Expiration': {'Date': '20200101'}, 'Prefix': 'test1/', 'Status':'Enabled'}]
-    lifecycle = {'Rules': rules}
-
-    e = assert_raises(ClientError, client.put_bucket_lifecycle_configuration, Bucket=bucket_name, LifecycleConfiguration=lifecycle)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 400
-
-@pytest.mark.lifecycle
-@pytest.mark.lifecycle_expiration
-@pytest.mark.fails_on_aws
-@pytest.mark.fails_on_dbstore
-def test_lifecycle_expiration_date():
-    bucket_name = _create_objects(keys=['past/foo', 'future/bar'])
-    client = get_client()
-    rules=[{'ID': 'rule1', 'Expiration': {'Date': '2015-01-01'}, 'Prefix': 'past/', 'Status':'Enabled'},
-           {'ID': 'rule2', 'Expiration': {'Date': '2030-01-01'}, 'Prefix': 'future/', 'Status':'Enabled'}]
-    lifecycle = {'Rules': rules}
-    client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
-    response = client.list_objects(Bucket=bucket_name)
-    init_objects = response['Contents']
-
-    lc_interval = get_lc_debug_interval()
-
-    # Wait for first expiration (plus fudge to handle the timer window)
-    time.sleep(3*lc_interval)
-    response = client.list_objects(Bucket=bucket_name)
-    expire_objects = response['Contents']
-
-    assert len(init_objects) == 2
-    assert len(expire_objects) == 1
-
-@pytest.mark.lifecycle
-@pytest.mark.lifecycle_expiration
-def test_lifecycle_expiration_days0():
-    bucket_name = _create_objects(keys=['days0/foo', 'days0/bar'])
-    client = get_client()
-
-    rules=[{'Expiration': {'Days': 0}, 'ID': 'rule1', 'Prefix': 'days0/', 'Status':'Enabled'}]
-    lifecycle = {'Rules': rules}
-
-    # days: 0 is legal in a transition rule, but not legal in an
-    # expiration rule
-    response_code = ""
-    try:
-        response = client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
-    except botocore.exceptions.ClientError as e:
-        response_code = e.response['Error']['Code']
-
-    assert response_code == 'InvalidArgument'
-
-
-def setup_lifecycle_expiration(client, bucket_name, rule_id, delta_days,
-                                    rule_prefix):
-    rules=[{'ID': rule_id,
-            'Expiration': {'Days': delta_days}, 'Prefix': rule_prefix,
-            'Status':'Enabled'}]
-    lifecycle = {'Rules': rules}
-    response = client.put_bucket_lifecycle_configuration(
-        Bucket=bucket_name, LifecycleConfiguration=lifecycle)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    key = rule_prefix + 'foo'
-    body = 'bar'
-    response = client.put_object(Bucket=bucket_name, Key=key, Body=body)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-    return response
-
-def check_lifecycle_expiration_header(response, start_time, rule_id,
-                                      delta_days):
-    expr_exists = ('x-amz-expiration' in response['ResponseMetadata']['HTTPHeaders'])
-    if (not expr_exists):
-        return False
-    expr_hdr = response['ResponseMetadata']['HTTPHeaders']['x-amz-expiration']
-
-    m = re.search(r'expiry-date="(.+)", rule-id="(.+)"', expr_hdr)
-
-    expiration = dateutil.parser.parse(m.group(1))
-    days_to_expire = ((expiration.replace(tzinfo=None) - start_time).days == delta_days)
-    rule_eq_id = (m.group(2) == rule_id)
-
-    return  days_to_expire and rule_eq_id
-
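-# Illustrative only (the exact date format is an assumption): the x-amz-expiration
-# value parsed above looks like
-#   expiry-date="Sat, 01 Jan 2022 00:00:00 GMT", rule-id="rule1"
-# so group(1) is the expiry date and group(2) the id of the matching rule.
-def _sketch_parse_expiration_header(expr_hdr):
-    m = re.search(r'expiry-date="(.+)", rule-id="(.+)"', expr_hdr)
-    return dateutil.parser.parse(m.group(1)), m.group(2)
-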
-@pytest.mark.lifecycle
-@pytest.mark.lifecycle_expiration
-def test_lifecycle_expiration_header_put():
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    now = datetime.datetime.utcnow()
-    response = setup_lifecycle_expiration(
-        client, bucket_name, 'rule1', 1, 'days1/')
-    assert check_lifecycle_expiration_header(response, now, 'rule1', 1)
-
-@pytest.mark.lifecycle
-@pytest.mark.lifecycle_expiration
-@pytest.mark.fails_on_dbstore
-def test_lifecycle_expiration_header_head():
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    now = datetime.datetime.utcnow()
-    response = setup_lifecycle_expiration(
-        client, bucket_name, 'rule1', 1, 'days1/')
-
-    key = 'days1/' + 'foo'
-
-    # stat the object, check header
-    response = client.head_object(Bucket=bucket_name, Key=key)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-    assert check_lifecycle_expiration_header(response, now, 'rule1', 1)
-
-@pytest.mark.lifecycle
-@pytest.mark.lifecycle_expiration
-@pytest.mark.fails_on_dbstore
-def test_lifecycle_expiration_header_tags_head():
-    bucket_name = get_new_bucket()
-    client = get_client()
-    lifecycle={
-        "Rules": [
-        {
-            "Filter": {
-                "Tag": {"Key": "key1", "Value": "tag1"}
-            },
-            "Status": "Enabled",
-            "Expiration": {
-                "Days": 1
-            },
-            "ID": "rule1"
-            },
-        ]
-    }
-    response = client.put_bucket_lifecycle_configuration(
-        Bucket=bucket_name, LifecycleConfiguration=lifecycle)
-    key1 = "obj_key1"
-    body1 = "obj_key1_body"
-    tags1={'TagSet': [{'Key': 'key1', 'Value': 'tag1'},
-          {'Key': 'key5','Value': 'tag5'}]}
-    response = client.put_object(Bucket=bucket_name, Key=key1, Body=body1)
-    response = client.put_object_tagging(Bucket=bucket_name, Key=key1,Tagging=tags1)
-
-    # stat the object, check header
-    response = client.head_object(Bucket=bucket_name, Key=key1)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-    assert check_lifecycle_expiration_header(response, datetime.datetime.utcnow(), 'rule1', 1)
-
-    # test that the header is not returned when it should not be
-    lifecycle={
-        "Rules": [
-        {
-            "Filter": {
-                "Tag": {"Key": "key2", "Value": "tag1"}
-            },
-            "Status": "Enabled",
-            "Expiration": {
-                "Days": 1
-            },
-            "ID": "rule1"
-            },
-        ]
-    }
-    response = client.put_bucket_lifecycle_configuration(
-        Bucket=bucket_name, LifecycleConfiguration=lifecycle)
-    # stat the object, check header
-    response = client.head_object(Bucket=bucket_name, Key=key1)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-    assert check_lifecycle_expiration_header(response, datetime.datetime.utcnow(), 'rule1', 1) == False
-
-@pytest.mark.lifecycle
-@pytest.mark.lifecycle_expiration
-@pytest.mark.fails_on_dbstore
-def test_lifecycle_expiration_header_and_tags_head():
-    now = datetime.datetime.utcnow()
-    bucket_name = get_new_bucket()
-    client = get_client()
-    lifecycle={
-        "Rules": [
-        {
-            "Filter": {
-                "And": {
-                    "Tags": [
-                        {
-                            "Key": "key1",
-                            "Value": "tag1"
-                        },
-                        {
-                            "Key": "key5",
-                            "Value": "tag6"
-                        }
-                    ]
-                }
-            },
-            "Status": "Enabled",
-            "Expiration": {
-                "Days": 1
-            },
-            "ID": "rule1"
-            },
-        ]
-    }
-    response = client.put_bucket_lifecycle_configuration(
-        Bucket=bucket_name, LifecycleConfiguration=lifecycle)
-    key1 = "obj_key1"
-    body1 = "obj_key1_body"
-    tags1={'TagSet': [{'Key': 'key1', 'Value': 'tag1'},
-          {'Key': 'key5','Value': 'tag5'}]}
-    response = client.put_object(Bucket=bucket_name, Key=key1, Body=body1)
-    response = client.put_object_tagging(Bucket=bucket_name, Key=key1,Tagging=tags1)
-
-    # stat the object, check header
-    response = client.head_object(Bucket=bucket_name, Key=key1)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-    assert check_lifecycle_expiration_header(response, datetime.datetime.utcnow(), 'rule1', 1) == False
-
-@pytest.mark.lifecycle
-def test_lifecycle_set_noncurrent():
-    bucket_name = _create_objects(keys=['past/foo', 'future/bar'])
-    client = get_client()
-    rules=[{'ID': 'rule1', 'NoncurrentVersionExpiration': {'NoncurrentDays': 2}, 'Prefix': 'past/', 'Status':'Enabled'},
-           {'ID': 'rule2', 'NoncurrentVersionExpiration': {'NoncurrentDays': 3}, 'Prefix': 'future/', 'Status':'Enabled'}]
-    lifecycle = {'Rules': rules}
-    response = client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-@pytest.mark.lifecycle
-@pytest.mark.lifecycle_expiration
-@pytest.mark.fails_on_aws
-@pytest.mark.fails_on_dbstore
-def test_lifecycle_noncur_expiration():
-    bucket_name = get_new_bucket()
-    client = get_client()
-    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
-    create_multiple_versions(client, bucket_name, "test1/a", 3)
-    # not checking the object contents on the second run, because the function doesn't support multiple checks
-    create_multiple_versions(client, bucket_name, "test2/abc", 3, check_versions=False)
-
-    response  = client.list_object_versions(Bucket=bucket_name)
-    init_versions = response['Versions']
-
-    rules=[{'ID': 'rule1', 'NoncurrentVersionExpiration': {'NoncurrentDays': 2}, 'Prefix': 'test1/', 'Status':'Enabled'}]
-    lifecycle = {'Rules': rules}
-    client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
-
-    lc_interval = get_lc_debug_interval()
-
-    # Wait for first expiration (plus fudge to handle the timer window)
-    time.sleep(5*lc_interval)
-
-    response  = client.list_object_versions(Bucket=bucket_name)
-    expire_versions = response['Versions']
-    assert len(init_versions) == 6
-    assert len(expire_versions) == 4
-
-@pytest.mark.lifecycle
-def test_lifecycle_set_deletemarker():
-    bucket_name = get_new_bucket()
-    client = get_client()
-    rules=[{'ID': 'rule1', 'Expiration': {'ExpiredObjectDeleteMarker': True}, 'Prefix': 'test1/', 'Status':'Enabled'}]
-    lifecycle = {'Rules': rules}
-    response = client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-@pytest.mark.lifecycle
-def test_lifecycle_set_filter():
-    bucket_name = get_new_bucket()
-    client = get_client()
-    rules=[{'ID': 'rule1', 'Expiration': {'ExpiredObjectDeleteMarker': True}, 'Filter': {'Prefix': 'foo'}, 'Status':'Enabled'}]
-    lifecycle = {'Rules': rules}
-    response = client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-@pytest.mark.lifecycle
-def test_lifecycle_set_empty_filter():
-    bucket_name = get_new_bucket()
-    client = get_client()
-    rules=[{'ID': 'rule1', 'Expiration': {'ExpiredObjectDeleteMarker': True}, 'Filter': {}, 'Status':'Enabled'}]
-    lifecycle = {'Rules': rules}
-    response = client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-@pytest.mark.lifecycle
-@pytest.mark.lifecycle_expiration
-@pytest.mark.fails_on_aws
-@pytest.mark.fails_on_dbstore
-def test_lifecycle_deletemarker_expiration():
-    bucket_name = get_new_bucket()
-    client = get_client()
-    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
-    create_multiple_versions(client, bucket_name, "test1/a", 1)
-    create_multiple_versions(client, bucket_name, "test2/abc", 1, check_versions=False)
-    client.delete_object(Bucket=bucket_name, Key="test1/a")
-    client.delete_object(Bucket=bucket_name, Key="test2/abc")
-
-    response  = client.list_object_versions(Bucket=bucket_name)
-    init_versions = response['Versions']
-    deleted_versions = response['DeleteMarkers']
-    total_init_versions = init_versions + deleted_versions
-
-    rules=[{'ID': 'rule1', 'NoncurrentVersionExpiration': {'NoncurrentDays': 1}, 'Expiration': {'ExpiredObjectDeleteMarker': True}, 'Prefix': 'test1/', 'Status':'Enabled'}]
-    lifecycle = {'Rules': rules}
-    client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
-
-    lc_interval = get_lc_debug_interval()
-
-    # Wait for first expiration (plus fudge to handle the timer window)
-    time.sleep(7*lc_interval)
-
-    response  = client.list_object_versions(Bucket=bucket_name)
-    init_versions = response['Versions']
-    deleted_versions = response['DeleteMarkers']
-    total_expire_versions = init_versions + deleted_versions
-
-    assert len(total_init_versions) == 4
-    assert len(total_expire_versions) == 2
-
-@pytest.mark.lifecycle
-def test_lifecycle_set_multipart():
-    bucket_name = get_new_bucket()
-    client = get_client()
-    rules = [
-        {'ID': 'rule1', 'Prefix': 'test1/', 'Status': 'Enabled',
-         'AbortIncompleteMultipartUpload': {'DaysAfterInitiation': 2}},
-        {'ID': 'rule2', 'Prefix': 'test2/', 'Status': 'Disabled',
-         'AbortIncompleteMultipartUpload': {'DaysAfterInitiation': 3}}
-    ]
-    lifecycle = {'Rules': rules}
-    response = client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-@pytest.mark.lifecycle
-@pytest.mark.lifecycle_expiration
-@pytest.mark.fails_on_aws
-@pytest.mark.fails_on_dbstore
-def test_lifecycle_multipart_expiration():
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    key_names = ['test1/a', 'test2/']
-    upload_ids = []
-
-    for key in key_names:
-        response = client.create_multipart_upload(Bucket=bucket_name, Key=key)
-        upload_ids.append(response['UploadId'])
-
-    response = client.list_multipart_uploads(Bucket=bucket_name)
-    init_uploads = response['Uploads']
-
-    rules = [
-        {'ID': 'rule1', 'Prefix': 'test1/', 'Status': 'Enabled',
-         'AbortIncompleteMultipartUpload': {'DaysAfterInitiation': 2}},
-    ]
-    lifecycle = {'Rules': rules}
-    response = client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
-
-    lc_interval = get_lc_debug_interval()
-
-    # Wait for first expiration (plus fudge to handle the timer window)
-    time.sleep(5*lc_interval)
-
-    response = client.list_multipart_uploads(Bucket=bucket_name)
-    expired_uploads = response['Uploads']
-    assert len(init_uploads) == 2
-    assert len(expired_uploads) == 1
-
-@pytest.mark.lifecycle
-def test_lifecycle_transition_set_invalid_date():
-    bucket_name = get_new_bucket()
-    client = get_client()
-    rules=[{'ID': 'rule1', 'Expiration': {'Date': '2023-09-27'},'Transitions': [{'Date': '20220927','StorageClass': 'GLACIER'}],'Prefix': 'test1/', 'Status':'Enabled'}]
-    lifecycle = {'Rules': rules}
-    e = assert_raises(ClientError, client.put_bucket_lifecycle_configuration, Bucket=bucket_name, LifecycleConfiguration=lifecycle)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 400
-
-def _test_encryption_sse_customer_write(file_size):
-    """
-    Tests Create a file of A's, use it to set_contents_from_file.
-    Create a file of B's, use it to re-set_contents_from_file.
-    Re-read the contents, and confirm we get B's
-    """
-    bucket_name = get_new_bucket()
-    client = get_client()
-    key = 'testobj'
-    data = 'A'*file_size
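-    # a fixed 256-bit SSE-C key (base64-encoded) together with the base64 MD5
-    # digest of that key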
-    sse_client_headers = {
-        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
-        'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
-        'x-amz-server-side-encryption-customer-key-md5': 'DWygnHRtgiJ77HCm+1rvHw=='
-    }
-
-    lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_client_headers))
-    client.meta.events.register('before-call.s3.PutObject', lf)
-    client.put_object(Bucket=bucket_name, Key=key, Body=data)
-
-    lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_client_headers))
-    client.meta.events.register('before-call.s3.GetObject', lf)
-    response = client.get_object(Bucket=bucket_name, Key=key)
-    body = _get_body(response)
-    assert body == data
-
-# The test harness for lifecycle is configured to treat days as 10 second intervals.
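-# (this typically relies on the gateway being run with a short
-# rgw_lc_debug_interval, e.g. 10 seconds, and a matching lc_debug_interval in
-# the test config; the exact setup is assumed here, not verified by the tests)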
-@pytest.mark.lifecycle
-@pytest.mark.lifecycle_transition
-@pytest.mark.fails_on_aws
-def test_lifecycle_transition():
-    sc = configured_storage_classes()
-    if len(sc) < 3:
-        pytest.skip('requires 3 or more storage classes')
-
-    bucket_name = _create_objects(keys=['expire1/foo', 'expire1/bar', 'keep2/foo',
-                                        'keep2/bar', 'expire3/foo', 'expire3/bar'])
-    client = get_client()
-    rules=[{'ID': 'rule1', 'Transitions': [{'Days': 1, 'StorageClass': sc[1]}], 'Prefix': 'expire1/', 'Status': 'Enabled'},
-           {'ID': 'rule2', 'Transitions': [{'Days': 6, 'StorageClass': sc[2]}], 'Prefix': 'expire3/', 'Status': 'Enabled'}]
-    lifecycle = {'Rules': rules}
-    client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
-
-    # Get list of all keys
-    response = client.list_objects(Bucket=bucket_name)
-    init_keys = _get_keys(response)
-    assert len(init_keys) == 6
-
-    lc_interval = get_lc_debug_interval()
-
-    # Wait for first expiration (plus fudge to handle the timer window)
-    time.sleep(4*lc_interval)
-    expire1_keys = list_bucket_storage_class(client, bucket_name)
-    assert len(expire1_keys['STANDARD']) == 4
-    assert len(expire1_keys[sc[1]]) == 2
-    assert len(expire1_keys[sc[2]]) == 0
-
-    # Wait for next expiration cycle
-    time.sleep(lc_interval)
-    keep2_keys = list_bucket_storage_class(client, bucket_name)
-    assert len(keep2_keys['STANDARD']) == 4
-    assert len(keep2_keys[sc[1]]) == 2
-    assert len(keep2_keys[sc[2]]) == 0
-
-    # Wait for final expiration cycle
-    time.sleep(5*lc_interval)
-    expire3_keys = list_bucket_storage_class(client, bucket_name)
-    assert len(expire3_keys['STANDARD']) == 2
-    assert len(expire3_keys[sc[1]]) == 2
-    assert len(expire3_keys[sc[2]]) == 2
-
-# The test harness for lifecycle is configured to treat days as 10 second intervals.
-@pytest.mark.lifecycle
-@pytest.mark.lifecycle_transition
-@pytest.mark.fails_on_aws
-def test_lifecycle_transition_single_rule_multi_trans():
-    sc = configured_storage_classes()
-    if len(sc) < 3:
-        pytest.skip('requires 3 or more storage classes')
-
-    bucket_name = _create_objects(keys=['expire1/foo', 'expire1/bar', 'keep2/foo',
-                                        'keep2/bar', 'expire3/foo', 'expire3/bar'])
-    client = get_client()
-    rules=[{'ID': 'rule1', 'Transitions': [{'Days': 1, 'StorageClass': sc[1]}, {'Days': 7, 'StorageClass': sc[2]}], 'Prefix': 'expire1/', 'Status': 'Enabled'}]
-    lifecycle = {'Rules': rules}
-    client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
-
-    # Get list of all keys
-    response = client.list_objects(Bucket=bucket_name)
-    init_keys = _get_keys(response)
-    assert len(init_keys) == 6
-
-    lc_interval = get_lc_debug_interval()
-
-    # Wait for first expiration (plus fudge to handle the timer window)
-    time.sleep(5*lc_interval)
-    expire1_keys = list_bucket_storage_class(client, bucket_name)
-    assert len(expire1_keys['STANDARD']) == 4
-    assert len(expire1_keys[sc[1]]) == 2
-    assert len(expire1_keys[sc[2]]) == 0
-
-    # Wait for next expiration cycle
-    time.sleep(lc_interval)
-    keep2_keys = list_bucket_storage_class(client, bucket_name)
-    assert len(keep2_keys['STANDARD']) == 4
-    assert len(keep2_keys[sc[1]]) == 2
-    assert len(keep2_keys[sc[2]]) == 0
-
-    # Wait for final expiration cycle
-    time.sleep(6*lc_interval)
-    expire3_keys = list_bucket_storage_class(client, bucket_name)
-    assert len(expire3_keys['STANDARD']) == 4
-    assert len(expire3_keys[sc[1]]) == 0
-    assert len(expire3_keys[sc[2]]) == 2
-
-@pytest.mark.lifecycle
-@pytest.mark.lifecycle_transition
-def test_lifecycle_set_noncurrent_transition():
-    sc = configured_storage_classes()
-    if len(sc) < 3:
-        pytest.skip('requires 3 or more storage classes')
-
-    bucket = get_new_bucket()
-    client = get_client()
-    rules = [
-        {
-            'ID': 'rule1',
-            'Prefix': 'test1/',
-            'Status': 'Enabled',
-            'NoncurrentVersionTransitions': [
-                {
-                    'NoncurrentDays': 2,
-                    'StorageClass': sc[1]
-                },
-                {
-                    'NoncurrentDays': 4,
-                    'StorageClass': sc[2]
-                }
-            ],
-            'NoncurrentVersionExpiration': {
-                'NoncurrentDays': 6
-            }
-        },
-        {'ID': 'rule2', 'Prefix': 'test2/', 'Status': 'Disabled', 'NoncurrentVersionExpiration': {'NoncurrentDays': 3}}
-    ]
-    lifecycle = {'Rules': rules}
-    response = client.put_bucket_lifecycle_configuration(Bucket=bucket, LifecycleConfiguration=lifecycle)
-
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-
-@pytest.mark.lifecycle
-@pytest.mark.lifecycle_expiration
-@pytest.mark.lifecycle_transition
-@pytest.mark.fails_on_aws
-def test_lifecycle_noncur_transition():
-    sc = configured_storage_classes()
-    if len(sc) < 3:
-        pytest.skip('requires 3 or more storage classes')
-
-    bucket = get_new_bucket()
-    client = get_client()
-    check_configure_versioning_retry(bucket, "Enabled", "Enabled")
-
-    rules = [
-        {
-            'ID': 'rule1',
-            'Prefix': 'test1/',
-            'Status': 'Enabled',
-            'NoncurrentVersionTransitions': [
-                {
-                    'NoncurrentDays': 1,
-                    'StorageClass': sc[1]
-                },
-                {
-                    'NoncurrentDays': 5,
-                    'StorageClass': sc[2]
-                }
-            ],
-            'NoncurrentVersionExpiration': {
-                'NoncurrentDays': 9
-            }
-        }
-    ]
-    lifecycle = {'Rules': rules}
-    response = client.put_bucket_lifecycle_configuration(Bucket=bucket, LifecycleConfiguration=lifecycle)
-
-    create_multiple_versions(client, bucket, "test1/a", 3)
-    create_multiple_versions(client, bucket, "test1/b", 3)
-
-    init_keys = list_bucket_storage_class(client, bucket)
-    assert len(init_keys['STANDARD']) == 6
-
-    lc_interval = get_lc_debug_interval()
-
-    time.sleep(4*lc_interval)
-    expire1_keys = list_bucket_storage_class(client, bucket)
-    assert len(expire1_keys['STANDARD']) == 2
-    assert len(expire1_keys[sc[1]]) == 4
-    assert len(expire1_keys[sc[2]]) == 0
-
-    time.sleep(4*lc_interval)
-    expire1_keys = list_bucket_storage_class(client, bucket)
-    assert len(expire1_keys['STANDARD']) == 2
-    assert len(expire1_keys[sc[1]]) == 0
-    assert len(expire1_keys[sc[2]]) == 4
-
-    time.sleep(6*lc_interval)
-    expire1_keys = list_bucket_storage_class(client, bucket)
-    assert len(expire1_keys['STANDARD']) == 2
-    assert len(expire1_keys[sc[1]]) == 0
-    assert len(expire1_keys[sc[2]]) == 0
-
-@pytest.mark.lifecycle
-@pytest.mark.lifecycle_expiration
-@pytest.mark.lifecycle_transition
-def test_lifecycle_plain_null_version_current_transition():
-    sc = configured_storage_classes()
-    if len(sc) < 2:
-        pytest.skip('requires 2 or more storage classes')
-
-    target_sc = sc[1]
-    assert target_sc != 'STANDARD'
-
-    bucket = get_new_bucket()
-    check_versioning(bucket, None)
-
-    # create a plain object before enabling versioning;
-    # this will be transitioned as a current version
-    client = get_client()
-    key = 'testobjfoo'
-    content = 'fooz'
-    client.put_object(Bucket=bucket, Key=key, Body=content)
-
-    check_configure_versioning_retry(bucket, "Enabled", "Enabled")
-
-    client.put_bucket_lifecycle_configuration(Bucket=bucket, LifecycleConfiguration={
-            'Rules': [
-                {
-                    'ID': 'rule1',
-                    'Prefix': 'testobj',
-                    'Status': 'Enabled',
-                    'Transitions': [
-                        {
-                            'Days': 1,
-                            'StorageClass': target_sc
-                        },
-                    ]
-                }
-            ]
-        })
-
-    lc_interval = get_lc_debug_interval()
-    time.sleep(4*lc_interval)
-
-    keys = list_bucket_storage_class(client, bucket)
-    assert len(keys['STANDARD']) == 0
-    assert len(keys[target_sc]) == 1
-
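-# helper: GET the object and check its storage class (treated as STANDARD when
-# the response carries none) and, optionally, its body content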
-def verify_object(client, bucket, key, content=None, sc=None):
-    response = client.get_object(Bucket=bucket, Key=key)
-
-    if (sc == None):
-        sc = 'STANDARD'
-
-    if ('StorageClass' in response):
-        assert response['StorageClass'] == sc
-    else: #storage class should be STANDARD
-        assert 'STANDARD' == sc
-
-    if (content != None):
-        body = _get_body(response)
-        assert body == content
-
-# The test harness for lifecycle is configured to treat days as 10 second intervals.
-@pytest.mark.lifecycle
-@pytest.mark.lifecycle_transition
-@pytest.mark.cloud_transition
-@pytest.mark.fails_on_aws
-@pytest.mark.fails_on_dbstore
-def test_lifecycle_cloud_transition():
-    cloud_sc = get_cloud_storage_class()
-    if cloud_sc == None:
-        pytest.skip('no cloud_storage_class configured')
-
-    retain_head_object = get_cloud_retain_head_object()
-    target_path = get_cloud_target_path()
-    target_sc = get_cloud_target_storage_class()
-
-    keys=['expire1/foo', 'expire1/bar', 'keep2/foo', 'keep2/bar']
-    bucket_name = _create_objects(keys=keys)
-    client = get_client()
-    rules=[{'ID': 'rule1', 'Transitions': [{'Days': 1, 'StorageClass': cloud_sc}], 'Prefix': 'expire1/', 'Status': 'Enabled'}]
-    lifecycle = {'Rules': rules}
-    client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
-
-    # Get list of all keys
-    response = client.list_objects(Bucket=bucket_name)
-    init_keys = _get_keys(response)
-    assert len(init_keys) == 4
-
-    lc_interval = get_lc_debug_interval()
-
-    # Wait for first expiration (plus fudge to handle the timer window)
-    time.sleep(10*lc_interval)
-    expire1_keys = list_bucket_storage_class(client, bucket_name)
-    assert len(expire1_keys['STANDARD']) == 2
-
-    if (retain_head_object != None and retain_head_object == "true"):
-        assert len(expire1_keys[cloud_sc]) == 2
-    else:
-        assert len(expire1_keys[cloud_sc]) == 0
-
-    time.sleep(2*lc_interval)
-    # Check that the objects were copied to the target path
-    if target_path == None:
-        target_path = "rgwx-default-" + cloud_sc.lower() + "-cloud-bucket"
-    prefix = bucket_name + "/"
-
-    cloud_client = get_cloud_client()
-
-    time.sleep(12*lc_interval)
-    expire1_key1_str = prefix + keys[0]
-    verify_object(cloud_client, target_path, expire1_key1_str, keys[0], target_sc)
-
-    expire1_key2_str = prefix + keys[1]
-    verify_object(cloud_client, target_path, expire1_key2_str, keys[1], target_sc)
-
-    # Now verify the object on source rgw
-    src_key = keys[0]
-    if (retain_head_object != None and retain_head_object == "true"):
-        # verify HEAD response
-        response = client.head_object(Bucket=bucket_name, Key=keys[0])
-        assert 0 == response['ContentLength']
-        assert cloud_sc == response['StorageClass']
-
-        # GET should return InvalidObjectState error
-        e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key=src_key)
-        status, error_code = _get_status_and_error_code(e.response)
-        assert status == 403
-        assert error_code == 'InvalidObjectState'
-
-        # COPY of object should return InvalidObjectState error
-        copy_source = {'Bucket': bucket_name, 'Key': src_key}
-        e = assert_raises(ClientError, client.copy, CopySource=copy_source, Bucket=bucket_name, Key='copy_obj')
-        status, error_code = _get_status_and_error_code(e.response)
-        assert status == 403
-        assert error_code == 'InvalidObjectState'
-
-        # DELETE should succeed
-        response = client.delete_object(Bucket=bucket_name, Key=src_key)
-        e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key=src_key)
-        status, error_code = _get_status_and_error_code(e.response)
-        assert status == 404
-        assert error_code == 'NoSuchKey'
-
-# Similar to 'test_lifecycle_transition' but for cloud transition
-@pytest.mark.lifecycle
-@pytest.mark.lifecycle_transition
-@pytest.mark.cloud_transition
-@pytest.mark.fails_on_aws
-@pytest.mark.fails_on_dbstore
-def test_lifecycle_cloud_multiple_transition():
-    cloud_sc = get_cloud_storage_class()
-    if cloud_sc == None:
-        pytest.skip('[s3 cloud] section missing cloud_storage_class')
-
-    retain_head_object = get_cloud_retain_head_object()
-    target_path = get_cloud_target_path()
-    target_sc = get_cloud_target_storage_class()
-
-    sc1 = get_cloud_regular_storage_class()
-
-    if (sc1 == None):
-        pytest.skip('[s3 cloud] section missing storage_class')
-
-    sc = ['STANDARD', sc1, cloud_sc]
-
-    keys=['expire1/foo', 'expire1/bar', 'keep2/foo', 'keep2/bar']
-    bucket_name = _create_objects(keys=keys)
-    client = get_client()
-    rules=[{'ID': 'rule1', 'Transitions': [{'Days': 1, 'StorageClass': sc1}], 'Prefix': 'expire1/', 'Status': 'Enabled'},
-           {'ID': 'rule2', 'Transitions': [{'Days': 5, 'StorageClass': cloud_sc}], 'Prefix': 'expire1/', 'Status': 'Enabled'},
-           {'ID': 'rule3', 'Expiration': {'Days': 9}, 'Prefix': 'expire1/', 'Status': 'Enabled'}]
-    lifecycle = {'Rules': rules}
-    client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
-
-    # Get list of all keys
-    response = client.list_objects(Bucket=bucket_name)
-    init_keys = _get_keys(response)
-    assert len(init_keys) == 4
-
-    lc_interval = get_lc_debug_interval()
-
-    # Wait for first expiration (plus fudge to handle the timer window)
-    time.sleep(4*lc_interval)
-    expire1_keys = list_bucket_storage_class(client, bucket_name)
-    assert len(expire1_keys['STANDARD']) == 2
-    assert len(expire1_keys[sc[1]]) == 2
-    assert len(expire1_keys[sc[2]]) == 0
-
-    # Wait for next expiration cycle
-    time.sleep(7*lc_interval)
-    expire1_keys = list_bucket_storage_class(client, bucket_name)
-    assert len(expire1_keys['STANDARD']) == 2
-    assert len(expire1_keys[sc[1]]) == 0
-
-    if (retain_head_object != None and retain_head_object == "true"):
-        assert len(expire1_keys[sc[2]]) == 2
-    else:
-        assert len(expire1_keys[sc[2]]) == 0
-
-    # Wait for final expiration cycle
-    time.sleep(12*lc_interval)
-    expire3_keys = list_bucket_storage_class(client, bucket_name)
-    assert len(expire3_keys['STANDARD']) == 2
-    assert len(expire3_keys[sc[1]]) == 0
-    assert len(expire3_keys[sc[2]]) == 0
-
-# Noncurrent objects for cloud transition
-@pytest.mark.lifecycle
-@pytest.mark.lifecycle_expiration
-@pytest.mark.lifecycle_transition
-@pytest.mark.cloud_transition
-@pytest.mark.fails_on_aws
-@pytest.mark.fails_on_dbstore
-def test_lifecycle_noncur_cloud_transition():
-    cloud_sc = get_cloud_storage_class()
-    if cloud_sc == None:
-        pytest.skip('[s3 cloud] section missing cloud_storage_class')
-
-    retain_head_object = get_cloud_retain_head_object()
-    target_path = get_cloud_target_path()
-    target_sc = get_cloud_target_storage_class()
-
-    sc1 = get_cloud_regular_storage_class()
-    if (sc1 == None):
-        pytest.skip('[s3 cloud] section missing storage_class')
-
-    sc = ['STANDARD', sc1, cloud_sc]
-
-    bucket = get_new_bucket()
-    client = get_client()
-    check_configure_versioning_retry(bucket, "Enabled", "Enabled")
-
-    rules = [
-        {
-            'ID': 'rule1',
-            'Prefix': 'test1/',
-            'Status': 'Enabled',
-            'NoncurrentVersionTransitions': [
-                {
-                    'NoncurrentDays': 1,
-                    'StorageClass': sc[1]
-                },
-                {
-                    'NoncurrentDays': 5,
-                    'StorageClass': sc[2]
-                }
-            ],
-        }
-    ]
-    lifecycle = {'Rules': rules}
-    response = client.put_bucket_lifecycle_configuration(Bucket=bucket, LifecycleConfiguration=lifecycle)
-
-    keys = ['test1/a', 'test1/b']
-
-    for k in keys:
-        create_multiple_versions(client, bucket, k, 3)
-
-    init_keys = list_bucket_storage_class(client, bucket)
-    assert len(init_keys['STANDARD']) == 6
-
-    response  = client.list_object_versions(Bucket=bucket)
-
-    lc_interval = get_lc_debug_interval()
-
-    time.sleep(4*lc_interval)
-    expire1_keys = list_bucket_storage_class(client, bucket)
-    assert len(expire1_keys['STANDARD']) == 2
-    assert len(expire1_keys[sc[1]]) == 4
-    assert len(expire1_keys[sc[2]]) == 0
-
-    time.sleep(10*lc_interval)
-    expire1_keys = list_bucket_storage_class(client, bucket)
-    assert len(expire1_keys['STANDARD']) == 2
-    assert len(expire1_keys[sc[1]]) == 0
-
-    if (retain_head_object == None or retain_head_object == "false"):
-        assert len(expire1_keys[sc[2]]) == 0
-    else:
-        assert len(expire1_keys[sc[2]]) == 4
-
-    # check that the versioned objects exist on the cloud endpoint
-    if target_path == None:
-        target_path = "rgwx-default-" + cloud_sc.lower() + "-cloud-bucket"
-    prefix = bucket + "/"
-
-    cloud_client = get_cloud_client()
-
-    time.sleep(lc_interval)
-    result = list_bucket_versions(client, bucket)
-
-    for src_key in keys:
-        for k in result[src_key]:
-            expire1_key1_str = prefix + src_key + "-" + k['VersionId']
-            verify_object(cloud_client, target_path, expire1_key1_str, None, target_sc)
-
-# The test harness for lifecycle is configured to treat days as 10 second intervals.
-@pytest.mark.lifecycle
-@pytest.mark.lifecycle_transition
-@pytest.mark.cloud_transition
-@pytest.mark.fails_on_aws
-@pytest.mark.fails_on_dbstore
-def test_lifecycle_cloud_transition_large_obj():
-    cloud_sc = get_cloud_storage_class()
-    if cloud_sc == None:
-        pytest.skip('[s3 cloud] section missing cloud_storage_class')
-
-    retain_head_object = get_cloud_retain_head_object()
-    target_path = get_cloud_target_path()
-    target_sc = get_cloud_target_storage_class()
-
-    bucket = get_new_bucket()
-    client = get_client()
-    rules=[{'ID': 'rule1', 'Transitions': [{'Days': 1, 'StorageClass': cloud_sc}], 'Prefix': 'expire1/', 'Status': 'Enabled'}]
-
-    keys = ['keep/multi', 'expire1/multi']
-    size = 9*1024*1024
-    data = 'A'*size
-
-    for k in keys:
-        client.put_object(Bucket=bucket, Body=data, Key=k)
-        verify_object(client, bucket, k, data)
-
-    lifecycle = {'Rules': rules}
-    response = client.put_bucket_lifecycle_configuration(Bucket=bucket, LifecycleConfiguration=lifecycle)
-
-    lc_interval = get_lc_debug_interval()
-
-    # Wait for first expiration (plus fudge to handle the timer window)
-    time.sleep(8*lc_interval)
-    expire1_keys = list_bucket_storage_class(client, bucket)
-    assert len(expire1_keys['STANDARD']) == 1
-
-    if (retain_head_object != None and retain_head_object == "true"):
-        assert len(expire1_keys[cloud_sc]) == 1
-    else:
-        assert len(expire1_keys[cloud_sc]) == 0
-
-    # Check that the objects were copied to the target path
-    if target_path == None:
-        target_path = "rgwx-default-" + cloud_sc.lower() + "-cloud-bucket"
-    prefix = bucket + "/"
-
-    # multipart upload takes time
-    time.sleep(12*lc_interval)
-    cloud_client = get_cloud_client()
-
-    expire1_key1_str = prefix + keys[1]
-    verify_object(cloud_client, target_path, expire1_key1_str, data, target_sc)
-
-@pytest.mark.encryption
-@pytest.mark.fails_on_dbstore
-def test_encrypted_transfer_1b():
-    _test_encryption_sse_customer_write(1)
-
-
-@pytest.mark.encryption
-@pytest.mark.fails_on_dbstore
-def test_encrypted_transfer_1kb():
-    _test_encryption_sse_customer_write(1024)
-
-
-@pytest.mark.encryption
-@pytest.mark.fails_on_dbstore
-def test_encrypted_transfer_1MB():
-    _test_encryption_sse_customer_write(1024*1024)
-
-
-@pytest.mark.encryption
-@pytest.mark.fails_on_dbstore
-def test_encrypted_transfer_13b():
-    _test_encryption_sse_customer_write(13)
-
-
-@pytest.mark.encryption
-def test_encryption_sse_c_method_head():
-    bucket_name = get_new_bucket()
-    client = get_client()
-    data = 'A'*1000
-    key = 'testobj'
-    sse_client_headers = {
-        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
-        'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
-        'x-amz-server-side-encryption-customer-key-md5': 'DWygnHRtgiJ77HCm+1rvHw=='
-    }
-
-    lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_client_headers))
-    client.meta.events.register('before-call.s3.PutObject', lf)
-    client.put_object(Bucket=bucket_name, Key=key, Body=data)
-
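-    # HEAD without the SSE-C headers must be rejected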
-    e = assert_raises(ClientError, client.head_object, Bucket=bucket_name, Key=key)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 400
-
-    lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_client_headers))
-    client.meta.events.register('before-call.s3.HeadObject', lf)
-    response = client.head_object(Bucket=bucket_name, Key=key)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-@pytest.mark.encryption
-def test_encryption_sse_c_present():
-    bucket_name = get_new_bucket()
-    client = get_client()
-    data = 'A'*1000
-    key = 'testobj'
-    sse_client_headers = {
-        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
-        'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
-        'x-amz-server-side-encryption-customer-key-md5': 'DWygnHRtgiJ77HCm+1rvHw=='
-    }
-
-    lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_client_headers))
-    client.meta.events.register('before-call.s3.PutObject', lf)
-    client.put_object(Bucket=bucket_name, Key=key, Body=data)
-
-    e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key=key)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 400
-
-@pytest.mark.encryption
-def test_encryption_sse_c_other_key():
-    bucket_name = get_new_bucket()
-    client = get_client()
-    data = 'A'*100
-    key = 'testobj'
-    sse_client_headers_A = {
-        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
-        'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
-        'x-amz-server-side-encryption-customer-key-md5': 'DWygnHRtgiJ77HCm+1rvHw=='
-    }
-    sse_client_headers_B = {
-        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
-        'x-amz-server-side-encryption-customer-key': '6b+WOZ1T3cqZMxgThRcXAQBrS5mXKdDUphvpxptl9/4=',
-        'x-amz-server-side-encryption-customer-key-md5': 'arxBvwY2V4SiOne6yppVPQ=='
-    }
-
-    lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_client_headers_A))
-    client.meta.events.register('before-call.s3.PutObject', lf)
-    client.put_object(Bucket=bucket_name, Key=key, Body=data)
-
-    lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_client_headers_B))
-    client.meta.events.register('before-call.s3.GetObject', lf)
-    e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key=key)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 400
-
-@pytest.mark.encryption
-def test_encryption_sse_c_invalid_md5():
-    bucket_name = get_new_bucket()
-    client = get_client()
-    data = 'A'*100
-    key = 'testobj'
-    sse_client_headers = {
-        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
-        'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
-        'x-amz-server-side-encryption-customer-key-md5': 'AAAAAAAAAAAAAAAAAAAAAA=='
-    }
-
-    lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_client_headers))
-    client.meta.events.register('before-call.s3.PutObject', lf)
-    e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key=key, Body=data)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 400
-
-@pytest.mark.encryption
-def test_encryption_sse_c_no_md5():
-    bucket_name = get_new_bucket()
-    client = get_client()
-    data = 'A'*100
-    key = 'testobj'
-    sse_client_headers = {
-        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
-        'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
-    }
-
-    lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_client_headers))
-    client.meta.events.register('before-call.s3.PutObject', lf)
-    e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key=key, Body=data)
-
-@pytest.mark.encryption
-def test_encryption_sse_c_no_key():
-    bucket_name = get_new_bucket()
-    client = get_client()
-    data = 'A'*100
-    key = 'testobj'
-    sse_client_headers = {
-        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
-    }
-
-    lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_client_headers))
-    client.meta.events.register('before-call.s3.PutObject', lf)
-    e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key=key, Body=data)
-
-@pytest.mark.encryption
-def test_encryption_key_no_sse_c():
-    bucket_name = get_new_bucket()
-    client = get_client()
-    data = 'A'*100
-    key = 'testobj'
-    sse_client_headers = {
-        'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
-        'x-amz-server-side-encryption-customer-key-md5': 'DWygnHRtgiJ77HCm+1rvHw=='
-    }
-
-    lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_client_headers))
-    client.meta.events.register('before-call.s3.PutObject', lf)
-    e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key=key, Body=data)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 400
-
-def _multipart_upload_enc(client, bucket_name, key, size, part_size, init_headers, part_headers, metadata, resend_parts):
-    """
-    generate a multi-part upload for a random file of specifed size,
-    if requested, generate a list of the parts
-    return the upload descriptor
-    """
-    if client == None:
-        client = get_client()
-
-    lf = (lambda **kwargs: kwargs['params']['headers'].update(init_headers))
-    client.meta.events.register('before-call.s3.CreateMultipartUpload', lf)
-    if metadata == None:
-        response = client.create_multipart_upload(Bucket=bucket_name, Key=key)
-    else:
-        response = client.create_multipart_upload(Bucket=bucket_name, Key=key, Metadata=metadata)
-
-    upload_id = response['UploadId']
-    s = ''
-    parts = []
-    for i, part in enumerate(generate_random(size, part_size)):
-        # part_num is necessary because PartNumber for upload_part and in parts must start at 1 and i starts at 0
-        part_num = i+1
-        s += part
-        lf = (lambda **kwargs: kwargs['params']['headers'].update(part_headers))
-        client.meta.events.register('before-call.s3.UploadPart', lf)
-        response = client.upload_part(UploadId=upload_id, Bucket=bucket_name, Key=key, PartNumber=part_num, Body=part)
-        parts.append({'ETag': response['ETag'].strip('"'), 'PartNumber': part_num})
-        if i in resend_parts:
-            lf = (lambda **kwargs: kwargs['params']['headers'].update(part_headers))
-            client.meta.events.register('before-call.s3.UploadPart', lf)
-            client.upload_part(UploadId=upload_id, Bucket=bucket_name, Key=key, PartNumber=part_num, Body=part)
-
-    return (upload_id, s, parts)
-
-def _check_content_using_range_enc(client, bucket_name, key, data, size, step, enc_headers=None):
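-    # read the object back in step-sized ranged GETs, re-sending the SSE-C
-    # headers on every request, and verify each slice against data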
-    for ofs in range(0, size, step):
-        toread = size - ofs
-        if toread > step:
-            toread = step
-        end = ofs + toread - 1
-        lf = (lambda **kwargs: kwargs['params']['headers'].update(enc_headers))
-        client.meta.events.register('before-call.s3.GetObject', lf)
-        r = 'bytes={s}-{e}'.format(s=ofs, e=end)
-        response = client.get_object(Bucket=bucket_name, Key=key, Range=r)
-        read_range = response['ContentLength']
-        body = _get_body(response)
-        assert read_range == toread
-        assert body == data[ofs:end+1]
-
-@pytest.mark.encryption
-@pytest.mark.fails_on_dbstore
-def test_encryption_sse_c_multipart_upload():
-    bucket_name = get_new_bucket()
-    client = get_client()
-    key = "multipart_enc"
-    content_type = 'text/plain'
-    objlen = 30 * 1024 * 1024
-    partlen = 5*1024*1024
-    metadata = {'foo': 'bar'}
-    enc_headers = {
-        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
-        'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
-        'x-amz-server-side-encryption-customer-key-md5': 'DWygnHRtgiJ77HCm+1rvHw==',
-        'Content-Type': content_type
-    }
-    resend_parts = []
-
-    (upload_id, data, parts) = _multipart_upload_enc(client, bucket_name, key, objlen,
-            part_size=partlen, init_headers=enc_headers, part_headers=enc_headers, metadata=metadata, resend_parts=resend_parts)
-
-    lf = (lambda **kwargs: kwargs['params']['headers'].update(enc_headers))
-    client.meta.events.register('before-call.s3.CompleteMultipartUpload', lf)
-    client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
-
-    response = client.list_objects_v2(Bucket=bucket_name, Prefix=key)
-    assert len(response['Contents']) == 1
-    assert response['Contents'][0]['Size'] == objlen
-
-    lf = (lambda **kwargs: kwargs['params']['headers'].update(enc_headers))
-    client.meta.events.register('before-call.s3.GetObject', lf)
-    response = client.get_object(Bucket=bucket_name, Key=key)
-
-    assert response['Metadata'] == metadata
-    assert response['ResponseMetadata']['HTTPHeaders']['content-type'] == content_type
-
-    body = _get_body(response)
-    assert body == data
-    size = response['ContentLength']
-    assert len(body) == size
-
-    _check_content_using_range_enc(client, bucket_name, key, data, size, 1000000, enc_headers=enc_headers)
-    _check_content_using_range_enc(client, bucket_name, key, data, size, 10000000, enc_headers=enc_headers)
-    for i in range(-1,2):
-        _check_content_using_range_enc(client, bucket_name, key, data, size, partlen + i, enc_headers=enc_headers)
-
-@pytest.mark.encryption
-@pytest.mark.fails_on_dbstore
-def test_encryption_sse_c_unaligned_multipart_upload():
-    bucket_name = get_new_bucket()
-    client = get_client()
-    key = "multipart_enc"
-    content_type = 'text/plain'
-    objlen = 30 * 1024 * 1024
-    partlen = 1 + 5 * 1024 * 1024 # not a multiple of the 4k encryption block size
-    metadata = {'foo': 'bar'}
-    enc_headers = {
-        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
-        'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
-        'x-amz-server-side-encryption-customer-key-md5': 'DWygnHRtgiJ77HCm+1rvHw==',
-        'Content-Type': content_type
-    }
-    resend_parts = []
-
-    (upload_id, data, parts) = _multipart_upload_enc(client, bucket_name, key, objlen,
-            part_size=partlen, init_headers=enc_headers, part_headers=enc_headers, metadata=metadata, resend_parts=resend_parts)
-
-    lf = (lambda **kwargs: kwargs['params']['headers'].update(enc_headers))
-    client.meta.events.register('before-call.s3.CompleteMultipartUpload', lf)
-    client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
-
-    response = client.list_objects_v2(Bucket=bucket_name, Prefix=key)
-    assert len(response['Contents']) == 1
-    assert response['Contents'][0]['Size'] == objlen
-
-    lf = (lambda **kwargs: kwargs['params']['headers'].update(enc_headers))
-    client.meta.events.register('before-call.s3.GetObject', lf)
-    response = client.get_object(Bucket=bucket_name, Key=key)
-
-    assert response['Metadata'] == metadata
-    assert response['ResponseMetadata']['HTTPHeaders']['content-type'] == content_type
-
-    body = _get_body(response)
-    assert body == data
-    size = response['ContentLength']
-    assert len(body) == size
-
-    _check_content_using_range_enc(client, bucket_name, key, data, size, 1000000, enc_headers=enc_headers)
-    _check_content_using_range_enc(client, bucket_name, key, data, size, 10000000, enc_headers=enc_headers)
-    for i in range(-1,2):
-        _check_content_using_range_enc(client, bucket_name, key, data, size, partlen + i, enc_headers=enc_headers)
-
-@pytest.mark.encryption
-# TODO: remove this fails_on_rgw when I fix it
-@pytest.mark.fails_on_rgw
-def test_encryption_sse_c_multipart_invalid_chunks_1():
-    bucket_name = get_new_bucket()
-    client = get_client()
-    key = "multipart_enc"
-    content_type = 'text/plain'
-    objlen = 30 * 1024 * 1024
-    metadata = {'foo': 'bar'}
-    init_headers = {
-        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
-        'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
-        'x-amz-server-side-encryption-customer-key-md5': 'DWygnHRtgiJ77HCm+1rvHw==',
-        'Content-Type': content_type
-    }
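-    # the parts are uploaded under a different SSE-C key than the one used for
-    # CreateMultipartUpload, so the upload is expected to fail with 400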
-    part_headers = {
-        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
-        'x-amz-server-side-encryption-customer-key': '6b+WOZ1T3cqZMxgThRcXAQBrS5mXKdDUphvpxptl9/4=',
-        'x-amz-server-side-encryption-customer-key-md5': 'arxBvwY2V4SiOne6yppVPQ=='
-    }
-    resend_parts = []
-
-    e = assert_raises(ClientError, _multipart_upload_enc, client=client,  bucket_name=bucket_name,
-            key=key, size=objlen, part_size=5*1024*1024, init_headers=init_headers, part_headers=part_headers, metadata=metadata, resend_parts=resend_parts)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 400
-
-@pytest.mark.encryption
-# TODO: remove this fails_on_rgw when I fix it
-@pytest.mark.fails_on_rgw
-def test_encryption_sse_c_multipart_invalid_chunks_2():
-    bucket_name = get_new_bucket()
-    client = get_client()
-    key = "multipart_enc"
-    content_type = 'text/plain'
-    objlen = 30 * 1024 * 1024
-    metadata = {'foo': 'bar'}
-    init_headers = {
-        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
-        'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
-        'x-amz-server-side-encryption-customer-key-md5': 'DWygnHRtgiJ77HCm+1rvHw==',
-        'Content-Type': content_type
-    }
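-    # the parts are uploaded with a bad SSE-C key MD5, so the upload is
-    # expected to fail with 400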
-    part_headers = {
-        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
-        'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
-        'x-amz-server-side-encryption-customer-key-md5': 'AAAAAAAAAAAAAAAAAAAAAA=='
-    }
-    resend_parts = []
-
-    e = assert_raises(ClientError, _multipart_upload_enc, client=client,  bucket_name=bucket_name,
-            key=key, size=objlen, part_size=5*1024*1024, init_headers=init_headers, part_headers=part_headers, metadata=metadata, resend_parts=resend_parts)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 400
-
-@pytest.mark.encryption
-def test_encryption_sse_c_multipart_bad_download():
-    bucket_name = get_new_bucket()
-    client = get_client()
-    key = "multipart_enc"
-    content_type = 'text/plain'
-    objlen = 30 * 1024 * 1024
-    metadata = {'foo': 'bar'}
-    put_headers = {
-        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
-        'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
-        'x-amz-server-side-encryption-customer-key-md5': 'DWygnHRtgiJ77HCm+1rvHw==',
-        'Content-Type': content_type
-    }
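-    # get_headers carry a different SSE-C key than the one used for the upload;
-    # the final GET with them must be rejected with 400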
-    get_headers = {
-        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
-        'x-amz-server-side-encryption-customer-key': '6b+WOZ1T3cqZMxgThRcXAQBrS5mXKdDUphvpxptl9/4=',
-        'x-amz-server-side-encryption-customer-key-md5': 'arxBvwY2V4SiOne6yppVPQ=='
-    }
-    resend_parts = []
-
-    (upload_id, data, parts) = _multipart_upload_enc(client, bucket_name, key, objlen,
-            part_size=5*1024*1024, init_headers=put_headers, part_headers=put_headers, metadata=metadata, resend_parts=resend_parts)
-
-    lf = (lambda **kwargs: kwargs['params']['headers'].update(put_headers))
-    client.meta.events.register('before-call.s3.CompleteMultipartUpload', lf)
-    client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
-
-    response = client.list_objects_v2(Bucket=bucket_name, Prefix=key)
-    assert len(response['Contents']) == 1
-    assert response['Contents'][0]['Size'] == objlen
-
-    lf = (lambda **kwargs: kwargs['params']['headers'].update(put_headers))
-    client.meta.events.register('before-call.s3.GetObject', lf)
-    response = client.get_object(Bucket=bucket_name, Key=key)
-
-    assert response['Metadata'] == metadata
-    assert response['ResponseMetadata']['HTTPHeaders']['content-type'] == content_type
-
-    lf = (lambda **kwargs: kwargs['params']['headers'].update(get_headers))
-    client.meta.events.register('before-call.s3.GetObject', lf)
-    e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key=key)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 400
-
-
-@pytest.mark.encryption
-@pytest.mark.fails_on_dbstore
-def test_encryption_sse_c_post_object_authenticated_request():
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    url = _get_post_url(bucket_name)
-    utc = pytz.utc
-    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
-
-    policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
-    "conditions": [\
-    {"bucket": bucket_name},\
-    ["starts-with", "$key", "foo"],\
-    {"acl": "private"},\
-    ["starts-with", "$Content-Type", "text/plain"],\
-    ["starts-with", "$x-amz-server-side-encryption-customer-algorithm", ""], \
-    ["starts-with", "$x-amz-server-side-encryption-customer-key", ""], \
-    ["starts-with", "$x-amz-server-side-encryption-customer-key-md5", ""], \
-    ["content-length-range", 0, 1024]\
-    ]\
-    }
-
-
-    json_policy_document = json.JSONEncoder().encode(policy_document)
-    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
-    policy = base64.b64encode(bytes_json_policy_document)
-    aws_secret_access_key = get_main_aws_secret_key()
-    aws_access_key_id = get_main_aws_access_key()
-
-    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
-
-    payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
-    ("acl" , "private"),("signature" , signature),("policy" , policy),\
-    ("Content-Type" , "text/plain"),
-    ('x-amz-server-side-encryption-customer-algorithm', 'AES256'), \
-    ('x-amz-server-side-encryption-customer-key', 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs='), \
-    ('x-amz-server-side-encryption-customer-key-md5', 'DWygnHRtgiJ77HCm+1rvHw=='), \
-    ('file', ('bar'))])
-
-    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
-    assert r.status_code == 204
-
-    get_headers = {
-        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
-        'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
-        'x-amz-server-side-encryption-customer-key-md5': 'DWygnHRtgiJ77HCm+1rvHw=='
-    }
-    lf = (lambda **kwargs: kwargs['params']['headers'].update(get_headers))
-    client.meta.events.register('before-call.s3.GetObject', lf)
-    response = client.get_object(Bucket=bucket_name, Key='foo.txt')
-    body = _get_body(response)
-    assert body == 'bar'
-
-@pytest.mark.encryption
-@pytest.mark.fails_on_dbstore
-def _test_sse_kms_customer_write(file_size, key_id = 'testkey-1'):
-    """
-    Tests Create a file of A's, use it to set_contents_from_file.
-    Create a file of B's, use it to re-set_contents_from_file.
-    Re-read the contents, and confirm we get B's
-    """
-    bucket_name = get_new_bucket()
-    client = get_client()
-    sse_kms_client_headers = {
-        'x-amz-server-side-encryption': 'aws:kms',
-        'x-amz-server-side-encryption-aws-kms-key-id': key_id
-    }
-    data = 'A'*file_size
-
-    lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_kms_client_headers))
-    client.meta.events.register('before-call.s3.PutObject', lf)
-    client.put_object(Bucket=bucket_name, Key='testobj', Body=data)
-
-    response = client.get_object(Bucket=bucket_name, Key='testobj')
-    body = _get_body(response)
-    assert body == data
-
-
-
-@pytest.mark.encryption
-@pytest.mark.fails_on_dbstore
-def test_sse_kms_method_head():
-    kms_keyid = get_main_kms_keyid()
-    bucket_name = get_new_bucket()
-    client = get_client()
-    sse_kms_client_headers = {
-        'x-amz-server-side-encryption': 'aws:kms',
-        'x-amz-server-side-encryption-aws-kms-key-id': kms_keyid
-    }
-    data = 'A'*1000
-    key = 'testobj'
-
-    lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_kms_client_headers))
-    client.meta.events.register('before-call.s3.PutObject', lf)
-    client.put_object(Bucket=bucket_name, Key=key, Body=data)
-
-    response = client.head_object(Bucket=bucket_name, Key=key)
-    assert response['ResponseMetadata']['HTTPHeaders']['x-amz-server-side-encryption'] == 'aws:kms'
-    assert response['ResponseMetadata']['HTTPHeaders']['x-amz-server-side-encryption-aws-kms-key-id'] == kms_keyid
-
-    lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_kms_client_headers))
-    client.meta.events.register('before-call.s3.HeadObject', lf)
-    e = assert_raises(ClientError, client.head_object, Bucket=bucket_name, Key=key)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 400
-
-@pytest.mark.encryption
-@pytest.mark.fails_on_dbstore
-def test_sse_kms_present():
-    kms_keyid = get_main_kms_keyid()
-    bucket_name = get_new_bucket()
-    client = get_client()
-    sse_kms_client_headers = {
-        'x-amz-server-side-encryption': 'aws:kms',
-        'x-amz-server-side-encryption-aws-kms-key-id': kms_keyid
-    }
-    data = 'A'*100
-    key = 'testobj'
-
-    lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_kms_client_headers))
-    client.meta.events.register('before-call.s3.PutObject', lf)
-    client.put_object(Bucket=bucket_name, Key=key, Body=data)
-
-    response = client.get_object(Bucket=bucket_name, Key=key)
-    body = _get_body(response)
-    assert body == data
-
-@pytest.mark.encryption
-def test_sse_kms_no_key():
-    bucket_name = get_new_bucket()
-    client = get_client()
-    sse_kms_client_headers = {
-        'x-amz-server-side-encryption': 'aws:kms',
-    }
-    data = 'A'*100
-    key = 'testobj'
-
-    lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_kms_client_headers))
-    client.meta.events.register('before-call.s3.PutObject', lf)
-
-    e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key=key, Body=data)
-
-
-@pytest.mark.encryption
-def test_sse_kms_not_declared():
-    bucket_name = get_new_bucket()
-    client = get_client()
-    sse_kms_client_headers = {
-        'x-amz-server-side-encryption-aws-kms-key-id': 'testkey-2'
-    }
-    data = 'A'*100
-    key = 'testobj'
-
-    lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_kms_client_headers))
-    client.meta.events.register('before-call.s3.PutObject', lf)
-
-    e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key=key, Body=data)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 400
-
-@pytest.mark.encryption
-@pytest.mark.fails_on_dbstore
-def test_sse_kms_multipart_upload():
-    kms_keyid = get_main_kms_keyid()
-    bucket_name = get_new_bucket()
-    client = get_client()
-    key = "multipart_enc"
-    content_type = 'text/plain'
-    objlen = 30 * 1024 * 1024
-    metadata = {'foo': 'bar'}
-    enc_headers = {
-        'x-amz-server-side-encryption': 'aws:kms',
-        'x-amz-server-side-encryption-aws-kms-key-id': kms_keyid,
-        'Content-Type': content_type
-    }
-    resend_parts = []
-
-    (upload_id, data, parts) = _multipart_upload_enc(client, bucket_name, key, objlen,
-            part_size=5*1024*1024, init_headers=enc_headers, part_headers=enc_headers, metadata=metadata, resend_parts=resend_parts)
-
-    lf = (lambda **kwargs: kwargs['params']['headers'].update(enc_headers))
-    client.meta.events.register('before-call.s3.CompleteMultipartUpload', lf)
-    client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
-
-    response = client.list_objects_v2(Bucket=bucket_name, Prefix=key)
-    assert len(response['Contents']) == 1
-    assert response['Contents'][0]['Size'] == objlen
-
-    response = client.get_object(Bucket=bucket_name, Key=key)
-
-    assert response['Metadata'] == metadata
-    assert response['ResponseMetadata']['HTTPHeaders']['content-type'] == content_type
-
-    body = _get_body(response)
-    assert body == data
-    size = response['ContentLength']
-    assert len(body) == size
-
-    _check_content_using_range(key, bucket_name, data, 1000000)
-    _check_content_using_range(key, bucket_name, data, 10000000)
-
-
-@pytest.mark.encryption
-@pytest.mark.fails_on_dbstore
-def test_sse_kms_multipart_invalid_chunks_1():
-    kms_keyid = get_main_kms_keyid()
-    kms_keyid2 = get_secondary_kms_keyid()
-    bucket_name = get_new_bucket()
-    client = get_client()
-    key = "multipart_enc"
-    content_type = 'text/bla'
-    objlen = 30 * 1024 * 1024
-    metadata = {'foo': 'bar'}
-    init_headers = {
-        'x-amz-server-side-encryption': 'aws:kms',
-        'x-amz-server-side-encryption-aws-kms-key-id': kms_keyid,
-        'Content-Type': content_type
-    }
-    part_headers = {
-        'x-amz-server-side-encryption': 'aws:kms',
-        'x-amz-server-side-encryption-aws-kms-key-id': kms_keyid2
-    }
-    resend_parts = []
-
-    _multipart_upload_enc(client, bucket_name, key, objlen, part_size=5*1024*1024,
-            init_headers=init_headers, part_headers=part_headers, metadata=metadata,
-            resend_parts=resend_parts)
-
-
-@pytest.mark.encryption
-@pytest.mark.fails_on_dbstore
-def test_sse_kms_multipart_invalid_chunks_2():
-    kms_keyid = get_main_kms_keyid()
-    bucket_name = get_new_bucket()
-    client = get_client()
-    key = "multipart_enc"
-    content_type = 'text/plain'
-    objlen = 30 * 1024 * 1024
-    metadata = {'foo': 'bar'}
-    init_headers = {
-        'x-amz-server-side-encryption': 'aws:kms',
-        'x-amz-server-side-encryption-aws-kms-key-id': kms_keyid,
-        'Content-Type': content_type
-    }
-    part_headers = {
-        'x-amz-server-side-encryption': 'aws:kms',
-        'x-amz-server-side-encryption-aws-kms-key-id': 'testkey-not-present'
-    }
-    resend_parts = []
-
-    _multipart_upload_enc(client, bucket_name, key, objlen, part_size=5*1024*1024,
-            init_headers=init_headers, part_headers=part_headers, metadata=metadata,
-            resend_parts=resend_parts)
-
-
-@pytest.mark.encryption
-@pytest.mark.fails_on_dbstore
-def test_sse_kms_post_object_authenticated_request():
-    kms_keyid = get_main_kms_keyid()
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    url = _get_post_url(bucket_name)
-    utc = pytz.utc
-    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
-
-    policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
-    "conditions": [\
-    {"bucket": bucket_name},\
-    ["starts-with", "$key", "foo"],\
-    {"acl": "private"},\
-    ["starts-with", "$Content-Type", "text/plain"],\
-    ["starts-with", "$x-amz-server-side-encryption", ""], \
-    ["starts-with", "$x-amz-server-side-encryption-aws-kms-key-id", ""], \
-    ["content-length-range", 0, 1024]\
-    ]\
-    }
-
-
-    json_policy_document = json.JSONEncoder().encode(policy_document)
-    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
-    policy = base64.b64encode(bytes_json_policy_document)
-    aws_secret_access_key = get_main_aws_secret_key()
-    aws_access_key_id = get_main_aws_access_key()
-
-    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
-
-    payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
-    ("acl" , "private"),("signature" , signature),("policy" , policy),\
-    ("Content-Type" , "text/plain"),
-    ('x-amz-server-side-encryption', 'aws:kms'), \
-    ('x-amz-server-side-encryption-aws-kms-key-id', kms_keyid), \
-    ('file', ('bar'))])
-
-    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
-    assert r.status_code == 204
-
-    response = client.get_object(Bucket=bucket_name, Key='foo.txt')
-    body = _get_body(response)
-    assert body == 'bar'
-
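The SSE-KMS POST test above builds a browser-style form upload by hand: the policy document is JSON-encoded, base64-encoded, and signed with HMAC-SHA1 using the main account's secret key, and both values are posted as form fields next to the object data. A minimal sketch of that signing step as a standalone helper (the helper name is hypothetical and not part of the suite):

    import base64
    import hashlib
    import hmac
    import json

    def _sign_post_policy(policy_document, secret_key):
        # JSON-encode and base64-encode the policy, then sign the encoded
        # policy with HMAC-SHA1 keyed by the secret key, exactly as the
        # tests do inline above.
        policy = base64.b64encode(json.dumps(policy_document).encode('utf-8'))
        signature = base64.b64encode(
            hmac.new(secret_key.encode('utf-8'), policy, hashlib.sha1).digest())
        return policy, signature

The returned policy and signature correspond to the 'policy' and 'signature' form fields in the payloads used by these POST tests.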
-@pytest.mark.encryption
-@pytest.mark.fails_on_dbstore
-def test_sse_kms_transfer_1b():
-    kms_keyid = get_main_kms_keyid()
-    if kms_keyid is None:
-        pytest.skip('[s3 main] section missing kms_keyid')
-    _test_sse_kms_customer_write(1, key_id = kms_keyid)
-
-
-@pytest.mark.encryption
-@pytest.mark.fails_on_dbstore
-def test_sse_kms_transfer_1kb():
-    kms_keyid = get_main_kms_keyid()
-    if kms_keyid is None:
-        pytest.skip('[s3 main] section missing kms_keyid')
-    _test_sse_kms_customer_write(1024, key_id = kms_keyid)
-
-
-@pytest.mark.encryption
-@pytest.mark.fails_on_dbstore
-def test_sse_kms_transfer_1MB():
-    kms_keyid = get_main_kms_keyid()
-    if kms_keyid is None:
-        pytest.skip('[s3 main] section missing kms_keyid')
-    _test_sse_kms_customer_write(1024*1024, key_id = kms_keyid)
-
-
-@pytest.mark.encryption
-@pytest.mark.fails_on_dbstore
-def test_sse_kms_transfer_13b():
-    kms_keyid = get_main_kms_keyid()
-    if kms_keyid is None:
-        pytest.skip('[s3 main] section missing kms_keyid')
-    _test_sse_kms_customer_write(13, key_id = kms_keyid)
-
-
-@pytest.mark.encryption
-def test_sse_kms_read_declare():
-    bucket_name = get_new_bucket()
-    client = get_client()
-    sse_kms_client_headers = {
-        'x-amz-server-side-encryption': 'aws:kms',
-        'x-amz-server-side-encryption-aws-kms-key-id': 'testkey-1'
-    }
-    data = 'A'*100
-    key = 'testobj'
-
-    client.put_object(Bucket=bucket_name, Key=key, Body=data)
-    lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_kms_client_headers))
-    client.meta.events.register('before-call.s3.GetObject', lf)
-
-    e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key=key)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 400
-
-@pytest.mark.bucket_policy
-def test_bucket_policy():
-    bucket_name = get_new_bucket()
-    client = get_client()
-    key = 'asdf'
-    client.put_object(Bucket=bucket_name, Key=key, Body='asdf')
-
-    resource1 = "arn:aws:s3:::" + bucket_name
-    resource2 = "arn:aws:s3:::" + bucket_name + "/*"
-    policy_document = json.dumps(
-    {
-        "Version": "2012-10-17",
-        "Statement": [{
-        "Effect": "Allow",
-        "Principal": {"AWS": "*"},
-        "Action": "s3:ListBucket",
-        "Resource": [
-            "{}".format(resource1),
-            "{}".format(resource2)
-          ]
-        }]
-     })
-
-    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
-
-    alt_client = get_alt_client()
-    response = alt_client.list_objects(Bucket=bucket_name)
-    assert len(response['Contents']) == 1
-
-@pytest.mark.bucket_policy
-@pytest.mark.list_objects_v2
-def test_bucketv2_policy():
-    bucket_name = get_new_bucket()
-    client = get_client()
-    key = 'asdf'
-    client.put_object(Bucket=bucket_name, Key=key, Body='asdf')
-
-    resource1 = "arn:aws:s3:::" + bucket_name
-    resource2 = "arn:aws:s3:::" + bucket_name + "/*"
-    policy_document = json.dumps(
-    {
-        "Version": "2012-10-17",
-        "Statement": [{
-        "Effect": "Allow",
-        "Principal": {"AWS": "*"},
-        "Action": "s3:ListBucket",
-        "Resource": [
-            "{}".format(resource1),
-            "{}".format(resource2)
-          ]
-        }]
-     })
-
-    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
-
-    alt_client = get_alt_client()
-    response = alt_client.list_objects_v2(Bucket=bucket_name)
-    assert len(response['Contents']) == 1
-
-@pytest.mark.bucket_policy
-def test_bucket_policy_acl():
-    bucket_name = get_new_bucket()
-    client = get_client()
-    key = 'asdf'
-    client.put_object(Bucket=bucket_name, Key=key, Body='asdf')
-
-    resource1 = "arn:aws:s3:::" + bucket_name
-    resource2 = "arn:aws:s3:::" + bucket_name + "/*"
-    policy_document =  json.dumps(
-    {
-        "Version": "2012-10-17",
-        "Statement": [{
-        "Effect": "Deny",
-        "Principal": {"AWS": "*"},
-        "Action": "s3:ListBucket",
-        "Resource": [
-            "{}".format(resource1),
-            "{}".format(resource2)
-          ]
-        }]
-     })
-
-    client.put_bucket_acl(Bucket=bucket_name, ACL='authenticated-read')
-    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
-
-    alt_client = get_alt_client()
-    e = assert_raises(ClientError, alt_client.list_objects, Bucket=bucket_name)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 403
-    assert error_code == 'AccessDenied'
-
-    client.delete_bucket_policy(Bucket=bucket_name)
-    client.put_bucket_acl(Bucket=bucket_name, ACL='public-read')
-
-@pytest.mark.bucket_policy
-@pytest.mark.list_objects_v2
-def test_bucketv2_policy_acl():
-    bucket_name = get_new_bucket()
-    client = get_client()
-    key = 'asdf'
-    client.put_object(Bucket=bucket_name, Key=key, Body='asdf')
-
-    resource1 = "arn:aws:s3:::" + bucket_name
-    resource2 = "arn:aws:s3:::" + bucket_name + "/*"
-    policy_document =  json.dumps(
-    {
-        "Version": "2012-10-17",
-        "Statement": [{
-        "Effect": "Deny",
-        "Principal": {"AWS": "*"},
-        "Action": "s3:ListBucket",
-        "Resource": [
-            "{}".format(resource1),
-            "{}".format(resource2)
-          ]
-        }]
-     })
-
-    client.put_bucket_acl(Bucket=bucket_name, ACL='authenticated-read')
-    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
-
-    alt_client = get_alt_client()
-    e = assert_raises(ClientError, alt_client.list_objects_v2, Bucket=bucket_name)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 403
-    assert error_code == 'AccessDenied'
-
-    client.delete_bucket_policy(Bucket=bucket_name)
-    client.put_bucket_acl(Bucket=bucket_name, ACL='public-read')
-
-@pytest.mark.bucket_policy
-# TODO: remove this fails_on_rgw when I fix it
-@pytest.mark.fails_on_rgw
-def test_bucket_policy_different_tenant():
-    bucket_name = get_new_bucket()
-    client = get_client()
-    key = 'asdf'
-    client.put_object(Bucket=bucket_name, Key=key, Body='asdf')
-
-    resource1 = "arn:aws:s3::*:" + bucket_name
-    resource2 = "arn:aws:s3::*:" + bucket_name + "/*"
-    policy_document = json.dumps(
-    {
-        "Version": "2012-10-17",
-        "Statement": [{
-        "Effect": "Allow",
-        "Principal": {"AWS": "*"},
-        "Action": "s3:ListBucket",
-        "Resource": [
-            "{}".format(resource1),
-            "{}".format(resource2)
-          ]
-        }]
-     })
-
-    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
-
-    # TODO: figure out how to change the bucketname
-    def change_bucket_name(**kwargs):
-        kwargs['params']['url'] = "http://localhost:8000/:{bucket_name}?encoding-type=url".format(bucket_name=bucket_name)
-        kwargs['params']['url_path'] = "/:{bucket_name}".format(bucket_name=bucket_name)
-        kwargs['params']['context']['signing']['bucket'] = ":{bucket_name}".format(bucket_name=bucket_name)
-        print(kwargs['request_signer'])
-        print(kwargs)
-
-    #bucket_name = ":" + bucket_name
-    tenant_client = get_tenant_client()
-    tenant_client.meta.events.register('before-call.s3.ListObjects', change_bucket_name)
-    response = tenant_client.list_objects(Bucket=bucket_name)
-    #alt_client = get_alt_client()
-    #response = alt_client.list_objects(Bucket=bucket_name)
-
-    assert len(response['Contents']) == 1
-
-@pytest.mark.bucket_policy
-# TODO: remove this fails_on_rgw when I fix it
-@pytest.mark.fails_on_rgw
-@pytest.mark.list_objects_v2
-def test_bucketv2_policy_different_tenant():
-    bucket_name = get_new_bucket()
-    client = get_client()
-    key = 'asdf'
-    client.put_object(Bucket=bucket_name, Key=key, Body='asdf')
-
-    resource1 = "arn:aws:s3::*:" + bucket_name
-    resource2 = "arn:aws:s3::*:" + bucket_name + "/*"
-    policy_document = json.dumps(
-    {
-        "Version": "2012-10-17",
-        "Statement": [{
-        "Effect": "Allow",
-        "Principal": {"AWS": "*"},
-        "Action": "s3:ListBucket",
-        "Resource": [
-            "{}".format(resource1),
-            "{}".format(resource2)
-          ]
-        }]
-     })
-
-    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
-
-    # TODO: figure out how to change the bucketname
-    def change_bucket_name(**kwargs):
-        kwargs['params']['url'] = "http://localhost:8000/:{bucket_name}?encoding-type=url".format(bucket_name=bucket_name)
-        kwargs['params']['url_path'] = "/:{bucket_name}".format(bucket_name=bucket_name)
-        kwargs['params']['context']['signing']['bucket'] = ":{bucket_name}".format(bucket_name=bucket_name)
-        print(kwargs['request_signer'])
-        print(kwargs)
-
-    #bucket_name = ":" + bucket_name
-    tenant_client = get_tenant_client()
-    tenant_client.meta.events.register('before-call.s3.ListObjects', change_bucket_name)
-    response = tenant_client.list_objects_v2(Bucket=bucket_name)
-    #alt_client = get_alt_client()
-    #response = alt_client.list_objects_v2(Bucket=bucket_name)
-
-    assert len(response['Contents']) == 1
-
-@pytest.mark.bucket_policy
-def test_bucket_policy_another_bucket():
-    bucket_name = get_new_bucket()
-    bucket_name2 = get_new_bucket()
-    client = get_client()
-    key = 'asdf'
-    key2 = 'abcd'
-    client.put_object(Bucket=bucket_name, Key=key, Body='asdf')
-    client.put_object(Bucket=bucket_name2, Key=key2, Body='abcd')
-    policy_document = json.dumps(
-    {
-        "Version": "2012-10-17",
-        "Statement": [{
-        "Effect": "Allow",
-        "Principal": {"AWS": "*"},
-        "Action": "s3:ListBucket",
-        "Resource": [
-            "arn:aws:s3:::*",
-            "arn:aws:s3:::*/*"
-          ]
-        }]
-     })
-
-    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
-    response = client.get_bucket_policy(Bucket=bucket_name)
-    response_policy = response['Policy']
-
-    client.put_bucket_policy(Bucket=bucket_name2, Policy=response_policy)
-
-    alt_client = get_alt_client()
-    response = alt_client.list_objects(Bucket=bucket_name)
-    assert len(response['Contents']) == 1
-
-    alt_client = get_alt_client()
-    response = alt_client.list_objects(Bucket=bucket_name2)
-    assert len(response['Contents']) == 1
-
-@pytest.mark.bucket_policy
-@pytest.mark.list_objects_v2
-def test_bucketv2_policy_another_bucket():
-    bucket_name = get_new_bucket()
-    bucket_name2 = get_new_bucket()
-    client = get_client()
-    key = 'asdf'
-    key2 = 'abcd'
-    client.put_object(Bucket=bucket_name, Key=key, Body='asdf')
-    client.put_object(Bucket=bucket_name2, Key=key2, Body='abcd')
-    policy_document = json.dumps(
-    {
-        "Version": "2012-10-17",
-        "Statement": [{
-        "Effect": "Allow",
-        "Principal": {"AWS": "*"},
-        "Action": "s3:ListBucket",
-        "Resource": [
-            "arn:aws:s3:::*",
-            "arn:aws:s3:::*/*"
-          ]
-        }]
-     })
-
-    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
-    response = client.get_bucket_policy(Bucket=bucket_name)
-    response_policy = response['Policy']
-
-    client.put_bucket_policy(Bucket=bucket_name2, Policy=response_policy)
-
-    alt_client = get_alt_client()
-    response = alt_client.list_objects_v2(Bucket=bucket_name)
-    assert len(response['Contents']) == 1
-
-    alt_client = get_alt_client()
-    response = alt_client.list_objects_v2(Bucket=bucket_name2)
-    assert len(response['Contents']) == 1
-
-@pytest.mark.bucket_policy
-# TODO: remove this fails_on_rgw when I fix it
-@pytest.mark.fails_on_rgw
-def test_bucket_policy_set_condition_operator_end_with_IfExists():
-    bucket_name = get_new_bucket()
-    client = get_client()
-    key = 'foo'
-    client.put_object(Bucket=bucket_name, Key=key)
-    policy = '''{
-      "Version":"2012-10-17",
-      "Statement": [{
-        "Sid": "Allow Public Access to All Objects",
-        "Effect": "Allow",
-        "Principal": "*",
-        "Action": "s3:GetObject",
-        "Condition": {
-                    "StringLikeIfExists": {
-                        "aws:Referer": "http://www.example.com/*"
-                    }
-                },
-        "Resource": "arn:aws:s3:::%s/*"
-      }
-     ]
-    }''' % bucket_name
-    # boto3.set_stream_logger(name='botocore')
-    client.put_bucket_policy(Bucket=bucket_name, Policy=policy)
-
-    request_headers={'referer': 'http://www.example.com/'}
-
-    lf = (lambda **kwargs: kwargs['params']['headers'].update(request_headers))
-    client.meta.events.register('before-call.s3.GetObject', lf)
-
-    response = client.get_object(Bucket=bucket_name, Key=key)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    request_headers={'referer': 'http://www.example.com/index.html'}
-
-    lf = (lambda **kwargs: kwargs['params']['headers'].update(request_headers))
-    client.meta.events.register('before-call.s3.GetObject', lf)
-
-    response = client.get_object(Bucket=bucket_name, Key=key)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    # the 'referer' headers need to be removed for this one
-    #response = client.get_object(Bucket=bucket_name, Key=key)
-    #assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    request_headers={'referer': 'http://example.com'}
-
-    lf = (lambda **kwargs: kwargs['params']['headers'].update(request_headers))
-    client.meta.events.register('before-call.s3.GetObject', lf)
-
-    # TODO: Compare Requests sent in Boto3, Wireshark, RGW Log for both boto and boto3
-    e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key=key)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 403
-
-    response =  client.get_bucket_policy(Bucket=bucket_name)
-    print(response)
-
-def _create_simple_tagset(count):
-    tagset = []
-    for i in range(count):
-        tagset.append({'Key': str(i), 'Value': str(i)})
-
-    return {'TagSet': tagset}
-
-def _make_random_string(size):
-    return ''.join(random.choice(string.ascii_letters) for _ in range(size))
-
-
-@pytest.mark.tagging
-@pytest.mark.fails_on_dbstore
-def test_get_obj_tagging():
-    key = 'testputtags'
-    bucket_name = _create_key_with_random_content(key)
-    client = get_client()
-
-    input_tagset = _create_simple_tagset(2)
-    response = client.put_object_tagging(Bucket=bucket_name, Key=key, Tagging=input_tagset)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    response = client.get_object_tagging(Bucket=bucket_name, Key=key)
-    assert response['TagSet'] == input_tagset['TagSet']
-
-
-@pytest.mark.tagging
-def test_get_obj_head_tagging():
-    key = 'testputtags'
-    bucket_name = _create_key_with_random_content(key)
-    client = get_client()
-    count = 2
-
-    input_tagset = _create_simple_tagset(count)
-    response = client.put_object_tagging(Bucket=bucket_name, Key=key, Tagging=input_tagset)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    response = client.head_object(Bucket=bucket_name, Key=key)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-    assert response['ResponseMetadata']['HTTPHeaders']['x-amz-tagging-count'] == str(count)
-
-@pytest.mark.tagging
-@pytest.mark.fails_on_dbstore
-def test_put_max_tags():
-    key = 'testputmaxtags'
-    bucket_name = _create_key_with_random_content(key)
-    client = get_client()
-
-    input_tagset = _create_simple_tagset(10)
-    response = client.put_object_tagging(Bucket=bucket_name, Key=key, Tagging=input_tagset)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    response = client.get_object_tagging(Bucket=bucket_name, Key=key)
-    assert response['TagSet'] == input_tagset['TagSet']
-
-@pytest.mark.tagging
-def test_put_excess_tags():
-    key = 'testputmaxtags'
-    bucket_name = _create_key_with_random_content(key)
-    client = get_client()
-
-    input_tagset = _create_simple_tagset(11)
-    e = assert_raises(ClientError, client.put_object_tagging, Bucket=bucket_name, Key=key, Tagging=input_tagset)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 400
-    assert error_code == 'InvalidTag'
-
-    response = client.get_object_tagging(Bucket=bucket_name, Key=key)
-    assert len(response['TagSet']) == 0
-
-@pytest.mark.tagging
-def test_put_max_kvsize_tags():
-    key = 'testputmaxkeysize'
-    bucket_name = _create_key_with_random_content(key)
-    client = get_client()
-
-    tagset = []
-    for i in range(10):
-        k = _make_random_string(128)
-        v = _make_random_string(256)
-        tagset.append({'Key': k, 'Value': v})
-
-    input_tagset = {'TagSet': tagset}
-
-    response = client.put_object_tagging(Bucket=bucket_name, Key=key, Tagging=input_tagset)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    response = client.get_object_tagging(Bucket=bucket_name, Key=key)
-    for kv_pair in response['TagSet']:
-        assert kv_pair in input_tagset['TagSet']
-
-@pytest.mark.tagging
-def test_put_excess_key_tags():
-    key = 'testputexcesskeytags'
-    bucket_name = _create_key_with_random_content(key)
-    client = get_client()
-
-    tagset = []
-    for i in range(10):
-        k = _make_random_string(129)
-        v = _make_random_string(256)
-        tagset.append({'Key': k, 'Value': v})
-
-    input_tagset = {'TagSet': tagset}
-
-    e = assert_raises(ClientError, client.put_object_tagging, Bucket=bucket_name, Key=key, Tagging=input_tagset)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 400
-    assert error_code == 'InvalidTag'
-
-    response = client.get_object_tagging(Bucket=bucket_name, Key=key)
-    assert len(response['TagSet']) == 0
-
-@pytest.mark.tagging
-def test_put_excess_val_tags():
-    key = 'testputexcesskeytags'
-    bucket_name = _create_key_with_random_content(key)
-    client = get_client()
-
-    tagset = []
-    for i in range(10):
-        k = _make_random_string(128)
-        v = _make_random_string(257)
-        tagset.append({'Key': k, 'Value': v})
-
-    input_tagset = {'TagSet': tagset}
-
-    e = assert_raises(ClientError, client.put_object_tagging, Bucket=bucket_name, Key=key, Tagging=input_tagset)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 400
-    assert error_code == 'InvalidTag'
-
-    response = client.get_object_tagging(Bucket=bucket_name, Key=key)
-    assert len(response['TagSet']) == 0
-
-@pytest.mark.tagging
-@pytest.mark.fails_on_dbstore
-def test_put_modify_tags():
-    key = 'testputmodifytags'
-    bucket_name = _create_key_with_random_content(key)
-    client = get_client()
-
-    tagset = []
-    tagset.append({'Key': 'key', 'Value': 'val'})
-    tagset.append({'Key': 'key2', 'Value': 'val2'})
-
-    input_tagset = {'TagSet': tagset}
-
-    response = client.put_object_tagging(Bucket=bucket_name, Key=key, Tagging=input_tagset)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    response = client.get_object_tagging(Bucket=bucket_name, Key=key)
-    assert response['TagSet'] == input_tagset['TagSet']
-
-    tagset2 = []
-    tagset2.append({'Key': 'key3', 'Value': 'val3'})
-
-    input_tagset2 = {'TagSet': tagset2}
-
-    response = client.put_object_tagging(Bucket=bucket_name, Key=key, Tagging=input_tagset2)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    response = client.get_object_tagging(Bucket=bucket_name, Key=key)
-    assert response['TagSet'] == input_tagset2['TagSet']
-
-@pytest.mark.tagging
-@pytest.mark.fails_on_dbstore
-def test_put_delete_tags():
-    key = 'testputmodifytags'
-    bucket_name = _create_key_with_random_content(key)
-    client = get_client()
-
-    input_tagset = _create_simple_tagset(2)
-    response = client.put_object_tagging(Bucket=bucket_name, Key=key, Tagging=input_tagset)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    response = client.get_object_tagging(Bucket=bucket_name, Key=key)
-    assert response['TagSet'] == input_tagset['TagSet']
-
-    response = client.delete_object_tagging(Bucket=bucket_name, Key=key)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 204
-
-    response = client.get_object_tagging(Bucket=bucket_name, Key=key)
-    assert len(response['TagSet']) == 0
-
-@pytest.mark.tagging
-@pytest.mark.fails_on_dbstore
-def test_post_object_tags_anonymous_request():
-    bucket_name = get_new_bucket_name()
-    client = get_client()
-    url = _get_post_url(bucket_name)
-    client.create_bucket(ACL='public-read-write', Bucket=bucket_name)
-
-    key_name = "foo.txt"
-    input_tagset = _create_simple_tagset(2)
-    # xml_input_tagset is the same as input_tagset, expressed as XML.
-    # There is no simple way to convert input_tagset to XML like there is in the boto2 tests
-    xml_input_tagset = "<Tagging><TagSet><Tag><Key>0</Key><Value>0</Value></Tag><Tag><Key>1</Key><Value>1</Value></Tag></TagSet></Tagging>"
-
-
-    payload = OrderedDict([
-        ("key" , key_name),
-        ("acl" , "public-read"),
-        ("Content-Type" , "text/plain"),
-        ("tagging", xml_input_tagset),
-        ('file', ('bar')),
-    ])
-
-    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
-    assert r.status_code == 204
-    response = client.get_object(Bucket=bucket_name, Key=key_name)
-    body = _get_body(response)
-    assert body == 'bar'
-
-    response = client.get_object_tagging(Bucket=bucket_name, Key=key_name)
-    assert response['TagSet'] == input_tagset['TagSet']
-
-@pytest.mark.tagging
-def test_post_object_tags_authenticated_request():
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    url = _get_post_url(bucket_name)
-    utc = pytz.utc
-    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
-
-    policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
-    "conditions": [
-    {"bucket": bucket_name},
-        ["starts-with", "$key", "foo"],
-        {"acl": "private"},
-        ["starts-with", "$Content-Type", "text/plain"],
-        ["content-length-range", 0, 1024],
-        ["starts-with", "$tagging", ""]
-    ]}
-
-    # xml_input_tagset is the same as `input_tagset = _create_simple_tagset(2)`, expressed as XML.
-    # There is no simple way to convert input_tagset to XML like there is in the boto2 tests
-    xml_input_tagset = "<Tagging><TagSet><Tag><Key>0</Key><Value>0</Value></Tag><Tag><Key>1</Key><Value>1</Value></Tag></TagSet></Tagging>"
-
-    json_policy_document = json.JSONEncoder().encode(policy_document)
-    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
-    policy = base64.b64encode(bytes_json_policy_document)
-    aws_secret_access_key = get_main_aws_secret_key()
-    aws_access_key_id = get_main_aws_access_key()
-
-    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
-
-    payload = OrderedDict([
-        ("key" , "foo.txt"),
-        ("AWSAccessKeyId" , aws_access_key_id),\
-        ("acl" , "private"),("signature" , signature),("policy" , policy),\
-        ("tagging", xml_input_tagset),
-        ("Content-Type" , "text/plain"),
-        ('file', ('bar'))])
-
-    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
-    assert r.status_code == 204
-    response = client.get_object(Bucket=bucket_name, Key='foo.txt')
-    body = _get_body(response)
-    assert body == 'bar'
-
-
-@pytest.mark.tagging
-@pytest.mark.fails_on_dbstore
-def test_put_obj_with_tags():
-    bucket_name = get_new_bucket()
-    client = get_client()
-    key = 'testtagobj1'
-    data = 'A'*100
-
-    tagset = []
-    tagset.append({'Key': 'bar', 'Value': ''})
-    tagset.append({'Key': 'foo', 'Value': 'bar'})
-
-    put_obj_tag_headers = {
-        'x-amz-tagging' : 'foo=bar&bar'
-    }
-
-    lf = (lambda **kwargs: kwargs['params']['headers'].update(put_obj_tag_headers))
-    client.meta.events.register('before-call.s3.PutObject', lf)
-
-    client.put_object(Bucket=bucket_name, Key=key, Body=data)
-    response = client.get_object(Bucket=bucket_name, Key=key)
-    body = _get_body(response)
-    assert body == data
-
-    response = client.get_object_tagging(Bucket=bucket_name, Key=key)
-    response_tagset = response['TagSet']
-    assert response_tagset == tagset
-
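test_put_obj_with_tags above passes tags in the 'x-amz-tagging' request header, which uses query-string encoding: 'foo=bar&bar' means the tag foo=bar plus a tag named bar with an empty value, which is why the expected tagset contains {'Key': 'bar', 'Value': ''}. A minimal sketch of that mapping (hypothetical helper, not part of the suite):

    from urllib.parse import parse_qsl

    def _tagging_header_to_tagset(header_value):
        # keep_blank_values=True preserves 'bar' as a tag with an empty value
        return [{'Key': k, 'Value': v}
                for k, v in parse_qsl(header_value, keep_blank_values=True)]

    assert _tagging_header_to_tagset('foo=bar&bar') == [
        {'Key': 'foo', 'Value': 'bar'},
        {'Key': 'bar', 'Value': ''},
    ]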
-def _make_arn_resource(path="*"):
-    return "arn:aws:s3:::{}".format(path)
-
-@pytest.mark.tagging
-@pytest.mark.bucket_policy
-@pytest.mark.fails_on_dbstore
-def test_get_tags_acl_public():
-    key = 'testputtagsacl'
-    bucket_name = _create_key_with_random_content(key)
-    client = get_client()
-
-    resource = _make_arn_resource("{}/{}".format(bucket_name, key))
-    policy_document = make_json_policy("s3:GetObjectTagging",
-                                       resource)
-
-    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
-
-    input_tagset = _create_simple_tagset(10)
-    response = client.put_object_tagging(Bucket=bucket_name, Key=key, Tagging=input_tagset)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    alt_client = get_alt_client()
-
-    response = alt_client.get_object_tagging(Bucket=bucket_name, Key=key)
-    assert response['TagSet'] == input_tagset['TagSet']
-
-@pytest.mark.tagging
-@pytest.mark.bucket_policy
-@pytest.mark.fails_on_dbstore
-def test_put_tags_acl_public():
-    key = 'testputtagsacl'
-    bucket_name = _create_key_with_random_content(key)
-    client = get_client()
-
-    resource = _make_arn_resource("{}/{}".format(bucket_name, key))
-    policy_document = make_json_policy("s3:PutObjectTagging",
-                                       resource)
-
-    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
-
-    input_tagset = _create_simple_tagset(10)
-    alt_client = get_alt_client()
-    response = alt_client.put_object_tagging(Bucket=bucket_name, Key=key, Tagging=input_tagset)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    response = client.get_object_tagging(Bucket=bucket_name, Key=key)
-    assert response['TagSet'] == input_tagset['TagSet']
-
-@pytest.mark.tagging
-@pytest.mark.bucket_policy
-def test_delete_tags_obj_public():
-    key = 'testputtagsacl'
-    bucket_name = _create_key_with_random_content(key)
-    client = get_client()
-
-    resource = _make_arn_resource("{}/{}".format(bucket_name, key))
-    policy_document = make_json_policy("s3:DeleteObjectTagging",
-                                       resource)
-
-    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
-
-    input_tagset = _create_simple_tagset(10)
-    response = client.put_object_tagging(Bucket=bucket_name, Key=key, Tagging=input_tagset)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    alt_client = get_alt_client()
-
-    response = alt_client.delete_object_tagging(Bucket=bucket_name, Key=key)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 204
-
-    response = client.get_object_tagging(Bucket=bucket_name, Key=key)
-    assert len(response['TagSet']) == 0
-
-def test_versioning_bucket_atomic_upload_return_version_id():
-    bucket_name = get_new_bucket()
-    client = get_client()
-    key = 'bar'
-
-    # for a versioning-enabled bucket, a non-empty version-id should be returned
-    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
-    response = client.put_object(Bucket=bucket_name, Key=key)
-    version_id = response['VersionId']
-
-    response  = client.list_object_versions(Bucket=bucket_name)
-    versions = response['Versions']
-    for version in versions:
-        assert version['VersionId'] == version_id
-
-
-    # for a versioning-default bucket, no version-id should be returned.
-    bucket_name = get_new_bucket()
-    key = 'baz'
-    response = client.put_object(Bucket=bucket_name, Key=key)
-    assert not 'VersionId' in response
-
-    # for a versioning-suspended bucket, no version-id should be returned.
-    bucket_name = get_new_bucket()
-    key = 'baz'
-    check_configure_versioning_retry(bucket_name, "Suspended", "Suspended")
-    response = client.put_object(Bucket=bucket_name, Key=key)
-    assert not 'VersionId' in response
-
-def test_versioning_bucket_multipart_upload_return_version_id():
-    content_type='text/bla'
-    objlen = 30 * 1024 * 1024
-
-    bucket_name = get_new_bucket()
-    client = get_client()
-    key = 'bar'
-    metadata={'foo': 'baz'}
-
-    # for a versioning-enabled bucket, a non-empty version-id should be returned
-    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
-
-    (upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key, size=objlen, client=client, content_type=content_type, metadata=metadata)
-
-    response = client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
-    version_id = response['VersionId']
-
-    response  = client.list_object_versions(Bucket=bucket_name)
-    versions = response['Versions']
-    for version in versions:
-        assert version['VersionId'] == version_id
-
-    # for a versioning-default bucket, no version-id should be returned.
-    bucket_name = get_new_bucket()
-    key = 'baz'
-
-    (upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key, size=objlen, client=client, content_type=content_type, metadata=metadata)
-
-    response = client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
-    assert not 'VersionId' in response
-
-    # for a versioning-suspended bucket, no version-id should be returned
-    bucket_name = get_new_bucket()
-    key = 'foo'
-    check_configure_versioning_retry(bucket_name, "Suspended", "Suspended")
-
-    (upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key, size=objlen, client=client, content_type=content_type, metadata=metadata)
-
-    response = client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
-    assert not 'VersionId' in response
-
-@pytest.mark.tagging
-@pytest.mark.bucket_policy
-@pytest.mark.fails_on_dbstore
-def test_bucket_policy_get_obj_existing_tag():
-    bucket_name = _create_objects(keys=['publictag', 'privatetag', 'invalidtag'])
-    client = get_client()
-
-    tag_conditional = {"StringEquals": {
-        "s3:ExistingObjectTag/security" : "public"
-    }}
-
-    resource = _make_arn_resource("{}/{}".format(bucket_name, "*"))
-    policy_document = make_json_policy("s3:GetObject",
-                                       resource,
-                                       conditions=tag_conditional)
-
-
-    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
-    tagset = []
-    tagset.append({'Key': 'security', 'Value': 'public'})
-    tagset.append({'Key': 'foo', 'Value': 'bar'})
-
-    input_tagset = {'TagSet': tagset}
-
-    response = client.put_object_tagging(Bucket=bucket_name, Key='publictag', Tagging=input_tagset)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    tagset2 = []
-    tagset2.append({'Key': 'security', 'Value': 'private'})
-
-    input_tagset = {'TagSet': tagset2}
-
-    response = client.put_object_tagging(Bucket=bucket_name, Key='privatetag', Tagging=input_tagset)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    tagset3 = []
-    tagset3.append({'Key': 'security1', 'Value': 'public'})
-
-    input_tagset = {'TagSet': tagset3}
-
-    response = client.put_object_tagging(Bucket=bucket_name, Key='invalidtag', Tagging=input_tagset)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    alt_client = get_alt_client()
-    response = alt_client.get_object(Bucket=bucket_name, Key='publictag')
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    e = assert_raises(ClientError, alt_client.get_object, Bucket=bucket_name, Key='privatetag')
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 403
-
-    e = assert_raises(ClientError, alt_client.get_object, Bucket=bucket_name, Key='invalidtag')
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 403
-
-@pytest.mark.tagging
-@pytest.mark.bucket_policy
-@pytest.mark.fails_on_dbstore
-def test_bucket_policy_get_obj_tagging_existing_tag():
-    bucket_name = _create_objects(keys=['publictag', 'privatetag', 'invalidtag'])
-    client = get_client()
-
-    tag_conditional = {"StringEquals": {
-        "s3:ExistingObjectTag/security" : "public"
-    }}
-
-    resource = _make_arn_resource("{}/{}".format(bucket_name, "*"))
-    policy_document = make_json_policy("s3:GetObjectTagging",
-                                       resource,
-                                       conditions=tag_conditional)
-
-
-    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
-    tagset = []
-    tagset.append({'Key': 'security', 'Value': 'public'})
-    tagset.append({'Key': 'foo', 'Value': 'bar'})
-
-    input_tagset = {'TagSet': tagset}
-
-    response = client.put_object_tagging(Bucket=bucket_name, Key='publictag', Tagging=input_tagset)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    tagset2 = []
-    tagset2.append({'Key': 'security', 'Value': 'private'})
-
-    input_tagset = {'TagSet': tagset2}
-
-    response = client.put_object_tagging(Bucket=bucket_name, Key='privatetag', Tagging=input_tagset)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    tagset3 = []
-    tagset3.append({'Key': 'security1', 'Value': 'public'})
-
-    input_tagset = {'TagSet': tagset3}
-
-    response = client.put_object_tagging(Bucket=bucket_name, Key='invalidtag', Tagging=input_tagset)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    alt_client = get_alt_client()
-    response = alt_client.get_object_tagging(Bucket=bucket_name, Key='publictag')
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    # A get object itself should fail since we allowed only GetObjectTagging
-    e = assert_raises(ClientError, alt_client.get_object, Bucket=bucket_name, Key='publictag')
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 403
-
-    e = assert_raises(ClientError, alt_client.get_object_tagging, Bucket=bucket_name, Key='privatetag')
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 403
-
-
-    e = assert_raises(ClientError, alt_client.get_object_tagging, Bucket=bucket_name, Key='invalidtag')
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 403
-
-
-@pytest.mark.tagging
-@pytest.mark.bucket_policy
-@pytest.mark.fails_on_dbstore
-def test_bucket_policy_put_obj_tagging_existing_tag():
-    bucket_name = _create_objects(keys=['publictag', 'privatetag', 'invalidtag'])
-    client = get_client()
-
-    tag_conditional = {"StringEquals": {
-        "s3:ExistingObjectTag/security" : "public"
-    }}
-
-    resource = _make_arn_resource("{}/{}".format(bucket_name, "*"))
-    policy_document = make_json_policy("s3:PutObjectTagging",
-                                       resource,
-                                       conditions=tag_conditional)
-
-
-    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
-    tagset = []
-    tagset.append({'Key': 'security', 'Value': 'public'})
-    tagset.append({'Key': 'foo', 'Value': 'bar'})
-
-    input_tagset = {'TagSet': tagset}
-
-    response = client.put_object_tagging(Bucket=bucket_name, Key='publictag', Tagging=input_tagset)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    tagset2 = []
-    tagset2.append({'Key': 'security', 'Value': 'private'})
-
-    input_tagset = {'TagSet': tagset2}
-
-    response = client.put_object_tagging(Bucket=bucket_name, Key='privatetag', Tagging=input_tagset)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    alt_client = get_alt_client()
-    # PUT requests with object tagging are a bit weird: if a later request
-    # overwrites the tags and drops the tag that the policy condition requires
-    # to already exist, subsequent put requests will fail
-
-    testtagset1 = []
-    testtagset1.append({'Key': 'security', 'Value': 'public'})
-    testtagset1.append({'Key': 'foo', 'Value': 'bar'})
-
-    input_tagset = {'TagSet': testtagset1}
-
-    response = alt_client.put_object_tagging(Bucket=bucket_name, Key='publictag', Tagging=input_tagset)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    e = assert_raises(ClientError, alt_client.put_object_tagging, Bucket=bucket_name, Key='privatetag', Tagging=input_tagset)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 403
-
-    testtagset2 = []
-    testtagset2.append({'Key': 'security', 'Value': 'private'})
-
-    input_tagset = {'TagSet': testtagset2}
-
-    response = alt_client.put_object_tagging(Bucket=bucket_name, Key='publictag', Tagging=input_tagset)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    # Now try putting the original tags again, this should fail
-    input_tagset = {'TagSet': testtagset1}
-
-    e = assert_raises(ClientError, alt_client.put_object_tagging, Bucket=bucket_name, Key='publictag', Tagging=input_tagset)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 403
-
-
-@pytest.mark.tagging
-@pytest.mark.bucket_policy
-@pytest.mark.fails_on_dbstore
-def test_bucket_policy_put_obj_copy_source():
-    bucket_name = _create_objects(keys=['public/foo', 'public/bar', 'private/foo'])
-    client = get_client()
-
-    src_resource = _make_arn_resource("{}/{}".format(bucket_name, "*"))
-    policy_document = make_json_policy("s3:GetObject",
-                                       src_resource)
-
-    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
-
-    bucket_name2 = get_new_bucket()
-
-    tag_conditional = {"StringLike": {
-        "s3:x-amz-copy-source" : bucket_name + "/public/*"
-    }}
-
-    resource = _make_arn_resource("{}/{}".format(bucket_name2, "*"))
-    policy_document = make_json_policy("s3:PutObject",
-                                       resource,
-                                       conditions=tag_conditional)
-
-    client.put_bucket_policy(Bucket=bucket_name2, Policy=policy_document)
-
-    alt_client = get_alt_client()
-    copy_source = {'Bucket': bucket_name, 'Key': 'public/foo'}
-
-    alt_client.copy_object(Bucket=bucket_name2, CopySource=copy_source, Key='new_foo')
-
-    # This is possible because we are still the owner, see the grants with
-    # policy on how to do this right
-    response = alt_client.get_object(Bucket=bucket_name2, Key='new_foo')
-    body = _get_body(response)
-    assert body == 'public/foo'
-
-    copy_source = {'Bucket': bucket_name, 'Key': 'public/bar'}
-    alt_client.copy_object(Bucket=bucket_name2, CopySource=copy_source, Key='new_foo2')
-
-    response = alt_client.get_object(Bucket=bucket_name2, Key='new_foo2')
-    body = _get_body(response)
-    assert body == 'public/bar'
-
-    copy_source = {'Bucket': bucket_name, 'Key': 'private/foo'}
-    check_access_denied(alt_client.copy_object, Bucket=bucket_name2, CopySource=copy_source, Key='new_foo2')
-
-@pytest.mark.tagging
-@pytest.mark.bucket_policy
-@pytest.mark.fails_on_dbstore
-def test_bucket_policy_put_obj_copy_source_meta():
-    src_bucket_name = _create_objects(keys=['public/foo', 'public/bar'])
-    client = get_client()
-
-    src_resource = _make_arn_resource("{}/{}".format(src_bucket_name, "*"))
-    policy_document = make_json_policy("s3:GetObject",
-                                       src_resource)
-
-    client.put_bucket_policy(Bucket=src_bucket_name, Policy=policy_document)
-
-    bucket_name = get_new_bucket()
-
-    tag_conditional = {"StringEquals": {
-        "s3:x-amz-metadata-directive" : "COPY"
-    }}
-
-    resource = _make_arn_resource("{}/{}".format(bucket_name, "*"))
-    policy_document = make_json_policy("s3:PutObject",
-                                       resource,
-                                       conditions=tag_conditional)
-
-    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
-
-    alt_client = get_alt_client()
-
-    lf = (lambda **kwargs: kwargs['params']['headers'].update({"x-amz-metadata-directive": "COPY"}))
-    alt_client.meta.events.register('before-call.s3.CopyObject', lf)
-
-    copy_source = {'Bucket': src_bucket_name, 'Key': 'public/foo'}
-    alt_client.copy_object(Bucket=bucket_name, CopySource=copy_source, Key='new_foo')
-
-    # This is possible because we are still the owner, see the grants with
-    # policy on how to do this right
-    response = alt_client.get_object(Bucket=bucket_name, Key='new_foo')
-    body = _get_body(response)
-    assert body == 'public/foo'
-
-    # remove the x-amz-metadata-directive header
-    def remove_header(**kwargs):
-        if ("x-amz-metadata-directive" in kwargs['params']['headers']):
-            del kwargs['params']['headers']["x-amz-metadata-directive"]
-
-    alt_client.meta.events.register('before-call.s3.CopyObject', remove_header)
-
-    copy_source = {'Bucket': src_bucket_name, 'Key': 'public/bar'}
-    check_access_denied(alt_client.copy_object, Bucket=bucket_name, CopySource=copy_source, Key='new_foo2', Metadata={"foo": "bar"})
-
-
-@pytest.mark.tagging
-@pytest.mark.bucket_policy
-def test_bucket_policy_put_obj_acl():
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    # An Allow conditional would require at least the presence of an x-amz-acl
-    # attribute; a Deny conditional negates any request that tries to set a
-    # public-read/write acl
-    conditional = {"StringLike": {
-        "s3:x-amz-acl" : "public*"
-    }}
-
-    p = Policy()
-    resource = _make_arn_resource("{}/{}".format(bucket_name, "*"))
-    s1 = Statement("s3:PutObject",resource)
-    s2 = Statement("s3:PutObject", resource, effect="Deny", condition=conditional)
-
-    policy_document = p.add_statement(s1).add_statement(s2).to_json()
-    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
-
-    alt_client = get_alt_client()
-    key1 = 'private-key'
-
-    # if we want to be really pedantic, we should check that this doesn't raise
-    # and mark a failure; however, if this does raise, pytest would mark this
-    # as an ERROR anyway
-    response = alt_client.put_object(Bucket=bucket_name, Key=key1, Body=key1)
-    #response = alt_client.put_object_acl(Bucket=bucket_name, Key=key1, ACL='private')
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    key2 = 'public-key'
-
-    lf = (lambda **kwargs: kwargs['params']['headers'].update({"x-amz-acl": "public-read"}))
-    alt_client.meta.events.register('before-call.s3.PutObject', lf)
-
-    e = assert_raises(ClientError, alt_client.put_object, Bucket=bucket_name, Key=key2, Body=key2)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 403
-
-
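test_bucket_policy_put_obj_acl above combines an unconditional Allow for s3:PutObject with a Deny that matches only when the request carries a public canned ACL. Assuming the Policy/Statement helpers default to Effect 'Allow' and Principal '*' (as the inline policy documents elsewhere in this file do), the generated document is roughly the following sketch, with BUCKET standing in for the bucket name:

    policy_sketch = {
        "Version": "2012-10-17",
        "Statement": [
            {   # lets the alt user put objects at all
                "Effect": "Allow",
                "Principal": {"AWS": "*"},
                "Action": "s3:PutObject",
                "Resource": "arn:aws:s3:::BUCKET/*",
            },
            {   # but rejects any PUT that asks for a public canned ACL
                "Effect": "Deny",
                "Principal": {"AWS": "*"},
                "Action": "s3:PutObject",
                "Resource": "arn:aws:s3:::BUCKET/*",
                "Condition": {"StringLike": {"s3:x-amz-acl": "public*"}},
            },
        ],
    }

This is why the plain put_object succeeds for the alt user while the same request with an 'x-amz-acl: public-read' header is denied with 403.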
-@pytest.mark.bucket_policy
-def test_bucket_policy_put_obj_grant():
-
-    bucket_name = get_new_bucket()
-    bucket_name2 = get_new_bucket()
-    client = get_client()
-
-    # In normal cases the key owner is the uploader of the key. For the first
-    # bucket we explicitly require that the bucket owner be granted full control
-    # over objects uploaded by any user; the second bucket enforces no such
-    # policy, meaning that the uploader retains ownership
-
-    main_user_id = get_main_user_id()
-    alt_user_id = get_alt_user_id()
-
-    owner_id_str = "id=" + main_user_id
-    s3_conditional = {"StringEquals": {
-        "s3:x-amz-grant-full-control" : owner_id_str
-    }}
-
-    resource = _make_arn_resource("{}/{}".format(bucket_name, "*"))
-    policy_document = make_json_policy("s3:PutObject",
-                                       resource,
-                                       conditions=s3_conditional)
-
-    resource = _make_arn_resource("{}/{}".format(bucket_name2, "*"))
-    policy_document2 = make_json_policy("s3:PutObject", resource)
-
-    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
-    client.put_bucket_policy(Bucket=bucket_name2, Policy=policy_document2)
-
-    alt_client = get_alt_client()
-    key1 = 'key1'
-
-    lf = (lambda **kwargs: kwargs['params']['headers'].update({"x-amz-grant-full-control" : owner_id_str}))
-    alt_client.meta.events.register('before-call.s3.PutObject', lf)
-
-    response = alt_client.put_object(Bucket=bucket_name, Key=key1, Body=key1)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    def remove_header(**kwargs):
-        if ("x-amz-grant-full-control" in kwargs['params']['headers']):
-            del kwargs['params']['headers']["x-amz-grant-full-control"]
-
-    alt_client.meta.events.register('before-call.s3.PutObject', remove_header)
-
-    key2 = 'key2'
-    response = alt_client.put_object(Bucket=bucket_name2, Key=key2, Body=key2)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    acl1_response = client.get_object_acl(Bucket=bucket_name, Key=key1)
-
-    # user 1 is trying to get the acl for the object uploaded by user 2, where
-    # ownership wasn't transferred
-    check_access_denied(client.get_object_acl, Bucket=bucket_name2, Key=key2)
-
-    acl2_response = alt_client.get_object_acl(Bucket=bucket_name2, Key=key2)
-
-    assert acl1_response['Grants'][0]['Grantee']['ID'] == main_user_id
-    assert acl2_response['Grants'][0]['Grantee']['ID'] == alt_user_id
-
-
-@pytest.mark.encryption
-def test_put_obj_enc_conflict_c_s3():
-    bucket_name = get_new_bucket()
-    client = get_v2_client()
-
-    # boto3.set_stream_logger(name='botocore')
-
-    key1_str ='testobj'
-
-    sse_client_headers = {
-        'x-amz-server-side-encryption' : 'AES256',
-        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
-        'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
-        'x-amz-server-side-encryption-customer-key-md5': 'DWygnHRtgiJ77HCm+1rvHw=='
-    }
-
-    lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_client_headers))
-    client.meta.events.register('before-call.s3.PutObject', lf)
-    e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key=key1_str)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 400
-    assert error_code == 'InvalidArgument'
-
-@pytest.mark.encryption
-def test_put_obj_enc_conflict_c_kms():
-    kms_keyid = get_main_kms_keyid()
-    if kms_keyid is None:
-        kms_keyid = 'fool-me-once'
-    bucket_name = get_new_bucket()
-    client = get_v2_client()
-
-    # boto3.set_stream_logger(name='botocore')
-
-    key1_str ='testobj'
-
-    sse_client_headers = {
-        'x-amz-server-side-encryption' : 'aws:kms',
-        'x-amz-server-side-encryption-aws-kms-key-id': kms_keyid,
-        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
-        'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
-        'x-amz-server-side-encryption-customer-key-md5': 'DWygnHRtgiJ77HCm+1rvHw=='
-    }
-
-    lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_client_headers))
-    client.meta.events.register('before-call.s3.PutObject', lf)
-    e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key=key1_str)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 400
-    assert error_code == 'InvalidArgument'
-
-@pytest.mark.encryption
-def test_put_obj_enc_conflict_s3_kms():
-    kms_keyid = get_main_kms_keyid()
-    if kms_keyid is None:
-        kms_keyid = 'fool-me-once'
-    bucket_name = get_new_bucket()
-    client = get_v2_client()
-
-    # boto3.set_stream_logger(name='botocore')
-
-    key1_str ='testobj'
-
-    sse_client_headers = {
-        'x-amz-server-side-encryption' : 'AES256',
-        'x-amz-server-side-encryption-aws-kms-key-id': kms_keyid
-    }
-
-    lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_client_headers))
-    client.meta.events.register('before-call.s3.PutObject', lf)
-    e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key=key1_str)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 400
-    assert error_code == 'InvalidArgument'
-
-@pytest.mark.encryption
-def test_put_obj_enc_conflict_bad_enc_kms():
-    kms_keyid = get_main_kms_keyid()
-    if kms_keyid is None:
-        kms_keyid = 'fool-me-once'
-    bucket_name = get_new_bucket()
-    client = get_v2_client()
-
-    # boto3.set_stream_logger(name='botocore')
-
-    key1_str ='testobj'
-
-    sse_client_headers = {
-        'x-amz-server-side-encryption' : 'aes:kms',    # aes != aws
-    }
-
-    lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_client_headers))
-    client.meta.events.register('before-call.s3.PutObject', lf)
-    e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key=key1_str)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 400
-    assert error_code == 'InvalidArgument'
-
-@pytest.mark.encryption
-@pytest.mark.bucket_policy
-@pytest.mark.sse_s3
-@pytest.mark.fails_on_dbstore
-def test_bucket_policy_put_obj_s3_noenc():
-    bucket_name = get_new_bucket()
-    client = get_v2_client()
-
-    deny_incorrect_algo = {
-        "StringNotEquals": {
-          "s3:x-amz-server-side-encryption": "AES256"
-        }
-    }
-
-    deny_unencrypted_obj = {
-        "Null" : {
-          "s3:x-amz-server-side-encryption": "true"
-        }
-    }
-
-    p = Policy()
-    resource = _make_arn_resource("{}/{}".format(bucket_name, "*"))
-
-    s1 = Statement("s3:PutObject", resource, effect="Deny", condition=deny_incorrect_algo)
-    s2 = Statement("s3:PutObject", resource, effect="Deny", condition=deny_unencrypted_obj)
-    policy_document = p.add_statement(s1).add_statement(s2).to_json()
-
-    # boto3.set_stream_logger(name='botocore')
-
-    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
-    key1_str ='testobj'
-
-    #response = client.get_bucket_policy(Bucket=bucket_name)
-    #print response
-
-
-    # doing this here breaks the next request w/ 400 (non-sse bug).  Do it last.
-    #check_access_denied(client.put_object, Bucket=bucket_name, Key=key1_str, Body=key1_str)
-
-    #TODO: why is this a 400 and not passing, it appears boto3 is not parsing the 200 response the rgw sends back properly
-    # DEBUGGING: run the boto2 and compare the requests
-    # DEBUGGING: try to run this with v2 auth (figure out why get_v2_client isn't working) to make the requests similar to what boto2 is doing
-    # DEBUGGING: try to add other options to put_object to see if that makes the response better
-
-    # first validate that writing a sse-s3 object works
-    response = client.put_object(Bucket=bucket_name, Key=key1_str, ServerSideEncryption='AES256')
-    assert response['ResponseMetadata']['HTTPHeaders']['x-amz-server-side-encryption'] == 'AES256'
-
-    # then validate that a non-encrypted object fails.
-    # (this also breaks the connection--non-sse bug, probably because the server
-    #  errors out before it consumes the data...)
-    check_access_denied(client.put_object, Bucket=bucket_name, Key=key1_str, Body=key1_str)
-
-@pytest.mark.encryption
-@pytest.mark.bucket_policy
-@pytest.mark.sse_s3
-def test_bucket_policy_put_obj_s3_kms():
-    kms_keyid = get_main_kms_keyid()
-    if kms_keyid is None:
-        kms_keyid = 'fool-me-twice'
-    bucket_name = get_new_bucket()
-    client = get_v2_client()
-
-    deny_incorrect_algo = {
-        "StringNotEquals": {
-          "s3:x-amz-server-side-encryption": "AES256"
-        }
-    }
-
-    deny_unencrypted_obj = {
-        "Null" : {
-          "s3:x-amz-server-side-encryption": "true"
-        }
-    }
-
-    p = Policy()
-    resource = _make_arn_resource("{}/{}".format(bucket_name, "*"))
-
-    s1 = Statement("s3:PutObject", resource, effect="Deny", condition=deny_incorrect_algo)
-    s2 = Statement("s3:PutObject", resource, effect="Deny", condition=deny_unencrypted_obj)
-    policy_document = p.add_statement(s1).add_statement(s2).to_json()
-
-    # boto3.set_stream_logger(name='botocore')
-
-    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
-    key1_str = 'testobj'
-
-    #response = client.get_bucket_policy(Bucket=bucket_name)
-    #print(response)
-
-    sse_client_headers = {
-        'x-amz-server-side-encryption': 'aws:kms',
-        'x-amz-server-side-encryption-aws-kms-key-id': kms_keyid
-    }
-
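-    # inject SSE-KMS headers; the policy above only permits AES256, so the
-    # request should be denied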
-    lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_client_headers))
-    client.meta.events.register('before-call.s3.PutObject', lf)
-    check_access_denied(client.put_object, Bucket=bucket_name, Key=key1_str, Body=key1_str)
-
-@pytest.mark.encryption
-@pytest.mark.fails_on_dbstore
-@pytest.mark.bucket_policy
-def test_bucket_policy_put_obj_kms_noenc():
-    kms_keyid = get_main_kms_keyid()
-    if kms_keyid is None:
-        pytest.skip('[s3 main] section missing kms_keyid')
-    bucket_name = get_new_bucket()
-    client = get_v2_client()
-
-    deny_incorrect_algo = {
-        "StringNotEquals": {
-          "s3:x-amz-server-side-encryption": "aws:kms"
-        }
-    }
-
-    deny_unencrypted_obj = {
-        "Null" : {
-          "s3:x-amz-server-side-encryption": "true"
-        }
-    }
-
-    p = Policy()
-    resource = _make_arn_resource("{}/{}".format(bucket_name, "*"))
-
-    s1 = Statement("s3:PutObject", resource, effect="Deny", condition=deny_incorrect_algo)
-    s2 = Statement("s3:PutObject", resource, effect="Deny", condition=deny_unencrypted_obj)
-    policy_document = p.add_statement(s1).add_statement(s2).to_json()
-
-    # boto3.set_stream_logger(name='botocore')
-
-    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
-    key1_str = 'testobj'
-    key2_str = 'unicorn'
-
-    #response = client.get_bucket_policy(Bucket=bucket_name)
-    #print(response)
-
-    # must do check_access_denied last - otherwise, pending data
-    #  breaks next call...
-    response = client.put_object(Bucket=bucket_name, Key=key1_str,
-         ServerSideEncryption='aws:kms', SSEKMSKeyId=kms_keyid)
-    assert response['ResponseMetadata']['HTTPHeaders']['x-amz-server-side-encryption'] == 'aws:kms'
-    assert response['ResponseMetadata']['HTTPHeaders']['x-amz-server-side-encryption-aws-kms-key-id'] == kms_keyid
-
-    check_access_denied(client.put_object, Bucket=bucket_name, Key=key2_str, Body=key2_str)
-
-@pytest.mark.encryption
-@pytest.mark.bucket_policy
-def test_bucket_policy_put_obj_kms_s3():
-    bucket_name = get_new_bucket()
-    client = get_v2_client()
-
-    deny_incorrect_algo = {
-        "StringNotEquals": {
-          "s3:x-amz-server-side-encryption": "aws:kms"
-        }
-    }
-
-    deny_unencrypted_obj = {
-        "Null" : {
-          "s3:x-amz-server-side-encryption": "true"
-        }
-    }
-
-    p = Policy()
-    resource = _make_arn_resource("{}/{}".format(bucket_name, "*"))
-
-    s1 = Statement("s3:PutObject", resource, effect="Deny", condition=deny_incorrect_algo)
-    s2 = Statement("s3:PutObject", resource, effect="Deny", condition=deny_unencrypted_obj)
-    policy_document = p.add_statement(s1).add_statement(s2).to_json()
-
-    # boto3.set_stream_logger(name='botocore')
-
-    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
-    key1_str = 'testobj'
-
-    #response = client.get_bucket_policy(Bucket=bucket_name)
-    #print(response)
-
-    sse_client_headers = {
-        'x-amz-server-side-encryption' : 'AES256',
-    }
-
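-    # inject an AES256 header; the policy above only permits aws:kms, so the
-    # request should be denied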
-    lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_client_headers))
-    client.meta.events.register('before-call.s3.PutObject', lf)
-    check_access_denied(client.put_object, Bucket=bucket_name, Key=key1_str, Body=key1_str)
-
-@pytest.mark.tagging
-@pytest.mark.bucket_policy
-# TODO: remove this fails_on_rgw when I fix it
-@pytest.mark.fails_on_rgw
-def test_bucket_policy_put_obj_request_obj_tag():
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    tag_conditional = {"StringEquals": {
-        "s3:RequestObjectTag/security" : "public"
-    }}
-
-    p = Policy()
-    resource = _make_arn_resource("{}/{}".format(bucket_name, "*"))
-
-    s1 = Statement("s3:PutObject", resource, effect="Allow", condition=tag_conditional)
-    policy_document = p.add_statement(s1).to_json()
-
-    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
-
-    alt_client = get_alt_client()
-    key1_str = 'testobj'
-    check_access_denied(alt_client.put_object, Bucket=bucket_name, Key=key1_str, Body=key1_str)
-
-    headers = {"x-amz-tagging" : "security=public"}
-    lf = (lambda **kwargs: kwargs['params']['headers'].update(headers))
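-    # the handler must be registered on alt_client, since that is the client
-    # issuing the tagged PutObject request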
-    alt_client.meta.events.register('before-call.s3.PutObject', lf)
-    #TODO: why is this a 400 and not passing
-    alt_client.put_object(Bucket=bucket_name, Key=key1_str, Body=key1_str)
-
-@pytest.mark.tagging
-@pytest.mark.bucket_policy
-@pytest.mark.fails_on_dbstore
-def test_bucket_policy_get_obj_acl_existing_tag():
-    bucket_name = _create_objects(keys=['publictag', 'privatetag', 'invalidtag'])
-    client = get_client()
-
-    tag_conditional = {"StringEquals": {
-        "s3:ExistingObjectTag/security" : "public"
-    }}
-
-    resource = _make_arn_resource("{}/{}".format(bucket_name, "*"))
-    policy_document = make_json_policy("s3:GetObjectAcl",
-                                       resource,
-                                       conditions=tag_conditional)
-
-
-    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
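-    # tag the three objects: 'publictag' matches the policy condition,
-    # 'privatetag' has the right tag key but the wrong value, and 'invalidtag'
-    # uses a different tag key altogether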
-    tagset = []
-    tagset.append({'Key': 'security', 'Value': 'public'})
-    tagset.append({'Key': 'foo', 'Value': 'bar'})
-
-    input_tagset = {'TagSet': tagset}
-
-    response = client.put_object_tagging(Bucket=bucket_name, Key='publictag', Tagging=input_tagset)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    tagset2 = []
-    tagset2.append({'Key': 'security', 'Value': 'private'})
-
-    input_tagset = {'TagSet': tagset2}
-
-    response = client.put_object_tagging(Bucket=bucket_name, Key='privatetag', Tagging=input_tagset)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    tagset3 = []
-    tagset3.append({'Key': 'security1', 'Value': 'public'})
-
-    input_tagset = {'TagSet': tagset3}
-
-    response = client.put_object_tagging(Bucket=bucket_name, Key='invalidtag', Tagging=input_tagset)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    alt_client = get_alt_client()
-    response = alt_client.get_object_acl(Bucket=bucket_name, Key='publictag')
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    # A get object itself should fail since we allowed only GetObjectTagging
-    e = assert_raises(ClientError, alt_client.get_object, Bucket=bucket_name, Key='publictag')
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 403
-
-    e = assert_raises(ClientError, alt_client.get_object_tagging, Bucket=bucket_name, Key='privatetag')
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 403
-
-    e = assert_raises(ClientError, alt_client.get_object_tagging, Bucket=bucket_name, Key='invalidtag')
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 403
-
-
-@pytest.mark.fails_on_dbstore
-def test_object_lock_put_obj_lock():
-    bucket_name = get_new_bucket_name()
-    client = get_client()
-    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
-    conf = {'ObjectLockEnabled':'Enabled',
-            'Rule': {
-                'DefaultRetention':{
-                    'Mode':'GOVERNANCE',
-                    'Days':1
-                }
-            }}
-    response = client.put_object_lock_configuration(
-        Bucket=bucket_name,
-        ObjectLockConfiguration=conf)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    conf = {'ObjectLockEnabled':'Enabled',
-            'Rule': {
-                'DefaultRetention':{
-                    'Mode':'COMPLIANCE',
-                    'Years':1
-                }
-            }}
-    response = client.put_object_lock_configuration(
-        Bucket=bucket_name,
-        ObjectLockConfiguration=conf)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    response = client.get_bucket_versioning(Bucket=bucket_name)
-    assert response['Status'] == 'Enabled'
-
-
-def test_object_lock_put_obj_lock_invalid_bucket():
-    bucket_name = get_new_bucket_name()
-    client = get_client()
-    client.create_bucket(Bucket=bucket_name)
-    conf = {'ObjectLockEnabled':'Enabled',
-            'Rule': {
-                'DefaultRetention':{
-                    'Mode':'GOVERNANCE',
-                    'Days':1
-                }
-            }}
-    e = assert_raises(ClientError, client.put_object_lock_configuration, Bucket=bucket_name, ObjectLockConfiguration=conf)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 409
-    assert error_code == 'InvalidBucketState'
-
-
-@pytest.mark.fails_on_dbstore
-def test_object_lock_put_obj_lock_with_days_and_years():
-    bucket_name = get_new_bucket_name()
-    client = get_client()
-    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
-    conf = {'ObjectLockEnabled':'Enabled',
-            'Rule': {
-                'DefaultRetention':{
-                    'Mode':'GOVERNANCE',
-                    'Days':1,
-                    'Years':1
-                }
-            }}
-    e = assert_raises(ClientError, client.put_object_lock_configuration, Bucket=bucket_name, ObjectLockConfiguration=conf)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 400
-    assert error_code == 'MalformedXML'
-
-
-@pytest.mark.fails_on_dbstore
-def test_object_lock_put_obj_lock_invalid_days():
-    bucket_name = get_new_bucket_name()
-    client = get_client()
-    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
-    conf = {'ObjectLockEnabled':'Enabled',
-            'Rule': {
-                'DefaultRetention':{
-                    'Mode':'GOVERNANCE',
-                    'Days':0
-                }
-            }}
-    e = assert_raises(ClientError, client.put_object_lock_configuration, Bucket=bucket_name, ObjectLockConfiguration=conf)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 400
-    assert error_code == 'InvalidRetentionPeriod'
-
-
-@pytest.mark.fails_on_dbstore
-def test_object_lock_put_obj_lock_invalid_years():
-    bucket_name = get_new_bucket_name()
-    client = get_client()
-    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
-    conf = {'ObjectLockEnabled':'Enabled',
-            'Rule': {
-                'DefaultRetention':{
-                    'Mode':'GOVERNANCE',
-                    'Years':-1
-                }
-            }}
-    e = assert_raises(ClientError, client.put_object_lock_configuration, Bucket=bucket_name, ObjectLockConfiguration=conf)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 400
-    assert error_code == 'InvalidRetentionPeriod'
-
-
-@pytest.mark.fails_on_dbstore
-def test_object_lock_put_obj_lock_invalid_mode():
-    bucket_name = get_new_bucket_name()
-    client = get_client()
-    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
-    conf = {'ObjectLockEnabled':'Enabled',
-            'Rule': {
-                'DefaultRetention':{
-                    'Mode':'abc',
-                    'Years':1
-                }
-            }}
-    e = assert_raises(ClientError, client.put_object_lock_configuration, Bucket=bucket_name, ObjectLockConfiguration=conf)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 400
-    assert error_code == 'MalformedXML'
-
-    conf = {'ObjectLockEnabled':'Enabled',
-            'Rule': {
-                'DefaultRetention':{
-                    'Mode':'governance',
-                    'Years':1
-                }
-            }}
-    e = assert_raises(ClientError, client.put_object_lock_configuration, Bucket=bucket_name, ObjectLockConfiguration=conf)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 400
-    assert error_code == 'MalformedXML'
-
-
-@pytest.mark.fails_on_dbstore
-def test_object_lock_put_obj_lock_invalid_status():
-    bucket_name = get_new_bucket_name()
-    client = get_client()
-    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
-    conf = {'ObjectLockEnabled':'Disabled',
-            'Rule': {
-                'DefaultRetention':{
-                    'Mode':'GOVERNANCE',
-                    'Years':1
-                }
-            }}
-    e = assert_raises(ClientError, client.put_object_lock_configuration, Bucket=bucket_name, ObjectLockConfiguration=conf)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 400
-    assert error_code == 'MalformedXML'
-
-
-@pytest.mark.fails_on_dbstore
-def test_object_lock_suspend_versioning():
-    bucket_name = get_new_bucket_name()
-    client = get_client()
-    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
-    e = assert_raises(ClientError, client.put_bucket_versioning, Bucket=bucket_name, VersioningConfiguration={'Status': 'Suspended'})
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 409
-    assert error_code == 'InvalidBucketState'
-
-
-@pytest.mark.fails_on_dbstore
-def test_object_lock_get_obj_lock():
-    bucket_name = get_new_bucket_name()
-    client = get_client()
-    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
-    conf = {'ObjectLockEnabled':'Enabled',
-            'Rule': {
-                'DefaultRetention':{
-                    'Mode':'GOVERNANCE',
-                    'Days':1
-                }
-            }}
-    client.put_object_lock_configuration(
-        Bucket=bucket_name,
-        ObjectLockConfiguration=conf)
-    response = client.get_object_lock_configuration(Bucket=bucket_name)
-    assert response['ObjectLockConfiguration'] == conf
-
-
-def test_object_lock_get_obj_lock_invalid_bucket():
-    bucket_name = get_new_bucket_name()
-    client = get_client()
-    client.create_bucket(Bucket=bucket_name)
-    e = assert_raises(ClientError, client.get_object_lock_configuration, Bucket=bucket_name)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 404
-    assert error_code == 'ObjectLockConfigurationNotFoundError'
-
-
-@pytest.mark.fails_on_dbstore
-def test_object_lock_put_obj_retention():
-    bucket_name = get_new_bucket_name()
-    client = get_client()
-    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
-    key = 'file1'
-    response = client.put_object(Bucket=bucket_name, Body='abc', Key=key)
-    version_id = response['VersionId']
-    retention = {'Mode':'GOVERNANCE', 'RetainUntilDate':datetime.datetime(2140,1,1,tzinfo=pytz.UTC)}
-    response = client.put_object_retention(Bucket=bucket_name, Key=key, Retention=retention)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-    response = client.get_object_retention(Bucket=bucket_name, Key=key)
-    assert response['Retention'] == retention
-    client.delete_object(Bucket=bucket_name, Key=key, VersionId=version_id, BypassGovernanceRetention=True)
-
-
-
-def test_object_lock_put_obj_retention_invalid_bucket():
-    bucket_name = get_new_bucket_name()
-    client = get_client()
-    client.create_bucket(Bucket=bucket_name)
-    key = 'file1'
-    client.put_object(Bucket=bucket_name, Body='abc', Key=key)
-    retention = {'Mode':'GOVERNANCE', 'RetainUntilDate':datetime.datetime(2030,1,1,tzinfo=pytz.UTC)}
-    e = assert_raises(ClientError, client.put_object_retention, Bucket=bucket_name, Key=key, Retention=retention)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 400
-    assert error_code == 'InvalidRequest'
-
-
-@pytest.mark.fails_on_dbstore
-def test_object_lock_put_obj_retention_invalid_mode():
-    bucket_name = get_new_bucket_name()
-    client = get_client()
-    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
-    key = 'file1'
-    client.put_object(Bucket=bucket_name, Body='abc', Key=key)
-    retention = {'Mode':'governance', 'RetainUntilDate':datetime.datetime(2030,1,1,tzinfo=pytz.UTC)}
-    e = assert_raises(ClientError, client.put_object_retention, Bucket=bucket_name, Key=key, Retention=retention)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 400
-    assert error_code == 'MalformedXML'
-
-    retention = {'Mode':'abc', 'RetainUntilDate':datetime.datetime(2030,1,1,tzinfo=pytz.UTC)}
-    e = assert_raises(ClientError, client.put_object_retention, Bucket=bucket_name, Key=key, Retention=retention)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 400
-    assert error_code == 'MalformedXML'
-
-
-@pytest.mark.fails_on_dbstore
-def test_object_lock_get_obj_retention():
-    bucket_name = get_new_bucket_name()
-    client = get_client()
-    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
-    key = 'file1'
-    response = client.put_object(Bucket=bucket_name, Body='abc', Key=key)
-    version_id = response['VersionId']
-    retention = {'Mode':'GOVERNANCE', 'RetainUntilDate':datetime.datetime(2030,1,1,tzinfo=pytz.UTC)}
-    client.put_object_retention(Bucket=bucket_name, Key=key, Retention=retention)
-    response = client.get_object_retention(Bucket=bucket_name, Key=key)
-    assert response['Retention'] == retention
-    client.delete_object(Bucket=bucket_name, Key=key, VersionId=version_id, BypassGovernanceRetention=True)
-
-
-@pytest.mark.fails_on_dbstore
-def test_object_lock_get_obj_retention_iso8601():
-    bucket_name = get_new_bucket_name()
-    client = get_client()
-    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
-    key = 'file1'
-    response = client.put_object(Bucket=bucket_name, Body='abc', Key=key)
-    version_id = response['VersionId']
-    date = datetime.datetime.today() + datetime.timedelta(days=365)
-    retention = {'Mode':'GOVERNANCE', 'RetainUntilDate': date}
-    client.put_object_retention(Bucket=bucket_name, Key=key, Retention=retention)
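-    # capture the raw HTTP response so the x-amz-object-lock-retain-until-date
-    # header can be checked for ISO 8601 formatting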
-    client.meta.events.register('after-call.s3.HeadObject', get_http_response)
-    client.head_object(Bucket=bucket_name,VersionId=version_id,Key=key)
-    retain_date = http_response['headers']['x-amz-object-lock-retain-until-date']
-    isodate.parse_datetime(retain_date)
-    client.delete_object(Bucket=bucket_name, Key=key, VersionId=version_id, BypassGovernanceRetention=True)
-
-
-def test_object_lock_get_obj_retention_invalid_bucket():
-    bucket_name = get_new_bucket_name()
-    client = get_client()
-    client.create_bucket(Bucket=bucket_name)
-    key = 'file1'
-    client.put_object(Bucket=bucket_name, Body='abc', Key=key)
-    e = assert_raises(ClientError, client.get_object_retention, Bucket=bucket_name, Key=key)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 400
-    assert error_code == 'InvalidRequest'
-
-
-@pytest.mark.fails_on_dbstore
-def test_object_lock_put_obj_retention_versionid():
-    bucket_name = get_new_bucket_name()
-    client = get_client()
-    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
-    key = 'file1'
-    client.put_object(Bucket=bucket_name, Body='abc', Key=key)
-    response = client.put_object(Bucket=bucket_name, Body='abc', Key=key)
-    version_id = response['VersionId']
-    retention = {'Mode':'GOVERNANCE', 'RetainUntilDate':datetime.datetime(2030,1,1,tzinfo=pytz.UTC)}
-    client.put_object_retention(Bucket=bucket_name, Key=key, VersionId=version_id, Retention=retention)
-    response = client.get_object_retention(Bucket=bucket_name, Key=key, VersionId=version_id)
-    assert response['Retention'] == retention
-    client.delete_object(Bucket=bucket_name, Key=key, VersionId=version_id, BypassGovernanceRetention=True)
-
-
-@pytest.mark.fails_on_dbstore
-def test_object_lock_put_obj_retention_override_default_retention():
-    bucket_name = get_new_bucket_name()
-    client = get_client()
-    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
-    conf = {'ObjectLockEnabled':'Enabled',
-            'Rule': {
-                'DefaultRetention':{
-                    'Mode':'GOVERNANCE',
-                    'Days':1
-                }
-            }}
-    client.put_object_lock_configuration(
-        Bucket=bucket_name,
-        ObjectLockConfiguration=conf)
-    key = 'file1'
-    response = client.put_object(Bucket=bucket_name, Body='abc', Key=key)
-    version_id = response['VersionId']
-    retention = {'Mode':'GOVERNANCE', 'RetainUntilDate':datetime.datetime(2030,1,1,tzinfo=pytz.UTC)}
-    client.put_object_retention(Bucket=bucket_name, Key=key, Retention=retention)
-    response = client.get_object_retention(Bucket=bucket_name, Key=key)
-    assert response['Retention'] == retention
-    client.delete_object(Bucket=bucket_name, Key=key, VersionId=version_id, BypassGovernanceRetention=True)
-
-
-@pytest.mark.fails_on_dbstore
-def test_object_lock_put_obj_retention_increase_period():
-    bucket_name = get_new_bucket_name()
-    client = get_client()
-    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
-    key = 'file1'
-    response = client.put_object(Bucket=bucket_name, Body='abc', Key=key)
-    version_id = response['VersionId']
-    retention1 = {'Mode':'GOVERNANCE', 'RetainUntilDate':datetime.datetime(2030,1,1,tzinfo=pytz.UTC)}
-    client.put_object_retention(Bucket=bucket_name, Key=key, Retention=retention1)
-    retention2 = {'Mode':'GOVERNANCE', 'RetainUntilDate':datetime.datetime(2030,1,3,tzinfo=pytz.UTC)}
-    client.put_object_retention(Bucket=bucket_name, Key=key, Retention=retention2)
-    response = client.get_object_retention(Bucket=bucket_name, Key=key)
-    assert response['Retention'] == retention2
-    client.delete_object(Bucket=bucket_name, Key=key, VersionId=version_id, BypassGovernanceRetention=True)
-
-
-@pytest.mark.fails_on_dbstore
-def test_object_lock_put_obj_retention_shorten_period():
-    bucket_name = get_new_bucket_name()
-    client = get_client()
-    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
-    key = 'file1'
-    response = client.put_object(Bucket=bucket_name, Body='abc', Key=key)
-    version_id = response['VersionId']
-    retention = {'Mode':'GOVERNANCE', 'RetainUntilDate':datetime.datetime(2030,1,3,tzinfo=pytz.UTC)}
-    client.put_object_retention(Bucket=bucket_name, Key=key, Retention=retention)
-    retention = {'Mode':'GOVERNANCE', 'RetainUntilDate':datetime.datetime(2030,1,1,tzinfo=pytz.UTC)}
-    e = assert_raises(ClientError, client.put_object_retention, Bucket=bucket_name, Key=key, Retention=retention)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 403
-    assert error_code == 'AccessDenied'
-    client.delete_object(Bucket=bucket_name, Key=key, VersionId=version_id, BypassGovernanceRetention=True)
-
-
-@pytest.mark.fails_on_dbstore
-def test_object_lock_put_obj_retention_shorten_period_bypass():
-    bucket_name = get_new_bucket_name()
-    client = get_client()
-    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
-    key = 'file1'
-    response = client.put_object(Bucket=bucket_name, Body='abc', Key=key)
-    version_id = response['VersionId']
-    retention = {'Mode':'GOVERNANCE', 'RetainUntilDate':datetime.datetime(2030,1,3,tzinfo=pytz.UTC)}
-    client.put_object_retention(Bucket=bucket_name, Key=key, Retention=retention)
-    retention = {'Mode':'GOVERNANCE', 'RetainUntilDate':datetime.datetime(2030,1,1,tzinfo=pytz.UTC)}
-    client.put_object_retention(Bucket=bucket_name, Key=key, Retention=retention, BypassGovernanceRetention=True)
-    response = client.get_object_retention(Bucket=bucket_name, Key=key)
-    assert response['Retention'] == retention
-    client.delete_object(Bucket=bucket_name, Key=key, VersionId=version_id, BypassGovernanceRetention=True)
-
-
-@pytest.mark.fails_on_dbstore
-def test_object_lock_delete_object_with_retention():
-    bucket_name = get_new_bucket_name()
-    client = get_client()
-    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
-    key = 'file1'
-
-    response = client.put_object(Bucket=bucket_name, Body='abc', Key=key)
-    retention = {'Mode':'GOVERNANCE', 'RetainUntilDate':datetime.datetime(2030,1,1,tzinfo=pytz.UTC)}
-    client.put_object_retention(Bucket=bucket_name, Key=key, Retention=retention)
-    e = assert_raises(ClientError, client.delete_object, Bucket=bucket_name, Key=key, VersionId=response['VersionId'])
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 403
-    assert error_code == 'AccessDenied'
-
-    response = client.delete_object(Bucket=bucket_name, Key=key, VersionId=response['VersionId'], BypassGovernanceRetention=True)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 204
-
-@pytest.mark.fails_on_dbstore
-def test_object_lock_delete_multipart_object_with_retention():
-    bucket_name = get_new_bucket_name()
-    client = get_client()
-    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
-
-    key = 'file1'
-    body = 'abc'
-    response = client.create_multipart_upload(Bucket=bucket_name, Key=key, ObjectLockMode='GOVERNANCE',
-                                              ObjectLockRetainUntilDate=datetime.datetime(2030,1,1,tzinfo=pytz.UTC))
-    upload_id = response['UploadId']
-
-    response = client.upload_part(UploadId=upload_id, Bucket=bucket_name, Key=key, PartNumber=1, Body=body)
-    parts = [{'ETag': response['ETag'].strip('"'), 'PartNumber': 1}]
-
-    response = client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
-
-    e = assert_raises(ClientError, client.delete_object, Bucket=bucket_name, Key=key, VersionId=response['VersionId'])
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 403
-    assert error_code == 'AccessDenied'
-
-    response = client.delete_object(Bucket=bucket_name, Key=key, VersionId=response['VersionId'], BypassGovernanceRetention=True)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 204
-
-@pytest.mark.fails_on_dbstore
-def test_object_lock_delete_object_with_retention_and_marker():
-    bucket_name = get_new_bucket_name()
-    client = get_client()
-    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
-    key = 'file1'
-
-    response = client.put_object(Bucket=bucket_name, Body='abc', Key=key)
-    retention = {'Mode':'GOVERNANCE', 'RetainUntilDate':datetime.datetime(2030,1,1,tzinfo=pytz.UTC)}
-    client.put_object_retention(Bucket=bucket_name, Key=key, Retention=retention)
-    del_response = client.delete_object(Bucket=bucket_name, Key=key)
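-    # the un-versioned delete only creates a delete marker; the retained
-    # version itself still cannot be removed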
-    e = assert_raises(ClientError, client.delete_object, Bucket=bucket_name, Key=key, VersionId=response['VersionId'])
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 403
-    assert error_code == 'AccessDenied'
-
-    client.delete_object(Bucket=bucket_name, Key=key, VersionId=del_response['VersionId'])
-    e = assert_raises(ClientError, client.delete_object, Bucket=bucket_name, Key=key, VersionId=response['VersionId'])
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 403
-    assert error_code == 'AccessDenied'
-
-    response = client.delete_object(Bucket=bucket_name, Key=key, VersionId=response['VersionId'], BypassGovernanceRetention=True)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 204
-
-@pytest.mark.fails_on_dbstore
-def test_object_lock_multi_delete_object_with_retention():
-    bucket_name = get_new_bucket_name()
-    client = get_client()
-    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
-    key1 = 'file1'
-    key2 = 'file2'
-
-    response1 = client.put_object(Bucket=bucket_name, Body='abc', Key=key1)
-    response2 = client.put_object(Bucket=bucket_name, Body='abc', Key=key2)
-
-    versionId1 = response1['VersionId']
-    versionId2 = response2['VersionId']
-
-    # key1 is under retention, but key2 isn't.
-    retention = {'Mode':'GOVERNANCE', 'RetainUntilDate':datetime.datetime(2030,1,1,tzinfo=pytz.UTC)}
-    client.put_object_retention(Bucket=bucket_name, Key=key1, Retention=retention)
-
-    delete_response = client.delete_objects(
-        Bucket=bucket_name,
-        Delete={
-            'Objects': [
-                {
-                    'Key': key1,
-                    'VersionId': versionId1
-                },
-                {
-                    'Key': key2,
-                    'VersionId': versionId2
-                }
-            ]
-        }
-    )
-
-    assert len(delete_response['Deleted']) == 1
-    assert len(delete_response['Errors']) == 1
-
-    failed_object = delete_response['Errors'][0]
-    assert failed_object['Code'] == 'AccessDenied'
-    assert failed_object['Key'] == key1
-    assert failed_object['VersionId'] == versionId1
-
-    deleted_object = delete_response['Deleted'][0]
-    assert deleted_object['Key'] == key2
-    assert deleted_object['VersionId'] == versionId2
-
-    delete_response = client.delete_objects(
-        Bucket=bucket_name,
-        Delete={
-            'Objects': [
-                {
-                    'Key': key1,
-                    'VersionId': versionId1
-                }
-            ]
-        },
-        BypassGovernanceRetention=True
-    )
-
-    assert 'Errors' not in delete_response or len(delete_response['Errors']) == 0
-    assert len(delete_response['Deleted']) == 1
-    deleted_object = delete_response['Deleted'][0]
-    assert deleted_object['Key'] == key1
-    assert deleted_object['VersionId'] == versionId1
-
-
-
-@pytest.mark.fails_on_dbstore
-def test_object_lock_put_legal_hold():
-    bucket_name = get_new_bucket_name()
-    client = get_client()
-    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
-    key = 'file1'
-    client.put_object(Bucket=bucket_name, Body='abc', Key=key)
-    legal_hold = {'Status': 'ON'}
-    response = client.put_object_legal_hold(Bucket=bucket_name, Key=key, LegalHold=legal_hold)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-    response = client.put_object_legal_hold(Bucket=bucket_name, Key=key, LegalHold={'Status':'OFF'})
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-
-def test_object_lock_put_legal_hold_invalid_bucket():
-    bucket_name = get_new_bucket_name()
-    client = get_client()
-    client.create_bucket(Bucket=bucket_name)
-    key = 'file1'
-    client.put_object(Bucket=bucket_name, Body='abc', Key=key)
-    legal_hold = {'Status': 'ON'}
-    e = assert_raises(ClientError, client.put_object_legal_hold, Bucket=bucket_name, Key=key, LegalHold=legal_hold)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 400
-    assert error_code == 'InvalidRequest'
-
-
-@pytest.mark.fails_on_dbstore
-def test_object_lock_put_legal_hold_invalid_status():
-    bucket_name = get_new_bucket_name()
-    client = get_client()
-    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
-    key = 'file1'
-    client.put_object(Bucket=bucket_name, Body='abc', Key=key)
-    legal_hold = {'Status': 'abc'}
-    e = assert_raises(ClientError, client.put_object_legal_hold, Bucket=bucket_name, Key=key, LegalHold=legal_hold)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 400
-    assert error_code == 'MalformedXML'
-
-
-@pytest.mark.fails_on_dbstore
-def test_object_lock_get_legal_hold():
-    bucket_name = get_new_bucket_name()
-    client = get_client()
-    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
-    key = 'file1'
-    client.put_object(Bucket=bucket_name, Body='abc', Key=key)
-    legal_hold = {'Status': 'ON'}
-    client.put_object_legal_hold(Bucket=bucket_name, Key=key, LegalHold=legal_hold)
-    response = client.get_object_legal_hold(Bucket=bucket_name, Key=key)
-    assert response['LegalHold'] == legal_hold
-    legal_hold_off = {'Status': 'OFF'}
-    client.put_object_legal_hold(Bucket=bucket_name, Key=key, LegalHold=legal_hold_off)
-    response = client.get_object_legal_hold(Bucket=bucket_name, Key=key)
-    assert response['LegalHold'] == legal_hold_off
-
-
-def test_object_lock_get_legal_hold_invalid_bucket():
-    bucket_name = get_new_bucket_name()
-    client = get_client()
-    client.create_bucket(Bucket=bucket_name)
-    key = 'file1'
-    client.put_object(Bucket=bucket_name, Body='abc', Key=key)
-    e = assert_raises(ClientError, client.get_object_legal_hold, Bucket=bucket_name, Key=key)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 400
-    assert error_code == 'InvalidRequest'
-
-
-@pytest.mark.fails_on_dbstore
-def test_object_lock_delete_object_with_legal_hold_on():
-    bucket_name = get_new_bucket_name()
-    client = get_client()
-    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
-    key = 'file1'
-    response = client.put_object(Bucket=bucket_name, Body='abc', Key=key)
-    client.put_object_legal_hold(Bucket=bucket_name, Key=key, LegalHold={'Status': 'ON'})
-    e = assert_raises(ClientError, client.delete_object, Bucket=bucket_name, Key=key, VersionId=response['VersionId'])
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 403
-    assert error_code == 'AccessDenied'
-    client.put_object_legal_hold(Bucket=bucket_name, Key=key, LegalHold={'Status':'OFF'})
-
-@pytest.mark.fails_on_dbstore
-def test_object_lock_delete_multipart_object_with_legal_hold_on():
-    bucket_name = get_new_bucket_name()
-    client = get_client()
-    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
-
-    key = 'file1'
-    body = 'abc'
-    response = client.create_multipart_upload(Bucket=bucket_name, Key=key, ObjectLockLegalHoldStatus='ON')
-    upload_id = response['UploadId']
-
-    response = client.upload_part(UploadId=upload_id, Bucket=bucket_name, Key=key, PartNumber=1, Body=body)
-    parts = [{'ETag': response['ETag'].strip('"'), 'PartNumber': 1}]
-
-    response = client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
-
-    e = assert_raises(ClientError, client.delete_object, Bucket=bucket_name, Key=key, VersionId=response['VersionId'])
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 403
-    assert error_code == 'AccessDenied'
-    client.put_object_legal_hold(Bucket=bucket_name, Key=key, LegalHold={'Status':'OFF'})
-
-@pytest.mark.fails_on_dbstore
-def test_object_lock_delete_object_with_legal_hold_off():
-    bucket_name = get_new_bucket_name()
-    client = get_client()
-    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
-    key = 'file1'
-    response = client.put_object(Bucket=bucket_name, Body='abc', Key=key)
-    client.put_object_legal_hold(Bucket=bucket_name, Key=key, LegalHold={'Status': 'OFF'})
-    response = client.delete_object(Bucket=bucket_name, Key=key, VersionId=response['VersionId'])
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 204
-
-
-@pytest.mark.fails_on_dbstore
-def test_object_lock_get_obj_metadata():
-    bucket_name = get_new_bucket_name()
-    client = get_client()
-    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
-    key = 'file1'
-    client.put_object(Bucket=bucket_name, Body='abc', Key=key)
-    legal_hold = {'Status': 'ON'}
-    client.put_object_legal_hold(Bucket=bucket_name, Key=key, LegalHold=legal_hold)
-    retention = {'Mode':'GOVERNANCE', 'RetainUntilDate':datetime.datetime(2030,1,1,tzinfo=pytz.UTC)}
-    client.put_object_retention(Bucket=bucket_name, Key=key, Retention=retention)
-    response = client.head_object(Bucket=bucket_name, Key=key)
-    assert response['ObjectLockMode'] == retention['Mode']
-    assert response['ObjectLockRetainUntilDate'] == retention['RetainUntilDate']
-    assert response['ObjectLockLegalHoldStatus'] == legal_hold['Status']
-
-    client.put_object_legal_hold(Bucket=bucket_name, Key=key, LegalHold={'Status':'OFF'})
-    client.delete_object(Bucket=bucket_name, Key=key, VersionId=response['VersionId'], BypassGovernanceRetention=True)
-
-
-@pytest.mark.fails_on_dbstore
-def test_object_lock_uploading_obj():
-    bucket_name = get_new_bucket_name()
-    client = get_client()
-    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
-    key = 'file1'
-    client.put_object(Bucket=bucket_name, Body='abc', Key=key, ObjectLockMode='GOVERNANCE',
-                      ObjectLockRetainUntilDate=datetime.datetime(2030,1,1,tzinfo=pytz.UTC), ObjectLockLegalHoldStatus='ON')
-
-    response = client.head_object(Bucket=bucket_name, Key=key)
-    assert response['ObjectLockMode'] == 'GOVERNANCE'
-    assert response['ObjectLockRetainUntilDate'] == datetime.datetime(2030,1,1,tzinfo=pytz.UTC)
-    assert response['ObjectLockLegalHoldStatus'] == 'ON'
-    client.put_object_legal_hold(Bucket=bucket_name, Key=key, LegalHold={'Status':'OFF'})
-    client.delete_object(Bucket=bucket_name, Key=key, VersionId=response['VersionId'], BypassGovernanceRetention=True)
-
-@pytest.mark.fails_on_dbstore
-def test_object_lock_changing_mode_from_governance_with_bypass():
-    bucket_name = get_new_bucket_name()
-    key = 'file1'
-    client = get_client()
-    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
-    # upload object with mode=GOVERNANCE
-    retain_until = datetime.datetime.now(pytz.utc) + datetime.timedelta(seconds=10)
-    client.put_object(Bucket=bucket_name, Body='abc', Key=key, ObjectLockMode='GOVERNANCE',
-                      ObjectLockRetainUntilDate=retain_until)
-    # change mode to COMPLIANCE
-    retention = {'Mode':'COMPLIANCE', 'RetainUntilDate':retain_until}
-    client.put_object_retention(Bucket=bucket_name, Key=key, Retention=retention, BypassGovernanceRetention=True)
-
-@pytest.mark.fails_on_dbstore
-def test_object_lock_changing_mode_from_governance_without_bypass():
-    bucket_name = get_new_bucket_name()
-    key = 'file1'
-    client = get_client()
-    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
-    # upload object with mode=GOVERNANCE
-    retain_until = datetime.datetime.now(pytz.utc) + datetime.timedelta(seconds=10)
-    client.put_object(Bucket=bucket_name, Body='abc', Key=key, ObjectLockMode='GOVERNANCE',
-                      ObjectLockRetainUntilDate=retain_until)
-    # try to change mode to COMPLIANCE
-    retention = {'Mode':'COMPLIANCE', 'RetainUntilDate':retain_until}
-    e = assert_raises(ClientError, client.put_object_retention, Bucket=bucket_name, Key=key, Retention=retention)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 403
-    assert error_code == 'AccessDenied'
-
-@pytest.mark.fails_on_dbstore
-def test_object_lock_changing_mode_from_compliance():
-    bucket_name = get_new_bucket_name()
-    key = 'file1'
-    client = get_client()
-    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
-    # upload object with mode=COMPLIANCE
-    retain_until = datetime.datetime.now(pytz.utc) + datetime.timedelta(seconds=10)
-    client.put_object(Bucket=bucket_name, Body='abc', Key=key, ObjectLockMode='COMPLIANCE',
-                      ObjectLockRetainUntilDate=retain_until)
-    # try to change mode to GOVERNANCE
-    retention = {'Mode':'GOVERNANCE', 'RetainUntilDate':retain_until}
-    e = assert_raises(ClientError, client.put_object_retention, Bucket=bucket_name, Key=key, Retention=retention)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 403
-    assert error_code == 'AccessDenied'
-
-@pytest.mark.fails_on_dbstore
-def test_copy_object_ifmatch_good():
-    bucket_name = get_new_bucket()
-    client = get_client()
-    resp = client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
-
-    client.copy_object(Bucket=bucket_name, CopySource=bucket_name+'/foo', CopySourceIfMatch=resp['ETag'], Key='bar')
-    response = client.get_object(Bucket=bucket_name, Key='bar')
-    body = _get_body(response)
-    assert body == 'bar'
-
-# TODO: remove fails_on_rgw when https://tracker.ceph.com/issues/40808 is resolved
-@pytest.mark.fails_on_rgw
-def test_copy_object_ifmatch_failed():
-    bucket_name = get_new_bucket()
-    client = get_client()
-    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
-
-    e = assert_raises(ClientError, client.copy_object, Bucket=bucket_name, CopySource=bucket_name+'/foo', CopySourceIfMatch='ABCORZ', Key='bar')
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 412
-    assert error_code == 'PreconditionFailed'
-
-# TODO: remove fails_on_rgw when https://tracker.ceph.com/issues/40808 is resolved
-@pytest.mark.fails_on_rgw
-def test_copy_object_ifnonematch_good():
-    bucket_name = get_new_bucket()
-    client = get_client()
-    resp = client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
-
-    e = assert_raises(ClientError, client.copy_object, Bucket=bucket_name, CopySource=bucket_name+'/foo', CopySourceIfNoneMatch=resp['ETag'], Key='bar')
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 412
-    assert error_code == 'PreconditionFailed'
-
-@pytest.mark.fails_on_dbstore
-def test_copy_object_ifnonematch_failed():
-    bucket_name = get_new_bucket()
-    client = get_client()
-    resp = client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
-
-    client.copy_object(Bucket=bucket_name, CopySource=bucket_name+'/foo', CopySourceIfNoneMatch='ABCORZ', Key='bar')
-    response = client.get_object(Bucket=bucket_name, Key='bar')
-    body = _get_body(response)
-    assert body == 'bar'
-
-# TODO: results in a 404 instead of 400 on the RGW
-@pytest.mark.fails_on_rgw
-def test_object_read_unreadable():
-    bucket_name = get_new_bucket()
-    client = get_client()
-    e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key='\xae\x8a-')
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 400
-    assert e.response['Error']['Message'] == 'Couldn\'t parse the specified URI.'
-
-def test_get_bucket_policy_status():
-    bucket_name = get_new_bucket()
-    client = get_client()
-    resp = client.get_bucket_policy_status(Bucket=bucket_name)
-    assert resp['PolicyStatus']['IsPublic'] == False
-
-def test_get_public_acl_bucket_policy_status():
-    bucket_name = get_new_bucket()
-    client = get_client()
-    client.put_bucket_acl(Bucket=bucket_name, ACL='public-read')
-    resp = client.get_bucket_policy_status(Bucket=bucket_name)
-    assert resp['PolicyStatus']['IsPublic'] == True
-
-def test_get_authpublic_acl_bucket_policy_status():
-    bucket_name = get_new_bucket()
-    client = get_client()
-    client.put_bucket_acl(Bucket=bucket_name, ACL='authenticated-read')
-    resp = client.get_bucket_policy_status(Bucket=bucket_name)
-    assert resp['PolicyStatus']['IsPublic'] == True
-
-
-def test_get_publicpolicy_acl_bucket_policy_status():
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    resp = client.get_bucket_policy_status(Bucket=bucket_name)
-    assert resp['PolicyStatus']['IsPublic'] == False
-
-    resource1 = "arn:aws:s3:::" + bucket_name
-    resource2 = "arn:aws:s3:::" + bucket_name + "/*"
-    policy_document = json.dumps(
-    {
-        "Version": "2012-10-17",
-        "Statement": [{
-        "Effect": "Allow",
-        "Principal": {"AWS": "*"},
-        "Action": "s3:ListBucket",
-        "Resource": [
-            "{}".format(resource1),
-            "{}".format(resource2)
-          ]
-        }]
-     })
-
-    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
-    resp = client.get_bucket_policy_status(Bucket=bucket_name)
-    assert resp['PolicyStatus']['IsPublic'] == True
-
-
-def test_get_nonpublicpolicy_acl_bucket_policy_status():
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    resp = client.get_bucket_policy_status(Bucket=bucket_name)
-    assert resp['PolicyStatus']['IsPublic'] == False
-
-    resource1 = "arn:aws:s3:::" + bucket_name
-    resource2 = "arn:aws:s3:::" + bucket_name + "/*"
-    policy_document = json.dumps(
-    {
-        "Version": "2012-10-17",
-        "Statement": [{
-        "Effect": "Allow",
-        "Principal": {"AWS": "*"},
-        "Action": "s3:ListBucket",
-        "Resource": [
-            "{}".format(resource1),
-            "{}".format(resource2)
-          ],
-        "Condition": {
-            "IpAddress":
-            {"aws:SourceIp": "10.0.0.0/32"}
-        }
-        }]
-     })
-
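-    # the aws:SourceIp condition restricts the grant, so the policy is not
-    # considered public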
-    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
-    resp = client.get_bucket_policy_status(Bucket=bucket_name)
-    assert resp['PolicyStatus']['IsPublic'] == False
-
-
-def test_get_nonpublicpolicy_deny_bucket_policy_status():
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    resp = client.get_bucket_policy_status(Bucket=bucket_name)
-    assert resp['PolicyStatus']['IsPublic'] == False
-
-    resource1 = "arn:aws:s3:::" + bucket_name
-    resource2 = "arn:aws:s3:::" + bucket_name + "/*"
-    policy_document = json.dumps(
-    {
-        "Version": "2012-10-17",
-        "Statement": [{
-        "Effect": "Allow",
-        "NotPrincipal": {"AWS": "arn:aws:iam::s3tenant1:root"},
-        "Action": "s3:ListBucket",
-        "Resource": [
-            "{}".format(resource1),
-            "{}".format(resource2)
-          ],
-        }]
-     })
-
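-    # Allow with NotPrincipal still grants access to everyone except the named
-    # principal, so the bucket policy is reported as public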
-    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
-    resp = client.get_bucket_policy_status(Bucket=bucket_name)
-    assert resp['PolicyStatus']['IsPublic'] == True
-
-def test_get_undefined_public_block():
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    # delete the existing public access block configuration
-    # as AWS creates a default public access block configuration
-    resp = client.delete_public_access_block(Bucket=bucket_name)
-    assert resp['ResponseMetadata']['HTTPStatusCode'] == 204
-
-    response_code = ""
-    try:
-        resp = client.get_public_access_block(Bucket=bucket_name)
-    except ClientError as e:
-        response_code = e.response['Error']['Code']
-
-    assert response_code == 'NoSuchPublicAccessBlockConfiguration'
-
-def test_get_public_block_deny_bucket_policy():
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    access_conf = {'BlockPublicAcls': True,
-                   'IgnorePublicAcls': True,
-                   'BlockPublicPolicy': True,
-                   'RestrictPublicBuckets': False}
-    client.put_public_access_block(Bucket=bucket_name, PublicAccessBlockConfiguration=access_conf)
-
-    # make sure we can get the public access block
-    resp = client.get_public_access_block(Bucket=bucket_name)
-    assert resp['PublicAccessBlockConfiguration']['BlockPublicAcls'] == access_conf['BlockPublicAcls']
-    assert resp['PublicAccessBlockConfiguration']['BlockPublicPolicy'] == access_conf['BlockPublicPolicy']
-    assert resp['PublicAccessBlockConfiguration']['IgnorePublicAcls'] == access_conf['IgnorePublicAcls']
-    assert resp['PublicAccessBlockConfiguration']['RestrictPublicBuckets'] == access_conf['RestrictPublicBuckets']
-
-    # make bucket policy to deny access
-    resource = _make_arn_resource(bucket_name)
-    policy_document = make_json_policy("s3:GetBucketPublicAccessBlock",
-                                       resource, effect="Deny")
-    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
-
-    # check if the access is denied
-    e = assert_raises(ClientError, client.get_public_access_block, Bucket=bucket_name)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 403
-
-def test_put_public_block():
-    #client = get_svc_client(svc='s3control', client_config=Config(s3={'addressing_style': 'path'}))
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    access_conf = {'BlockPublicAcls': True,
-                   'IgnorePublicAcls': True,
-                   'BlockPublicPolicy': True,
-                   'RestrictPublicBuckets': False}
-
-    client.put_public_access_block(Bucket=bucket_name, PublicAccessBlockConfiguration=access_conf)
-
-    resp = client.get_public_access_block(Bucket=bucket_name)
-    assert resp['PublicAccessBlockConfiguration']['BlockPublicAcls'] == access_conf['BlockPublicAcls']
-    assert resp['PublicAccessBlockConfiguration']['BlockPublicPolicy'] == access_conf['BlockPublicPolicy']
-    assert resp['PublicAccessBlockConfiguration']['IgnorePublicAcls'] == access_conf['IgnorePublicAcls']
-    assert resp['PublicAccessBlockConfiguration']['RestrictPublicBuckets'] == access_conf['RestrictPublicBuckets']
-
-
-def test_block_public_put_bucket_acls():
-    #client = get_svc_client(svc='s3control', client_config=Config(s3={'addressing_style': 'path'}))
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    access_conf = {'BlockPublicAcls': True,
-                   'IgnorePublicAcls': False,
-                   'BlockPublicPolicy': True,
-                   'RestrictPublicBuckets': False}
-
-    client.put_public_access_block(Bucket=bucket_name, PublicAccessBlockConfiguration=access_conf)
-
-    resp = client.get_public_access_block(Bucket=bucket_name)
-    assert resp['PublicAccessBlockConfiguration']['BlockPublicAcls'] == access_conf['BlockPublicAcls']
-    assert resp['PublicAccessBlockConfiguration']['BlockPublicPolicy'] == access_conf['BlockPublicPolicy']
-
-    e = assert_raises(ClientError, client.put_bucket_acl, Bucket=bucket_name,ACL='public-read')
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 403
-
-    e = assert_raises(ClientError, client.put_bucket_acl, Bucket=bucket_name,ACL='public-read-write')
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 403
-
-    e = assert_raises(ClientError, client.put_bucket_acl, Bucket=bucket_name,ACL='authenticated-read')
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 403
-
-
-def test_block_public_object_canned_acls():
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    access_conf = {'BlockPublicAcls': True,
-                   'IgnorePublicAcls': False,
-                   'BlockPublicPolicy': False,
-                   'RestrictPublicBuckets': False}
-
-    client.put_public_access_block(Bucket=bucket_name, PublicAccessBlockConfiguration=access_conf)
-
-    # resp = client.get_public_access_block(Bucket=bucket_name)
-    # assert resp['PublicAccessBlockConfiguration']['BlockPublicAcls'] == access_conf['BlockPublicAcls']
-    # assert resp['PublicAccessBlockConfiguration']['BlockPublicPolicy'] == access_conf['BlockPublicPolicy']
-
-    #FIXME: use empty body until #42208
-    e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key='foo1', Body='', ACL='public-read')
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 403
-
-    e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key='foo2', Body='', ACL='public-read-write')
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 403
-
-    e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key='foo3', Body='', ACL='authenticated-read')
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 403
-
-
-def test_block_public_policy():
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    access_conf = {'BlockPublicAcls': False,
-                   'IgnorePublicAcls': False,
-                   'BlockPublicPolicy': True,
-                   'RestrictPublicBuckets': False}
-
-    client.put_public_access_block(Bucket=bucket_name, PublicAccessBlockConfiguration=access_conf)
-    resource = _make_arn_resource("{}/{}".format(bucket_name, "*"))
-    policy_document = make_json_policy("s3:GetObject",
-                                       resource)
-
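-    # with BlockPublicPolicy set, attaching a public policy is itself rejected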
-    check_access_denied(client.put_bucket_policy, Bucket=bucket_name, Policy=policy_document)
-
-
-def test_ignore_public_acls():
-    bucket_name = get_new_bucket()
-    client = get_client()
-    alt_client = get_alt_client()
-
-    client.put_bucket_acl(Bucket=bucket_name, ACL='public-read')
-    # Public bucket should be accessible
-    alt_client.list_objects(Bucket=bucket_name)
-
-    client.put_object(Bucket=bucket_name, Key='key1', Body='abcde', ACL='public-read')
-    resp = alt_client.get_object(Bucket=bucket_name, Key='key1')
-    assert _get_body(resp) == 'abcde'
-
-    access_conf = {'BlockPublicAcls': False,
-                   'IgnorePublicAcls': True,
-                   'BlockPublicPolicy': False,
-                   'RestrictPublicBuckets': False}
-
-    client.put_public_access_block(Bucket=bucket_name, PublicAccessBlockConfiguration=access_conf)
-    resource = _make_arn_resource("{}/{}".format(bucket_name, "*"))
-
-    client.put_bucket_acl(Bucket=bucket_name, ACL='public-read')
-    # IgnorePublicACLs is true, so regardless this should behave as a private bucket
-    check_access_denied(alt_client.list_objects, Bucket=bucket_name)
-    check_access_denied(alt_client.get_object, Bucket=bucket_name, Key='key1')
-
-
-def test_multipart_upload_on_a_bucket_with_policy():
-    bucket_name = get_new_bucket()
-    client = get_client()
-    resource1 = "arn:aws:s3:::" + bucket_name
-    resource2 = "arn:aws:s3:::" + bucket_name + "/*"
-    policy_document = json.dumps(
-    {
-        "Version": "2012-10-17",
-        "Statement": [{
-        "Effect": "Allow",
-        "Principal": "*",
-        "Action": "*",
-        "Resource": [
-            resource1,
-            resource2
-          ],
-        }]
-     })
-    key = "foo"
-    objlen = 50*1024*1024
-    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
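-    # the policy above allows every action for every principal, so the
-    # multipart upload is expected to complete successfully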
-    (upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key, size=objlen, client=client)
-    response = client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-def _put_bucket_encryption_s3(client, bucket_name):
-    """
-    enable a default server-side encryption configuration (SSE-S3) on the given bucket
-    """
-    server_side_encryption_conf = {
-        'Rules': [
-            {
-                'ApplyServerSideEncryptionByDefault': {
-                    'SSEAlgorithm': 'AES256'
-                }
-            },
-        ]
-    }
-    response = client.put_bucket_encryption(Bucket=bucket_name, ServerSideEncryptionConfiguration=server_side_encryption_conf)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-def _put_bucket_encryption_kms(client, bucket_name):
-    """
-    enable a default server-side encryption configuration (SSE-KMS) on the given bucket
-    """
-    kms_keyid = get_main_kms_keyid()
-    if kms_keyid is None:
-        kms_keyid = 'fool-me-again'
-    server_side_encryption_conf = {
-        'Rules': [
-            {
-                'ApplyServerSideEncryptionByDefault': {
-                    'SSEAlgorithm': 'aws:kms',
-                    'KMSMasterKeyID': kms_keyid
-                }
-            },
-        ]
-    }
-    response = client.put_bucket_encryption(Bucket=bucket_name, ServerSideEncryptionConfiguration=server_side_encryption_conf)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-
-@pytest.mark.sse_s3
-def test_put_bucket_encryption_s3():
-    bucket_name = get_new_bucket()
-    client = get_client()
-    _put_bucket_encryption_s3(client, bucket_name)
-
-@pytest.mark.encryption
-def test_put_bucket_encryption_kms():
-    bucket_name = get_new_bucket()
-    client = get_client()
-    _put_bucket_encryption_kms(client, bucket_name)
-
-
-@pytest.mark.sse_s3
-def test_get_bucket_encryption_s3():
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    response_code = ""
-    try:
-        client.get_bucket_encryption(Bucket=bucket_name)
-    except ClientError as e:
-        response_code = e.response['Error']['Code']
-
-    assert response_code == 'ServerSideEncryptionConfigurationNotFoundError'
-
-    _put_bucket_encryption_s3(client, bucket_name)
-
-    response = client.get_bucket_encryption(Bucket=bucket_name)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-    assert response['ServerSideEncryptionConfiguration']['Rules'][0]['ApplyServerSideEncryptionByDefault']['SSEAlgorithm'] == 'AES256'
-
-
-@pytest.mark.encryption
-def test_get_bucket_encryption_kms():
-    kms_keyid = get_main_kms_keyid()
-    if kms_keyid is None:
-        kms_keyid = 'fool-me-again'
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    response_code = ""
-    try:
-        client.get_bucket_encryption(Bucket=bucket_name)
-    except ClientError as e:
-        response_code = e.response['Error']['Code']
-
-    assert response_code == 'ServerSideEncryptionConfigurationNotFoundError'
-
-    _put_bucket_encryption_kms(client, bucket_name)
-
-    response = client.get_bucket_encryption(Bucket=bucket_name)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-    assert response['ServerSideEncryptionConfiguration']['Rules'][0]['ApplyServerSideEncryptionByDefault']['SSEAlgorithm'] == 'aws:kms'
-    assert response['ServerSideEncryptionConfiguration']['Rules'][0]['ApplyServerSideEncryptionByDefault']['KMSMasterKeyID'] == kms_keyid
-
-
-@pytest.mark.sse_s3
-def test_delete_bucket_encryption_s3():
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    response = client.delete_bucket_encryption(Bucket=bucket_name)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 204
-
-    _put_bucket_encryption_s3(client, bucket_name)
-
-    response = client.delete_bucket_encryption(Bucket=bucket_name)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 204
-
-    response_code = ""
-    try:
-        client.get_bucket_encryption(Bucket=bucket_name)
-    except ClientError as e:
-        response_code = e.response['Error']['Code']
-
-    assert response_code == 'ServerSideEncryptionConfigurationNotFoundError'
-
-
-@pytest.mark.encryption
-def test_delete_bucket_encryption_kms():
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    response = client.delete_bucket_encryption(Bucket=bucket_name)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 204
-
-    _put_bucket_encryption_kms(client, bucket_name)
-
-    response = client.delete_bucket_encryption(Bucket=bucket_name)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 204
-
-    response_code = ""
-    try:
-        client.get_bucket_encryption(Bucket=bucket_name)
-    except ClientError as e:
-        response_code = e.response['Error']['Code']
-
-    assert response_code == 'ServerSideEncryptionConfigurationNotFoundError'
-
-def _test_sse_s3_default_upload(file_size):
-    """
-    Enable default bucket encryption (SSE-S3), upload an object of the given
-    size filled with 'A's, then read it back and confirm the content matches
-    and the object was stored encrypted.
-    """
-    bucket_name = get_new_bucket()
-    client = get_client()
-    _put_bucket_encryption_s3(client, bucket_name)
-
-    data = 'A'*file_size
-    response = client.put_object(Bucket=bucket_name, Key='testobj', Body=data)
-    assert response['ResponseMetadata']['HTTPHeaders']['x-amz-server-side-encryption'] == 'AES256'
-
-    response = client.get_object(Bucket=bucket_name, Key='testobj')
-    assert response['ResponseMetadata']['HTTPHeaders']['x-amz-server-side-encryption'] == 'AES256'
-    body = _get_body(response)
-    assert body == data
-
-@pytest.mark.encryption
-@pytest.mark.bucket_encryption
-@pytest.mark.sse_s3
-@pytest.mark.fails_on_dbstore
-def test_sse_s3_default_upload_1b():
-    _test_sse_s3_default_upload(1)
-
-@pytest.mark.encryption
-@pytest.mark.bucket_encryption
-@pytest.mark.sse_s3
-@pytest.mark.fails_on_dbstore
-def test_sse_s3_default_upload_1kb():
-    _test_sse_s3_default_upload(1024)
-
-@pytest.mark.encryption
-@pytest.mark.bucket_encryption
-@pytest.mark.sse_s3
-@pytest.mark.fails_on_dbstore
-def test_sse_s3_default_upload_1mb():
-    _test_sse_s3_default_upload(1024*1024)
-
-@pytest.mark.encryption
-@pytest.mark.bucket_encryption
-@pytest.mark.sse_s3
-@pytest.mark.fails_on_dbstore
-def test_sse_s3_default_upload_8mb():
-    _test_sse_s3_default_upload(8*1024*1024)
-
-def _test_sse_kms_default_upload(file_size):
-    """
-    Enable default bucket encryption (SSE-KMS), upload an object of the given
-    size filled with 'A's, then read it back and confirm the content matches
-    and the object was stored encrypted with the configured KMS key.
-    """
-    kms_keyid = get_main_kms_keyid()
-    if kms_keyid is None:
-        pytest.skip('[s3 main] section missing kms_keyid')
-    bucket_name = get_new_bucket()
-    client = get_client()
-    _put_bucket_encryption_kms(client, bucket_name)
-
-    data = 'A'*file_size
-    response = client.put_object(Bucket=bucket_name, Key='testobj', Body=data)
-    assert response['ResponseMetadata']['HTTPHeaders']['x-amz-server-side-encryption'] == 'aws:kms'
-    assert response['ResponseMetadata']['HTTPHeaders']['x-amz-server-side-encryption-aws-kms-key-id'] == kms_keyid
-
-    response = client.get_object(Bucket=bucket_name, Key='testobj')
-    assert response['ResponseMetadata']['HTTPHeaders']['x-amz-server-side-encryption'] == 'aws:kms'
-    assert response['ResponseMetadata']['HTTPHeaders']['x-amz-server-side-encryption-aws-kms-key-id'] == kms_keyid
-    body = _get_body(response)
-    assert body == data
-
-@pytest.mark.encryption
-@pytest.mark.bucket_encryption
-@pytest.mark.sse_s3
-@pytest.mark.fails_on_dbstore
-def test_sse_kms_default_upload_1b():
-    _test_sse_kms_default_upload(1)
-
-@pytest.mark.encryption
-@pytest.mark.bucket_encryption
-@pytest.mark.sse_s3
-@pytest.mark.fails_on_dbstore
-def test_sse_kms_default_upload_1kb():
-    _test_sse_kms_default_upload(1024)
-
-@pytest.mark.encryption
-@pytest.mark.bucket_encryption
-@pytest.mark.sse_s3
-@pytest.mark.fails_on_dbstore
-def test_sse_kms_default_upload_1mb():
-    _test_sse_kms_default_upload(1024*1024)
-
-@pytest.mark.encryption
-@pytest.mark.bucket_encryption
-@pytest.mark.sse_s3
-@pytest.mark.fails_on_dbstore
-def test_sse_kms_default_upload_8mb():
-    _test_sse_kms_default_upload(8*1024*1024)
-
-
-
-@pytest.mark.encryption
-@pytest.mark.bucket_encryption
-@pytest.mark.sse_s3
-@pytest.mark.fails_on_dbstore
-def test_sse_s3_default_method_head():
-    bucket_name = get_new_bucket()
-    client = get_client()
-    _put_bucket_encryption_s3(client, bucket_name)
-
-    data = 'A'*1000
-    key = 'testobj'
-    client.put_object(Bucket=bucket_name, Key=key, Body=data)
-
-    response = client.head_object(Bucket=bucket_name, Key=key)
-    assert response['ResponseMetadata']['HTTPHeaders']['x-amz-server-side-encryption'] == 'AES256'
-
-    sse_s3_headers = {
-        'x-amz-server-side-encryption': 'AES256',
-    }
-    lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_s3_headers))
-    client.meta.events.register('before-call.s3.HeadObject', lf)
-    e = assert_raises(ClientError, client.head_object, Bucket=bucket_name, Key=key)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 400
-
-@pytest.mark.encryption
-@pytest.mark.bucket_encryption
-@pytest.mark.sse_s3
-@pytest.mark.fails_on_dbstore
-def test_sse_s3_default_multipart_upload():
-    bucket_name = get_new_bucket()
-    client = get_client()
-    _put_bucket_encryption_s3(client, bucket_name)
-
-    key = "multipart_enc"
-    content_type = 'text/plain'
-    objlen = 30 * 1024 * 1024
-    metadata = {'foo': 'bar'}
-    enc_headers = {
-        'Content-Type': content_type
-    }
-    resend_parts = []
-
-    (upload_id, data, parts) = _multipart_upload_enc(client, bucket_name, key, objlen,
-            part_size=5*1024*1024, init_headers=enc_headers, part_headers=enc_headers, metadata=metadata, resend_parts=resend_parts)
-
-    lf = (lambda **kwargs: kwargs['params']['headers'].update(enc_headers))
-    client.meta.events.register('before-call.s3.CompleteMultipartUpload', lf)
-    client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
-
-    response = client.list_objects_v2(Bucket=bucket_name, Prefix=key)
-    assert len(response['Contents']) == 1
-    assert response['Contents'][0]['Size'] == objlen
-
-    lf = (lambda **kwargs: kwargs['params']['headers'].update(enc_headers))
-    client.meta.events.register('before-call.s3.UploadPart', lf)
-
-    response = client.get_object(Bucket=bucket_name, Key=key)
-
-    assert response['Metadata'] == metadata
-    assert response['ResponseMetadata']['HTTPHeaders']['content-type'] == content_type
-    assert response['ResponseMetadata']['HTTPHeaders']['x-amz-server-side-encryption'] == 'AES256'
-
-    body = _get_body(response)
-    assert body == data
-    size = response['ContentLength']
-    assert len(body) == size
-
-    _check_content_using_range(key, bucket_name, data, 1000000)
-    _check_content_using_range(key, bucket_name, data, 10000000)
-
-@pytest.mark.encryption
-@pytest.mark.bucket_encryption
-@pytest.mark.sse_s3
-@pytest.mark.fails_on_dbstore
-def test_sse_s3_default_post_object_authenticated_request():
-    bucket_name = get_new_bucket()
-    client = get_client()
-    _put_bucket_encryption_s3(client, bucket_name)
-
-    url = _get_post_url(bucket_name)
-    utc = pytz.utc
-    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
-
-    policy_document = {
-            "expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),
-            "conditions": [
-                {"bucket": bucket_name},
-                ["starts-with", "$key", "foo"],
-                {"acl": "private"},
-                ["starts-with", "$Content-Type", "text/plain"],
-                ["starts-with", "$x-amz-server-side-encryption", ""], 
-                ["content-length-range", 0, 1024]
-            ]
-    }
-
-    json_policy_document = json.JSONEncoder().encode(policy_document)
-    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
-    policy = base64.b64encode(bytes_json_policy_document)
-    aws_secret_access_key = get_main_aws_secret_key()
-    aws_access_key_id = get_main_aws_access_key()
-
-    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
-
-    payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
-    ("acl" , "private"),("signature" , signature),("policy" , policy),\
-    ("Content-Type" , "text/plain"),
-    ('file', ('bar'))])
-
-    r = requests.post(url, files = payload)
-    assert r.status_code == 204
-
-    response = client.get_object(Bucket=bucket_name, Key='foo.txt')
-    assert response['ResponseMetadata']['HTTPHeaders']['x-amz-server-side-encryption'] == 'AES256'
-    body = _get_body(response)
-    assert body == 'bar'
-
-@pytest.mark.encryption
-@pytest.mark.bucket_encryption
-@pytest.mark.fails_on_dbstore
-def test_sse_kms_default_post_object_authenticated_request():
-    kms_keyid = get_main_kms_keyid()
-    if kms_keyid is None:
-        pytest.skip('[s3 main] section missing kms_keyid')
-    bucket_name = get_new_bucket()
-    client = get_client()
-    _put_bucket_encryption_kms(client, bucket_name)
-
-    url = _get_post_url(bucket_name)
-    utc = pytz.utc
-    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
-
-    policy_document = {
-            "expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),
-            "conditions": [
-                {"bucket": bucket_name},
-                ["starts-with", "$key", "foo"],
-                {"acl": "private"},
-                ["starts-with", "$Content-Type", "text/plain"],
-                ["starts-with", "$x-amz-server-side-encryption", ""], 
-                ["content-length-range", 0, 1024]
-            ]
-    }
-
-    json_policy_document = json.JSONEncoder().encode(policy_document)
-    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
-    policy = base64.b64encode(bytes_json_policy_document)
-    aws_secret_access_key = get_main_aws_secret_key()
-    aws_access_key_id = get_main_aws_access_key()
-
-    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
-
-    payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
-    ("acl" , "private"),("signature" , signature),("policy" , policy),\
-    ("Content-Type" , "text/plain"),
-    ('file', ('bar'))])
-
-    r = requests.post(url, files = payload)
-    assert r.status_code == 204
-
-    response = client.get_object(Bucket=bucket_name, Key='foo.txt')
-    assert response['ResponseMetadata']['HTTPHeaders']['x-amz-server-side-encryption'] == 'aws:kms'
-    assert response['ResponseMetadata']['HTTPHeaders']['x-amz-server-side-encryption-aws-kms-key-id'] == kms_keyid
-    body = _get_body(response)
-    assert body == 'bar'
-
-
-def _test_sse_s3_encrypted_upload(file_size):
-    """
-    Test upload of the given size, specifically requesting sse-s3 encryption.
-    """
-    bucket_name = get_new_bucket()
-    client = get_client()
-
-    data = 'A'*file_size
-    response = client.put_object(Bucket=bucket_name, Key='testobj', Body=data, ServerSideEncryption='AES256')
-    assert response['ResponseMetadata']['HTTPHeaders']['x-amz-server-side-encryption'] == 'AES256'
-
-    response = client.get_object(Bucket=bucket_name, Key='testobj')
-    assert response['ResponseMetadata']['HTTPHeaders']['x-amz-server-side-encryption'] == 'AES256'
-    body = _get_body(response)
-    assert body == data
-
-@pytest.mark.encryption
-@pytest.mark.sse_s3
-@pytest.mark.fails_on_dbstore
-def test_sse_s3_encrypted_upload_1b():
-    _test_sse_s3_encrypted_upload(1)
-
-@pytest.mark.encryption
-@pytest.mark.sse_s3
-@pytest.mark.fails_on_dbstore
-def test_sse_s3_encrypted_upload_1kb():
-    _test_sse_s3_encrypted_upload(1024)
-
-@pytest.mark.encryption
-@pytest.mark.sse_s3
-@pytest.mark.fails_on_dbstore
-def test_sse_s3_encrypted_upload_1mb():
-    _test_sse_s3_encrypted_upload(1024*1024)
-
-@pytest.mark.encryption
-@pytest.mark.sse_s3
-@pytest.mark.fails_on_dbstore
-def test_sse_s3_encrypted_upload_8mb():
-    _test_sse_s3_encrypted_upload(8*1024*1024)
-
-def test_get_object_torrent():
-    client = get_client()
-    bucket_name = get_new_bucket()
-    key = 'Avatar.mpg'
-
-    file_size = 7 * 1024 * 1024
-    data = 'A' * file_size
-
-    client.put_object(Bucket=bucket_name, Key=key, Body=data)
-
-    response = None
-    try:
-        response = client.get_object_torrent(Bucket=bucket_name, Key=key)
-        # if successful, verify the torrent contents are different from the body
-        assert data != _get_body(response)
-    except ClientError as e:
-        # accept 404 errors - torrent support may not be configured
-        status, error_code = _get_status_and_error_code(e.response)
-        assert status == 404
-        assert error_code == 'NoSuchKey'
-
-def test_upload_part_copy_percent_encoded_key():
-    
-    s3_client = get_client()
-    bucket_name = get_new_bucket()
-    key = "anyfile.txt"
-    encoded_key = "anyfilename%25.txt"
-    raw_key = "anyfilename%.txt"
-
-    ## PutObject: the copy source
-    s3_client.put_object(
-        Bucket=bucket_name,
-        Key=encoded_key,
-        Body=b"foo",
-        ContentType="text/plain"
-    )
-
-    # Upload the target object (initial state)
-    s3_client.put_object(
-        Bucket=bucket_name,
-        Key=key,
-        Body=b"foo",
-        ContentType="text/plain"
-    )
-
-    # Initiate multipart upload
-    mp_response = s3_client.create_multipart_upload(
-        Bucket=bucket_name,
-        Key=key
-    )
-    upload_id = mp_response["UploadId"]
-
-    # The following operation is expected to fail
-    with pytest.raises(s3_client.exceptions.ClientError) as exc_info:
-        s3_client.upload_part_copy(
-            Bucket=bucket_name,
-            Key=key,
-            PartNumber=1,
-            UploadId=upload_id,
-            CopySource={'Bucket': bucket_name, 'Key': raw_key}
-        )
-
-    # Download the object and verify content
-    final_obj = s3_client.get_object(Bucket=bucket_name, Key=key)
-    content = final_obj['Body'].read()
-    assert content == b"foo"
diff --git a/s3tests_boto3/functional/test_s3select.py b/s3tests_boto3/functional/test_s3select.py
deleted file mode 100644 (file)
index 1c0587a..0000000
+++ /dev/null
@@ -1,1685 +0,0 @@
-import pytest
-import random
-import string
-import re
-import json
-from botocore.exceptions import ClientError
-from botocore.exceptions import EventStreamError
-
-import uuid
-import warnings
-import traceback
-
-from . import (
-    configfile,
-    setup_teardown,
-    get_client,
-    get_new_bucket_name
-    )
-
-import logging
-logging.basicConfig(level=logging.INFO)
-
-import collections
-collections.Callable = collections.abc.Callable
-
-region_name = ''
-
-# recursive function for generating a random arithmetical expression
-def random_expr(depth):
-    # depth controls the complexity (nesting depth) of the expression
-    if depth==1 :
-        return str(int(random.random() * 100) + 1)+".0"
-    return '(' + random_expr(depth-1) + random.choice(['+','-','*','/']) + random_expr(depth-1) + ')'
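-# Illustrative note (editorial, not part of the original test flow): with the
-# generator above, random_expr(1) always yields a single literal such as "57.0",
-# while random_expr(3) yields a nested form like "((42.0+7.0)*(13.0-5.0))".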
-
-
-def generate_s3select_where_clause(bucket_name,obj_name):
-
-    a=random_expr(4)
-    b=random_expr(4)
-    s=random.choice([ '<','>','=','<=','>=','!=' ])
-
-    try:
-        eval( a )
-        eval( b )
-    except ZeroDivisionError:
-        return
-
-    # generate an s3select statement using the generated random expression
-    # count(0)>0 means the where-clause expression evaluated to true
-    # the python engine {eval( conditional expression )} should return the same boolean result.
-    s3select_stmt =  "select count(0) from s3object where " + a + s + b + ";"
-
-    res = remove_xml_tags_from_result( run_s3select(bucket_name,obj_name,s3select_stmt) ).replace(",","")
-
-    if  s == '=':
-        s = '=='
-
-    s3select_assert_result(int(res)>0 , eval( a + s + b ))
-
-def generate_s3select_expression_projection(bucket_name,obj_name):
-
-        # generate an s3select statement using the generated random expression
-        # the statement returns an arithmetical result for the generated expression.
-        # the same expression is evaluated by the python engine; results should be close enough (epsilon)
-        
-        e = random_expr( 4 )
-
-        try:
-            eval( e )
-        except ZeroDivisionError:
-            return
-
-        if eval( e ) == 0:
-            return
-
-        res = remove_xml_tags_from_result( run_s3select(bucket_name,obj_name,"select " + e + " from s3object;",) ).replace(",","")
-
-        # accuracy level 
-        epsilon = float(0.00001) 
-
-        # both results should be close (epsilon)
-        assert(  abs(float(res.split("\n")[0]) - eval(e)) < epsilon )
-
-@pytest.mark.s3select
-def get_random_string():
-
-    return uuid.uuid4().hex[:6].upper()
-
-@pytest.mark.s3select
-def test_generate_where_clause():
-
-    # create small csv file for testing the random expressions
-    single_line_csv = create_random_csv_object(1,1)
-    bucket_name = get_new_bucket_name()
-    obj_name = get_random_string() #"single_line_csv.csv"
-    upload_object(bucket_name,obj_name,single_line_csv)
-       
-    for _ in range(100): 
-        generate_s3select_where_clause(bucket_name,obj_name)
-
-
-@pytest.mark.s3select
-def test_generate_projection():
-
-    # create small csv file for testing the random expressions
-    single_line_csv = create_random_csv_object(1,1)
-    bucket_name = get_new_bucket_name()
-    obj_name = get_random_string() #"single_line_csv.csv"
-    upload_object(bucket_name,obj_name,single_line_csv)
-       
-    for _ in range(100): 
-        generate_s3select_expression_projection(bucket_name,obj_name)
-
-def s3select_assert_result(a,b):
-    if type(a) == str:
-        a_strip = a.strip()
-        b_strip = b.strip()
-        if a=="" and b=="":
-            warnings.warn(UserWarning("{}".format("both results are empty; this may indicate wrong input, please check the test input")))
-            ## print the calling function that created the empty result.
-            stack = traceback.extract_stack(limit=2)
-            formatted_stack = traceback.format_list(stack)[0]
-            warnings.warn(UserWarning("{}".format(formatted_stack)))
-            return True
-        assert a_strip != ""
-        assert b_strip != ""
-    else:
-        if a=="" and b=="":
-            warnings.warn(UserWarning("{}".format("both results are empty; this may indicate wrong input, please check the test input")))
-            ## print the calling function that created the empty result.
-            stack = traceback.extract_stack(limit=2)
-            formatted_stack = traceback.format_list(stack)[0]
-            warnings.warn(UserWarning("{}".format(formatted_stack)))
-            return True
-        assert a != ""
-        assert b != ""
-    assert a == b
-
-def create_csv_object_for_datetime(rows,columns):
-        result = ""
-        for _ in range(rows):
-            row = ""
-            for _ in range(columns):
-                row = row + "{}{:02d}{:02d}T{:02d}{:02d}{:02d}Z,".format(random.randint(0,100)+1900,random.randint(1,12),random.randint(1,28),random.randint(0,23),random.randint(0,59),random.randint(0,59),)
-            result += row + "\n"
-
-        return result
-
-def create_random_csv_object(rows,columns,col_delim=",",record_delim="\n",csv_schema=""):
-        result = ""
-        if len(csv_schema)>0 :
-            result = csv_schema + record_delim
-
-        for _ in range(rows):
-            row = ""
-            for _ in range(columns):
-                row = row + "{}{}".format(random.randint(0,1000),col_delim)
-            result += row + record_delim
-
-        return result
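-# For illustration (assumed example values): create_random_csv_object(2,3) produces
-# something of the form "512,7,903,\n44,218,5,\n" -- each row keeps a trailing
-# field delimiter and ends with the record delimiter.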
-
-def create_random_csv_object_string(rows,columns,col_delim=",",record_delim="\n",csv_schema=""):
-        result = ""
-        if len(csv_schema)>0 :
-            result = csv_schema + record_delim
-
-        for _ in range(rows):
-            row = ""
-            for _ in range(columns):
-                if random.randint(0,9) == 5:
-                    row = row + "{}{}".format(''.join(random.choice(string.ascii_letters) for m in range(10)) + "aeiou",col_delim)
-                else:
-                    row = row + "{}{}".format(''.join("cbcd" + random.choice(string.ascii_letters) for m in range(10)) + "vwxyzzvwxyz" ,col_delim)
-                
-            result += row + record_delim
-
-        return result
-
-def create_random_csv_object_trim(rows,columns,col_delim=",",record_delim="\n",csv_schema=""):
-        result = ""
-        if len(csv_schema)>0 :
-            result = csv_schema + record_delim
-
-        for _ in range(rows):
-            row = ""
-            for _ in range(columns):
-                if random.randint(0,5) == 2:
-                    row = row + "{}{}".format(''.join("   aeiou    ") ,col_delim)
-                else:
-                    row = row + "{}{}".format(''.join("abcd") ,col_delim)
-
-
-                
-            result += row + record_delim
-
-        return result
-
-def create_random_csv_object_escape(rows,columns,col_delim=",",record_delim="\n",csv_schema=""):
-        result = ""
-        if len(csv_schema)>0 :
-            result = csv_schema + record_delim
-
-        for _ in range(rows):
-            row = ""
-            for _ in range(columns):
-                if random.randint(0,9) == 5:
-                    row = row + "{}{}".format(''.join("_ar") ,col_delim)
-                else:
-                    row = row + "{}{}".format(''.join("aeio_")  ,col_delim)
-                
-            result += row + record_delim
-
-        return result
-
-def create_random_csv_object_null(rows,columns,col_delim=",",record_delim="\n",csv_schema=""):
-        result = ""
-        if len(csv_schema)>0 :
-            result = csv_schema + record_delim
-
-        for _ in range(rows):
-            row = ""
-            for _ in range(columns):
-                if random.randint(0,5) == 2:
-                    row = row + "{}{}".format(''.join("") ,col_delim)
-                else:
-                    row = row + "{}{}".format(''.join("abc") ,col_delim)
-                
-            result += row + record_delim
-
-        return result
-
-def create_random_json_object(rows,columns,col_delim=",",record_delim="\n",csv_schema=""):
-        result = "{\"root\" : ["
-        result += record_delim
-        if len(csv_schema)>0 :
-            result = csv_schema + record_delim
-
-        for _ in range(rows):
-            row = ""
-            num = 0
-            row += "{"
-            for _ in range(columns):
-                num += 1
-                row = row + "\"c" + str(num) + "\"" + ": " "{}{}".format(random.randint(0,1000),col_delim)
-            row = row[:-1]
-            row += "}"
-            row += ","
-            result += row + record_delim
-        
-        result = result[:-2]  
-        result += record_delim
-        result += "]" + "}"
-
-        return result
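-# For illustration (assumed example values): create_random_json_object(2,2) yields
-# a document shaped like:
-#   {"root" : [
-#   {"c1": 523,"c2": 12},
-#   {"c1": 7,"c2": 301}
-#   ]}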
-
-def csv_to_json(obj, field_split=",",row_split="\n",csv_schema=""):
-    result = "{\"root\" : ["
-    result += row_split
-    if len(csv_schema)>0 :
-        result = csv_schema + row_split
-    
-    for rec in obj.split(row_split):
-        row = ""
-        num = 0
-        row += "{"
-        for col in rec.split(field_split):
-            if col == "":
-                break
-            num += 1
-            row = row + "\"c" + str(num) + "\"" + ": " "{}{}".format(col,field_split)
-        row = row[:-1]
-        row += "}"
-        row += ","
-        result += row + row_split
-        
-    result = result[:-5]  
-    result += row_split
-    result += "]" + "}"
-
-    return result
-
-def upload_object(bucket_name,new_key,obj):
-
-        client = get_client()
-        client.create_bucket(Bucket=bucket_name)
-        client.put_object(Bucket=bucket_name, Key=new_key, Body=obj)
-
-        # validate uploaded object
-        c2 = get_client()
-        response = c2.get_object(Bucket=bucket_name, Key=new_key)
-        assert response['Body'].read().decode('utf-8') == obj, 's3select error: downloaded object not equal to uploaded object'
-
-def run_s3select(bucket,key,query,column_delim=",",row_delim="\n",quot_char='"',esc_char='\\',csv_header_info="NONE", progress = False):
-
-    s3 = get_client()
-    result = ""
-    result_status = {}
-
-    try:
-        r = s3.select_object_content(
-        Bucket=bucket,
-        Key=key,
-        ExpressionType='SQL',
-        InputSerialization = {"CSV": {"RecordDelimiter" : row_delim, "FieldDelimiter" : column_delim,"QuoteEscapeCharacter": esc_char, "QuoteCharacter": quot_char, "FileHeaderInfo": csv_header_info}, "CompressionType": "NONE"},
-        OutputSerialization = {"CSV": {}},
-        Expression=query,
-        RequestProgress = {"Enabled": progress})
-
-    except ClientError as c:
-        result += str(c)
-        return result
-
-    if progress == False:
-
-        try:
-            for event in r['Payload']:
-                if 'Records' in event:
-                    records = event['Records']['Payload'].decode('utf-8')
-                    result += records
-
-        except EventStreamError as c:
-            result = str(c)
-            return result
-        
-    else:
-            result = []
-            max_progress_scanned = 0
-            for event in r['Payload']:
-                if 'Records' in event:
-                    records = event['Records']
-                    result.append(records.copy())
-                if 'Progress' in event:
-                    if(event['Progress']['Details']['BytesScanned'] > max_progress_scanned):
-                        max_progress_scanned = event['Progress']['Details']['BytesScanned']
-                        result_status['Progress'] = event['Progress']
-
-                if 'Stats' in event:
-                    result_status['Stats'] = event['Stats']
-                if 'End' in event:
-                    result_status['End'] = event['End']
-
-
-    if progress == False:
-        return result
-    else:
-        return result,result_status
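-# Illustrative usage (editorial sketch, not part of the original tests):
-#   res = run_s3select(bucket, key, "select count(0) from s3object;")
-#   returns the CSV-serialized records as a single string; on a ClientError the
-#   exception text is returned instead.  With progress=True the call returns a
-#   (records_list, status_dict) pair including the Progress/Stats/End events.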
-
-def run_s3select_output(bucket,key,query, quot_field, op_column_delim = ",", op_row_delim = "\n",  column_delim=",", op_quot_char = '"', op_esc_char = '\\', row_delim="\n",quot_char='"',esc_char='\\',csv_header_info="NONE"):
-
-    s3 = get_client()
-
-    r = s3.select_object_content(
-        Bucket=bucket,
-        Key=key,
-        ExpressionType='SQL',
-        InputSerialization = {"CSV": {"RecordDelimiter" : row_delim, "FieldDelimiter" : column_delim,"QuoteEscapeCharacter": esc_char, "QuoteCharacter": quot_char, "FileHeaderInfo": csv_header_info}, "CompressionType": "NONE"},
-        OutputSerialization = {"CSV": {"RecordDelimiter" : op_row_delim, "FieldDelimiter" : op_column_delim, "QuoteCharacter" : op_quot_char, "QuoteEscapeCharacter" : op_esc_char, "QuoteFields" : quot_field}},
-        Expression=query,)
-    
-    result = ""
-    for event in r['Payload']:
-        if 'Records' in event:
-            records = event['Records']['Payload'].decode('utf-8')
-            result += records
-    
-    return result
-
-def run_s3select_json(bucket,key,query, op_row_delim = "\n"):
-
-    s3 = get_client()
-
-    r = s3.select_object_content(
-        Bucket=bucket,
-        Key=key,
-        ExpressionType='SQL',
-        InputSerialization = {"JSON": {"Type": "DOCUMENT"}},
-        OutputSerialization = {"JSON": {}},
-        Expression=query,)
-    #Record delimiter optional in output serialization
-    
-    result = ""
-    for event in r['Payload']:
-        if 'Records' in event:
-            records = event['Records']['Payload'].decode('utf-8')
-            result += records
-    
-    return result
-
-def remove_xml_tags_from_result(obj):
-    result = ""
-    for rec in obj.split("\n"):
-        if(rec.find("Payload")>0 or rec.find("Records")>0):
-            continue
-        result += rec + "\n" # restore the newline removed by the split above
-
-    result_strip= result.strip()
-    x = bool(re.search("^failure.*$", result_strip))
-    if x:
-        logging.info(result)
-    assert x == False
-
-    return result
-
-def create_list_of_int(column_pos,obj,field_split=",",row_split="\n"):
-    
-    list_of_int = [] 
-    for rec in obj.split(row_split):
-        col_num = 1
-        if ( len(rec) == 0):
-            continue
-        for col in rec.split(field_split):
-            if (col_num == column_pos):
-                list_of_int.append(int(col))
-            col_num+=1
-
-    return list_of_int
-
-@pytest.mark.s3select
-def test_count_operation():
-    csv_obj_name = get_random_string()
-    bucket_name = get_new_bucket_name()
-    num_of_rows = 1234
-    obj_to_load = create_random_csv_object(num_of_rows,10)
-    upload_object(bucket_name,csv_obj_name,obj_to_load)
-    res = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,"select count(0) from s3object;") ).replace(",","")
-
-    s3select_assert_result( num_of_rows, int( res ))
-
-@pytest.mark.s3select
-def test_count_json_operation():
-    json_obj_name = get_random_string()
-    bucket_name = get_new_bucket_name()
-
-    num_of_rows = 1
-    obj_to_load = create_random_json_object(num_of_rows,10)
-    upload_object(bucket_name,json_obj_name,obj_to_load)
-    res = remove_xml_tags_from_result(run_s3select_json(bucket_name,json_obj_name,"select count(0) from s3object[*];"))
-    s3select_assert_result( 1,  int(res))
-
-    res = remove_xml_tags_from_result(run_s3select_json(bucket_name,json_obj_name,"select count(0) from s3object[*].root;"))
-    s3select_assert_result( 1,  int(res))
-
-    obj_to_load = create_random_json_object(3,10)
-    upload_object(bucket_name,json_obj_name,obj_to_load)
-    res = remove_xml_tags_from_result(run_s3select_json(bucket_name,json_obj_name,"select count(0) from s3object[*].root;"))
-    s3select_assert_result( 3,  int(res))
-
-@pytest.mark.s3select
-def test_json_column_sum_min_max():
-    csv_obj = create_random_csv_object(10000,10)
-
-    json_obj = csv_to_json(csv_obj)
-
-    json_obj_name = get_random_string()
-    bucket_name = get_new_bucket_name()
-
-    upload_object(bucket_name,json_obj_name,json_obj)
-    
-    json_obj_name_2 = get_random_string()
-    bucket_name_2 = "testbuck2"
-    upload_object(bucket_name_2,json_obj_name_2,json_obj)
-    
-    res_s3select = remove_xml_tags_from_result(  run_s3select_json(bucket_name,json_obj_name,"select min(_1.c1) from s3object[*].root;")  ).replace(",","")
-    list_int = create_list_of_int( 1 , csv_obj )
-    res_target = min( list_int )
-
-    s3select_assert_result( int(res_s3select), int(res_target))
-
-    res_s3select = remove_xml_tags_from_result(  run_s3select_json(bucket_name,json_obj_name,"select min(_1.c4) from s3object[*].root;")  ).replace(",","")
-    list_int = create_list_of_int( 4 , csv_obj )
-    res_target = min( list_int )
-
-    s3select_assert_result( int(res_s3select), int(res_target))
-
-    res_s3select = remove_xml_tags_from_result(  run_s3select_json(bucket_name,json_obj_name,"select avg(_1.c6) from s3object[*].root;")  ).replace(",","")
-    list_int = create_list_of_int( 6 , csv_obj )
-    res_target = float(sum(list_int ))/10000
-
-    s3select_assert_result( float(res_s3select), float(res_target))
-    
-    res_s3select = remove_xml_tags_from_result(  run_s3select_json(bucket_name,json_obj_name,"select max(_1.c4) from s3object[*].root;")  ).replace(",","")
-    list_int = create_list_of_int( 4 , csv_obj )
-    res_target = max( list_int )
-
-    s3select_assert_result( int(res_s3select), int(res_target))
-    
-    res_s3select = remove_xml_tags_from_result(  run_s3select_json(bucket_name,json_obj_name,"select max(_1.c7) from s3object[*].root;")  ).replace(",","")
-    list_int = create_list_of_int( 7 , csv_obj )
-    res_target = max( list_int )
-
-    s3select_assert_result( int(res_s3select), int(res_target))
-    
-    res_s3select = remove_xml_tags_from_result(  run_s3select_json(bucket_name,json_obj_name,"select sum(_1.c4) from s3object[*].root;")  ).replace(",","")
-    list_int = create_list_of_int( 4 , csv_obj )
-    res_target = sum( list_int )
-
-    s3select_assert_result( int(res_s3select), int(res_target))
-    
-    res_s3select = remove_xml_tags_from_result(  run_s3select_json(bucket_name,json_obj_name,"select sum(_1.c7) from s3object[*].root;")  ).replace(",","")
-    list_int = create_list_of_int( 7 , csv_obj )
-    res_target = sum( list_int )
-
-    s3select_assert_result(  int(res_s3select) , int(res_target) )
-
-    # the following queries validate, on *random* input, an *accurate* relation between the condition result, the sum operation and the count operation.
-    res_s3select = remove_xml_tags_from_result(  run_s3select_json(bucket_name_2,json_obj_name_2,"select count(0),sum(_1.c1),sum(_1.c2) from s3object[*].root where (_1.c1-_1.c2) = 2;" ) )
-    count,sum1,sum2 = res_s3select.split(",")
-
-    s3select_assert_result( int(count)*2 , int(sum1)-int(sum2 ) )
-
-    res_s3select = remove_xml_tags_from_result(  run_s3select_json(bucket_name,json_obj_name,"select count(0),sum(_1.c1),sum(_1.c2) from s3object[*].root where (_1.c1-_1.c2) = 4;" ) ) 
-    count,sum1,sum2 = res_s3select.split(",")
-
-    s3select_assert_result( int(count)*4 , int(sum1)-int(sum2) )
-
-@pytest.mark.s3select
-def test_json_nullif_expressions():
-
-    json_obj = create_random_json_object(10000,10)
-
-    json_obj_name = get_random_string()
-    bucket_name = get_new_bucket_name()
-
-    upload_object(bucket_name,json_obj_name,json_obj)
-
-    res_s3select_nullif = remove_xml_tags_from_result(  run_s3select_json(bucket_name,json_obj_name,"select count(0) from s3object[*].root where nullif(_1.c1,_1.c2) is null ;")  ).replace("\n","")
-
-    res_s3select = remove_xml_tags_from_result(  run_s3select_json(bucket_name,json_obj_name,"select count(0) from s3object[*].root where _1.c1 = _1.c2  ;")  ).replace("\n","")
-
-    s3select_assert_result( res_s3select_nullif, res_s3select)
-
-    res_s3select_nullif = remove_xml_tags_from_result(  run_s3select_json(bucket_name,json_obj_name,"select (nullif(_1.c1,_1.c2) is null) from s3object[*].root ;")  ).replace("\n","")
-
-    res_s3select = remove_xml_tags_from_result(  run_s3select_json(bucket_name,json_obj_name,"select (_1.c1 = _1.c2) from s3object[*].root  ;")  ).replace("\n","")
-
-    s3select_assert_result( res_s3select_nullif, res_s3select)
-
-    res_s3select_nullif = remove_xml_tags_from_result(  run_s3select_json(bucket_name,json_obj_name,"select count(0) from s3object[*].root where not nullif(_1.c1,_1.c2) is null ;")  ).replace("\n","")
-
-    res_s3select = remove_xml_tags_from_result(  run_s3select_json(bucket_name,json_obj_name,"select count(0) from s3object[*].root where _1.c1 != _1.c2  ;")  ).replace("\n","")
-
-    s3select_assert_result( res_s3select_nullif, res_s3select)
-
-    res_s3select_nullif = remove_xml_tags_from_result(  run_s3select_json(bucket_name,json_obj_name,"select (nullif(_1.c1,_1.c2) is not null) from s3object[*].root ;")  ).replace("\n","")
-
-    res_s3select = remove_xml_tags_from_result(  run_s3select_json(bucket_name,json_obj_name,"select (_1.c1 != _1.c2) from s3object[*].root  ;")  ).replace("\n","")
-
-    s3select_assert_result( res_s3select_nullif, res_s3select)
-
-    res_s3select_nullif = remove_xml_tags_from_result(  run_s3select_json(bucket_name,json_obj_name,"select count(0) from s3object[*].root where  nullif(_1.c1,_1.c2) = _1.c1 ;")  ).replace("\n","")
-
-    res_s3select = remove_xml_tags_from_result(  run_s3select_json(bucket_name,json_obj_name,"select count(0) from s3object[*].root where _1.c1 != _1.c2  ;")  ).replace("\n","")
-
-    s3select_assert_result( res_s3select_nullif, res_s3select)
-
-
-@pytest.mark.s3select
-def test_column_sum_min_max():
-    csv_obj = create_random_csv_object(10000,10)
-
-    csv_obj_name = get_random_string()
-    bucket_name = get_new_bucket_name()
-    
-    upload_object(bucket_name,csv_obj_name,csv_obj)
-    
-    csv_obj_name_2 = get_random_string()
-    bucket_name_2 = "testbuck2"
-    upload_object(bucket_name_2,csv_obj_name_2,csv_obj)
-    
-    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select min(int(_1)) from s3object;")  ).replace(",","")
-    list_int = create_list_of_int( 1 , csv_obj )
-    res_target = min( list_int )
-
-    s3select_assert_result( int(res_s3select), int(res_target))
-
-    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select min(int(_4)) from s3object;")  ).replace(",","")
-    list_int = create_list_of_int( 4 , csv_obj )
-    res_target = min( list_int )
-
-    s3select_assert_result( int(res_s3select), int(res_target))
-
-    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select avg(int(_6)) from s3object;")  ).replace(",","")
-    list_int = create_list_of_int( 6 , csv_obj )
-    res_target = float(sum(list_int ))/10000
-
-    s3select_assert_result( float(res_s3select), float(res_target))
-    
-    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select max(int(_4)) from s3object;")  ).replace(",","")
-    list_int = create_list_of_int( 4 , csv_obj )
-    res_target = max( list_int )
-
-    s3select_assert_result( int(res_s3select), int(res_target))
-    
-    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select max(int(_7)) from s3object;")  ).replace(",","")
-    list_int = create_list_of_int( 7 , csv_obj )
-    res_target = max( list_int )
-
-    s3select_assert_result( int(res_s3select), int(res_target))
-    
-    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select sum(int(_4)) from s3object;")  ).replace(",","")
-    list_int = create_list_of_int( 4 , csv_obj )
-    res_target = sum( list_int )
-
-    s3select_assert_result( int(res_s3select), int(res_target))
-    
-    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select sum(int(_7)) from s3object;")  ).replace(",","")
-    list_int = create_list_of_int( 7 , csv_obj )
-    res_target = sum( list_int )
-
-    s3select_assert_result(  int(res_s3select) , int(res_target) )
-
-    # the following queries validate, on *random* input, an *accurate* relation between the condition result, the sum operation and the count operation.
-    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name_2,csv_obj_name_2,"select count(0),sum(int(_1)),sum(int(_2)) from s3object where (int(_1)-int(_2)) = 2;" ) )
-    count,sum1,sum2 = res_s3select.split(",")
-
-    s3select_assert_result( int(count)*2 , int(sum1)-int(sum2 ) )
-
-    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select count(0),sum(int(_1)),sum(int(_2)) from s3object where (int(_1)-int(_2)) = 4;" ) ) 
-    count,sum1,sum2 = res_s3select.split(",")
-
-    s3select_assert_result( int(count)*4 , int(sum1)-int(sum2) )
-
-@pytest.mark.s3select
-def test_nullif_expressions():
-
-    csv_obj = create_random_csv_object(10000,10)
-
-    csv_obj_name = get_random_string()
-    bucket_name = get_new_bucket_name()
-
-    upload_object(bucket_name,csv_obj_name,csv_obj)
-
-    res_s3select_nullif = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select count(0) from s3object where nullif(_1,_2) is null ;")  ).replace("\n","")
-
-    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select count(0) from s3object where _1 = _2  ;")  ).replace("\n","")
-
-    s3select_assert_result( res_s3select_nullif, res_s3select)
-
-    res_s3select_nullif = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select (nullif(_1,_2) is null) from s3object ;")  ).replace("\n","")
-
-    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select (_1 = _2) from s3object  ;")  ).replace("\n","")
-
-    s3select_assert_result( res_s3select_nullif, res_s3select)
-
-    res_s3select_nullif = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select count(0) from s3object where not nullif(_1,_2) is null ;")  ).replace("\n","")
-
-    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select count(0) from s3object where _1 != _2  ;")  ).replace("\n","")
-
-    s3select_assert_result( res_s3select_nullif, res_s3select)
-
-    res_s3select_nullif = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select (nullif(_1,_2) is not null) from s3object ;")  ).replace("\n","")
-
-    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select (_1 != _2) from s3object  ;")  ).replace("\n","")
-
-    s3select_assert_result( res_s3select_nullif, res_s3select)
-
-    res_s3select_nullif = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select count(0) from s3object where  nullif(_1,_2) = _1 ;")  ).replace("\n","")
-
-    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select count(0) from s3object where _1 != _2  ;")  ).replace("\n","")
-
-    s3select_assert_result( res_s3select_nullif, res_s3select)
-
-    csv_obj = create_random_csv_object_null(10000,10)
-
-    upload_object(bucket_name,csv_obj_name,csv_obj)
-
-    res_s3select_nullif = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select count(*) from s3object where nullif(_1,null) is null;")  ).replace("\n","")
-
-    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select count(*) from s3object where _1 is null;")  ).replace("\n","")
-
-    s3select_assert_result( res_s3select_nullif, res_s3select)
-
-    res_s3select_nullif = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select (nullif(_1,null) is null) from s3object;")  ).replace("\n","")
-
-    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select (_1 is null) from s3object;")  ).replace("\n","")
-
-    s3select_assert_result( res_s3select_nullif, res_s3select)
-
-@pytest.mark.s3select
-def test_nulliftrue_expressions():
-
-    csv_obj = create_random_csv_object(10000,10)
-
-    csv_obj_name = get_random_string()
-    bucket_name = get_new_bucket_name()
-
-    upload_object(bucket_name,csv_obj_name,csv_obj)
-
-    res_s3select_nullif = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select count(0) from s3object where (nullif(_1,_2) is null) = true ;")  ).replace("\n","")
-
-    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select count(0) from s3object where _1 = _2  ;")  ).replace("\n","")
-
-    s3select_assert_result( res_s3select_nullif, res_s3select)
-
-    res_s3select_nullif = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select count(0) from s3object where not (nullif(_1,_2) is null) = true ;")  ).replace("\n","")
-
-    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select count(0) from s3object where _1 != _2  ;")  ).replace("\n","")
-
-    s3select_assert_result( res_s3select_nullif, res_s3select)
-
-    res_s3select_nullif = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select count(0) from s3object where (nullif(_1,_2) = _1 = true) ;")  ).replace("\n","")
-
-    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select count(0) from s3object where _1 != _2  ;")  ).replace("\n","")
-
-    s3select_assert_result( res_s3select_nullif, res_s3select)
-
-@pytest.mark.s3select
-def test_is_not_null_expressions():
-
-    csv_obj = create_random_csv_object(10000,10)
-
-    csv_obj_name = get_random_string()
-    bucket_name = get_new_bucket_name()
-
-    upload_object(bucket_name,csv_obj_name,csv_obj)
-
-    res_s3select_null = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select count(*) from s3object where nullif(_1,_2) is not null ;")  ).replace("\n","")
-
-    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select count(*) from s3object where _1 != _2  ;")  ).replace("\n","")
-
-    s3select_assert_result( res_s3select_null, res_s3select)
-
-    res_s3select_null = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select count(*) from s3object where (nullif(_1,_1) and _1 = _2) is not null ;")  ).replace("\n","")
-
-    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select count(*) from s3object where _1 != _2  ;")  ).replace("\n","")
-
-    s3select_assert_result( res_s3select_null, res_s3select)
-
-@pytest.mark.s3select
-def test_lowerupper_expressions():
-
-    csv_obj = create_random_csv_object(1,10)
-
-    csv_obj_name = get_random_string()
-    bucket_name = get_new_bucket_name()
-
-    upload_object(bucket_name,csv_obj_name,csv_obj)
-
-    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select lower("AB12cd$$") from s3object ;')  ).replace("\n","")
-
-    s3select_assert_result( res_s3select, "ab12cd$$")
-
-    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select upper("ab12CD$$") from s3object ;')  ).replace("\n","")
-
-    s3select_assert_result( res_s3select, "AB12CD$$")
-
-@pytest.mark.s3select
-def test_in_expressions():
-
-    # purpose of test: the engine should correctly process several projections containing aggregation functions
-    csv_obj = create_random_csv_object(10000,10)
-
-    csv_obj_name = get_random_string()
-    bucket_name = get_new_bucket_name()
-
-    upload_object(bucket_name,csv_obj_name,csv_obj)
-
-    res_s3select_in = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select int(_1) from s3object where int(_1) in(1);')).replace("\n","")
-
-    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select int(_1) from s3object where int(_1) = 1;')).replace("\n","")
-
-    s3select_assert_result( res_s3select_in, res_s3select )
-
-    res_s3select_in = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select (int(_1) in(1)) from s3object;')).replace("\n","")
-
-    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select (int(_1) = 1) from s3object;')).replace("\n","")
-
-    s3select_assert_result( res_s3select_in, res_s3select )
-
-    res_s3select_in = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select int(_1) from s3object where int(_1) in(1,0);')).replace("\n","")
-
-    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select int(_1) from s3object where int(_1) = 1 or int(_1) = 0;')).replace("\n","")
-
-    s3select_assert_result( res_s3select_in, res_s3select )
-
-    res_s3select_in = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select (int(_1) in(1,0)) from s3object;')).replace("\n","")
-
-    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select (int(_1) = 1 or int(_1) = 0) from s3object;')).replace("\n","")
-
-    s3select_assert_result( res_s3select_in, res_s3select )
-
-    res_s3select_in = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select int(_2) from s3object where int(_2) in(1,0,2);')).replace("\n","")
-
-    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select int(_2) from s3object where int(_2) = 1 or int(_2) = 0 or int(_2) = 2;')).replace("\n","")
-
-    s3select_assert_result( res_s3select_in, res_s3select )
-
-    res_s3select_in = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select (int(_2) in(1,0,2)) from s3object;')).replace("\n","")
-
-    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select (int(_2) = 1 or int(_2) = 0 or int(_2) = 2) from s3object;')).replace("\n","")
-
-    s3select_assert_result( res_s3select_in, res_s3select )
-
-    res_s3select_in = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select int(_2) from s3object where int(_2)*2 in(int(_3)*2,int(_4)*3,int(_5)*5);')).replace("\n","")
-
-    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select int(_2) from s3object where int(_2)*2 = int(_3)*2 or int(_2)*2 = int(_4)*3 or int(_2)*2 = int(_5)*5;')).replace("\n","")
-
-    s3select_assert_result( res_s3select_in, res_s3select )
-
-    res_s3select_in = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select (int(_2)*2 in(int(_3)*2,int(_4)*3,int(_5)*5)) from s3object;')).replace("\n","")
-
-    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select (int(_2)*2 = int(_3)*2 or int(_2)*2 = int(_4)*3 or int(_2)*2 = int(_5)*5) from s3object;')).replace("\n","")
-
-    s3select_assert_result( res_s3select_in, res_s3select )
-
-    res_s3select_in = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select int(_1) from s3object where character_length(_1) = 2 and substring(_1,2,1) in ("3");')).replace("\n","")
-
-    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select int(_1) from s3object where _1 like "_3";')).replace("\n","")
-
-    s3select_assert_result( res_s3select_in, res_s3select )
-
-    res_s3select_in = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select (character_length(_1) = 2 and substring(_1,2,1) in ("3")) from s3object;')).replace("\n","")
-
-    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select (_1 like "_3") from s3object;')).replace("\n","")
-
-    s3select_assert_result( res_s3select_in, res_s3select )
-
-@pytest.mark.s3select
-def test_true_false_in_expressions():
-
-    csv_obj = create_random_csv_object(10000,10)
-
-    csv_obj_name = get_random_string()
-    bucket_name = get_new_bucket_name()
-
-    ## 1,2 must exist in first/second column (to avoid empty results)
-    csv_obj = csv_obj + "1,2,,,,,,,,,,\n"
-
-    upload_object(bucket_name,csv_obj_name,csv_obj)
-
-    res_s3select_in = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select int(_1) from s3object where (int(_1) in(1)) = true;')).replace("\n","")
-
-    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select int(_1) from s3object where int(_1) = 1;')).replace("\n","")
-
-    s3select_assert_result( res_s3select_in, res_s3select )
-
-    res_s3select_in = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select int(_1) from s3object where (int(_1) in(1,0)) = true;')).replace("\n","")
-
-    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select int(_1) from s3object where int(_1) = 1 or int(_1) = 0;')).replace("\n","")
-
-    s3select_assert_result( res_s3select_in, res_s3select )
-
-    res_s3select_in = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select int(_2) from s3object where (int(_2) in(1,0,2)) = true;')).replace("\n","")
-
-    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select int(_2) from s3object where int(_2) = 1 or int(_2) = 0 or int(_2) = 2;')).replace("\n","")
-
-    s3select_assert_result( res_s3select_in, res_s3select )
-
-    res_s3select_in = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select int(_2) from s3object where (int(_2)*2 in(int(_3)*2,int(_4)*3,int(_5)*5)) = true;')).replace("\n","")
-
-    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select int(_2) from s3object where int(_2)*2 = int(_3)*2 or int(_2)*2 = int(_4)*3 or int(_2)*2 = int(_5)*5;')).replace("\n","")
-
-    s3select_assert_result( res_s3select_in, res_s3select )
-
-    res_s3select_in = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select int(_1) from s3object where (character_length(_1) = 2) = true and (substring(_1,2,1) in ("3")) = true;')).replace("\n","")
-
-    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select int(_1) from s3object where _1 like "_3";')).replace("\n","")
-
-    s3select_assert_result( res_s3select_in, res_s3select )
-
-    res_s3select_in = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select (int(_1) in (1,2,0)) as a1 from s3object where a1 = true;')).replace("\n","")
-
-    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select \"true\"from s3object where (int(_1) in (1,0,2)) ;')).replace("\n","")
-
-    s3select_assert_result( res_s3select_in, res_s3select )  
-
-@pytest.mark.s3select
-def test_like_expressions():
-
-    csv_obj = create_random_csv_object_string(1000,10)
-
-    csv_obj_name = get_random_string()
-    bucket_name = get_new_bucket_name()
-
-    upload_object(bucket_name,csv_obj_name,csv_obj)
-
-    res_s3select_like = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where _1 like "%aeio%";')).replace("\n","")
-
-    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select count(*) from s3object where substring(_1,11,4) = "aeio" ;')).replace("\n","")
-
-    s3select_assert_result( res_s3select_like, res_s3select )
-
-    res_s3select_like = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select  (_1 like "%aeio%") from s3object ;')).replace("\n","")
-
-    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select (substring(_1,11,4) = "aeio") from s3object ;')).replace("\n","")
-
-    s3select_assert_result( res_s3select_like, res_s3select )
-
-    res_s3select_like = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where _1 like "cbcd%";')).replace("\n","")
-
-    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select count(*) from s3object where substring(_1,1,4) = "cbcd";')).replace("\n","")
-
-    s3select_assert_result( res_s3select_like, res_s3select )
-
-    res_s3select_like = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from stdin where _1 like "%aeio%" like;')).replace("\n","")
-
-    find_like = res_s3select_like.find("UnsupportedSyntax")
-
-    assert int(find_like) >= 0
-
-    res_s3select_like = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select (_1 like "cbcd%") from s3object;')).replace("\n","")
-
-    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select (substring(_1,1,4) = "cbcd") from s3object;')).replace("\n","")
-
-    s3select_assert_result( res_s3select_like, res_s3select )
-
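-    # "%y[y-z]" uses a character range: it should match values ending with "y" followed by "y" or "z", as the equivalent substring/between query below spells out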
-    res_s3select_like = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where _3 like "%y[y-z]";')).replace("\n","")
-
-    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select count(*) from s3object where substring(_3,char_length(_3),1) between "y" and "z" and substring(_3,char_length(_3)-1,1) = "y";')).replace("\n","")
-
-    s3select_assert_result( res_s3select_like, res_s3select )
-
-    res_s3select_like = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select (_3 like "%y[y-z]") from s3object;')).replace("\n","")
-
-    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select (substring(_3,char_length(_3),1) between "y" and "z" and substring(_3,char_length(_3)-1,1) = "y") from s3object;')).replace("\n","")
-
-    s3select_assert_result( res_s3select_like, res_s3select )
-
-    res_s3select_like = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where _2 like "%yz";')).replace("\n","")
-
-    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select count(*) from s3object where substring(_2,char_length(_2),1) = "z" and substring(_2,char_length(_2)-1,1) = "y";')).replace("\n","")
-
-    s3select_assert_result( res_s3select_like, res_s3select )
-
-    res_s3select_like = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select (_2 like "%yz") from s3object;')).replace("\n","")
-
-    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select (substring(_2,char_length(_2),1) = "z" and substring(_2,char_length(_2)-1,1) = "y") from s3object;')).replace("\n","")
-
-    s3select_assert_result( res_s3select_like, res_s3select )
-
-    res_s3select_like = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where _3 like "c%z";')).replace("\n","")
-
-    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select count(*) from s3object where substring(_3,char_length(_3),1) = "z" and substring(_3,1,1) = "c";')).replace("\n","")
-
-    s3select_assert_result( res_s3select_like, res_s3select )
-
-    res_s3select_like = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select (_3 like "c%z") from s3object;')).replace("\n","")
-
-    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select (substring(_3,char_length(_3),1) = "z" and substring(_3,1,1) = "c") from s3object;')).replace("\n","")
-
-    s3select_assert_result( res_s3select_like, res_s3select )
-
-    res_s3select_like = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where _2 like "%xy_";')).replace("\n","")
-
-    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select count(*) from s3object where substring(_2,char_length(_2)-1,1) = "y" and substring(_2,char_length(_2)-2,1) = "x";')).replace("\n","")
-
-    s3select_assert_result( res_s3select_like, res_s3select )
-
-    res_s3select_like = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select (_2 like "%xy_") from s3object;')).replace("\n","")
-
-    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select (substring(_2,char_length(_2)-1,1) = "y" and substring(_2,char_length(_2)-2,1) = "x") from s3object;')).replace("\n","")
-
-    s3select_assert_result( res_s3select_like, res_s3select )
-
-@pytest.mark.s3select
-def test_truefalselike_expressions():
-
-    csv_obj = create_random_csv_object_string(1000,10)
-
-    csv_obj_name = get_random_string()
-    bucket_name = get_new_bucket_name()
-
-    upload_object(bucket_name,csv_obj_name,csv_obj)
-
-    res_s3select_like = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where (_1 like "%aeio%") = true;')).replace("\n","")
-
-    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select count(*) from s3object where substring(_1,11,4) = "aeio" ;')).replace("\n","")
-
-    s3select_assert_result( res_s3select_like, res_s3select )
-
-    res_s3select_like = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where (_1 like "cbcd%") = true;')).replace("\n","")
-
-    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select count(*) from s3object where substring(_1,1,4) = "cbcd";')).replace("\n","")
-
-    s3select_assert_result( res_s3select_like, res_s3select )
-
-    res_s3select_like = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where (_3 like "%y[y-z]") = true;')).replace("\n","")
-
-    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select count(*) from s3object where (substring(_3,char_length(_3),1) between "y" and "z") = true and (substring(_3,char_length(_3)-1,1) = "y") = true;')).replace("\n","")
-
-    s3select_assert_result( res_s3select_like, res_s3select )
-
-    res_s3select_like = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where (_2 like "%yz") = true;')).replace("\n","")
-
-    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select count(*) from s3object where (substring(_2,char_length(_2),1) = "z") = true and (substring(_2,char_length(_2)-1,1) = "y") = true;')).replace("\n","")
-
-    s3select_assert_result( res_s3select_like, res_s3select )
-
-    res_s3select_like = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where (_3 like "c%z") = true;')).replace("\n","")
-
-    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select count(*) from s3object where (substring(_3,char_length(_3),1) = "z") = true and (substring(_3,1,1) = "c") = true;')).replace("\n","")
-
-    s3select_assert_result( res_s3select_like, res_s3select )
-
-    res_s3select_like = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where (_2 like "%xy_") = true;')).replace("\n","")
-
-    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select count(*) from s3object where (substring(_2,char_length(_2)-1,1) = "y") = true and (substring(_2,char_length(_2)-2,1) = "x") = true;')).replace("\n","")
-
-    s3select_assert_result( res_s3select_like, res_s3select )
-
-@pytest.mark.s3select
-def test_nullif_expressions():
-
-    csv_obj = create_random_csv_object(10000,10)
-
-    csv_obj_name = get_random_string()
-    bucket_name = get_new_bucket_name()
-
-    upload_object(bucket_name,csv_obj_name,csv_obj)
-
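-    # nullif(_1,_2) returns null exactly when _1 = _2, so both counts below should match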
-    res_s3select_nullif = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select count(0) from stdin where nullif(_1,_2) is null ;")  ).replace("\n","")
-
-    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select count(0) from stdin where _1 = _2  ;")  ).replace("\n","")
-
-    assert res_s3select_nullif == res_s3select
-
-    res_s3select_nullif = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select count(0) from stdin where not nullif(_1,_2) is null ;")  ).replace("\n","")
-
-    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select count(0) from stdin where _1 != _2  ;")  ).replace("\n","")
-
-    assert res_s3select_nullif == res_s3select
-
-    res_s3select_nullif = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select count(0) from stdin where  nullif(_1,_2) = _1 ;")  ).replace("\n","")
-
-    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select count(0) from stdin where _1 != _2  ;")  ).replace("\n","")
-
-    assert res_s3select_nullif == res_s3select
-
-@pytest.mark.s3select
-def test_lowerupper_expressions():
-
-    csv_obj = create_random_csv_object(1,10)
-
-    csv_obj_name = get_random_string()
-    bucket_name = get_new_bucket_name()
-
-    upload_object(bucket_name,csv_obj_name,csv_obj)
-
-    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select lower("AB12cd$$") from stdin ;')  ).replace("\n","")
-
-    assert res_s3select == "ab12cd$$"
-
-    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select upper("ab12CD$$") from stdin ;')  ).replace("\n","")
-
-    assert res_s3select == "AB12CD$$"
-
-@pytest.mark.s3select
-def test_in_expressions():
-
-    # purpose of test: the in( ) operator should produce the same results as the equivalent chain of equality comparisons
-    csv_obj = create_random_csv_object(10000,10)
-
-    csv_obj_name = get_random_string()
-    bucket_name = get_new_bucket_name()
-
-    upload_object(bucket_name,csv_obj_name,csv_obj)
-
-    res_s3select_in = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select int(_1) from stdin where int(_1) in(1);')).replace("\n","")
-
-    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select int(_1) from stdin where int(_1) = 1;')).replace("\n","")
-
-    assert res_s3select_in == res_s3select 
-
-    res_s3select_in = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select int(_1) from stdin where int(_1) in(1,0);')).replace("\n","")
-
-    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select int(_1) from stdin where int(_1) = 1 or int(_1) = 0;')).replace("\n","")
-
-    assert res_s3select_in == res_s3select 
-
-    res_s3select_in = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select int(_2) from stdin where int(_2) in(1,0,2);')).replace("\n","")
-
-    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select int(_2) from stdin where int(_2) = 1 or int(_2) = 0 or int(_2) = 2;')).replace("\n","")
-
-    assert res_s3select_in == res_s3select 
-
-    res_s3select_in = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select int(_2) from stdin where int(_2)*2 in(int(_3)*2,int(_4)*3,int(_5)*5);')).replace("\n","")
-
-    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select int(_2) from stdin where int(_2)*2 = int(_3)*2 or int(_2)*2 = int(_4)*3 or int(_2)*2 = int(_5)*5;')).replace("\n","")
-
-    assert res_s3select_in == res_s3select 
-
-    res_s3select_in = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select int(_1) from stdin where character_length(_1) = 2 and substring(_1,2,1) in ("3");')).replace("\n","")
-
-    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select int(_1) from stdin where _1 like "_3";')).replace("\n","")
-
-    assert res_s3select_in == res_s3select 
-
-@pytest.mark.s3select
-def test_like_expressions():
-
-    csv_obj = create_random_csv_object_string(10000,10)
-
-    csv_obj_name = get_random_string()
-    bucket_name = get_new_bucket_name()
-
-    upload_object(bucket_name,csv_obj_name,csv_obj)
-
-    res_s3select_in = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from stdin where _1 like "%aeio%";')).replace("\n","")
-
-    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select count(*) from stdin where substring(_1,11,4) = "aeio" ;')).replace("\n","")
-
-    assert res_s3select_in == res_s3select 
-
-    res_s3select_in = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from stdin where _1 like "cbcd%";')).replace("\n","")
-
-    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select count(*) from stdin where substring(_1,1,4) = "cbcd";')).replace("\n","")
-
-    assert res_s3select_in == res_s3select 
-
-    res_s3select_in = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from stdin where _3 like "%y[y-z]";')).replace("\n","")
-
-    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select count(*) from stdin where substring(_3,character_length(_3),1) between "y" and "z" and substring(_3,character_length(_3)-1,1) = "y";')).replace("\n","")
-
-    assert res_s3select_in == res_s3select 
-
-    res_s3select_in = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from stdin where _2 like "%yz";')).replace("\n","")
-
-    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select count(*) from stdin where substring(_2,character_length(_2),1) = "z" and substring(_2,character_length(_2)-1,1) = "y";')).replace("\n","")
-
-    assert res_s3select_in == res_s3select 
-
-    res_s3select_in = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from stdin where _3 like "c%z";')).replace("\n","")
-
-    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select count(*) from stdin where substring(_3,character_length(_3),1) = "z" and substring(_3,1,1) = "c";')).replace("\n","")
-
-    assert res_s3select_in == res_s3select 
-
-    res_s3select_in = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from stdin where _2 like "%xy_";')).replace("\n","")
-
-    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select count(*) from stdin where substring(_2,character_length(_2)-1,1) = "y" and substring(_2,character_length(_2)-2,1) = "x";')).replace("\n","")
-
-    assert res_s3select_in == res_s3select 
-
-
-@pytest.mark.s3select
-def test_complex_expressions():
-
-    # purpose of test: the engine should process several projections containing aggregation functions correctly
-    csv_obj = create_random_csv_object(10000,10)
-
-    csv_obj_name = get_random_string()
-    bucket_name = get_new_bucket_name()
-
-    upload_object(bucket_name,csv_obj_name,csv_obj)
-
-    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select min(int(_1)),max(int(_2)),min(int(_3))+1 from s3object;")).replace("\n","")
-
-    min_1 = min ( create_list_of_int( 1 , csv_obj ) )
-    max_2 = max ( create_list_of_int( 2 , csv_obj ) )
-    min_3 = min ( create_list_of_int( 3 , csv_obj ) ) + 1
-
-    __res = "{},{},{}".format(min_1,max_2,min_3)
-    
-    # assert according to the random-csv generator function
-    s3select_assert_result( res_s3select, __res )
-
-    # purpose of test: all three where-conditions select the same group of values, thus the same result
-    res_s3select_substring = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select min(int(_2)),max(int(_2)) from s3object where substring(_2,1,1) = "1" and char_length(_2) = 3;')).replace("\n","")
-
-    res_s3select_between_numbers = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select min(int(_2)),max(int(_2)) from s3object where int(_2)>=100 and int(_2)<200;')).replace("\n","")
-
-    res_s3select_eq_modolu = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select min(int(_2)),max(int(_2)) from s3object where int(_2)/100 = 1 and character_length(_2) = 3;')).replace("\n","")
-
-    s3select_assert_result( res_s3select_substring, res_s3select_between_numbers)
-
-    s3select_assert_result( res_s3select_between_numbers, res_s3select_eq_modolu)
-    
-@pytest.mark.s3select
-def test_alias():
-
-    # purpose: the test compares results of exactly the same queries, one using aliases and the other not.
-    # the test sets aliases on 3 projections; the third projection uses the other projections' aliases, and the where clause uses aliases as well.
-    # the test validates that the where-clause and projections execute aliases correctly; bear in mind that each alias has its own cache,
-    # and that cache needs to be invalidated for each new row.
-
-    csv_obj = create_random_csv_object(10000,10)
-
-    csv_obj_name = get_random_string()
-    bucket_name = get_new_bucket_name()
-
-    upload_object(bucket_name,csv_obj_name,csv_obj)
-
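-    # a3 is defined in terms of the a1 and a2 aliases and is then reused in the where clause; the unaliased query below must return the same rows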
-    res_s3select_alias = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select int(_1) as a1, int(_2) as a2 , (a1+a2) as a3 from s3object where a3>100 and a3<300;")  ).replace(",","")
-
-    res_s3select_no_alias = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select int(_1),int(_2),int(_1)+int(_2) from s3object where (int(_1)+int(_2))>100 and (int(_1)+int(_2))<300;")  ).replace(",","")
-
-    s3select_assert_result( res_s3select_alias, res_s3select_no_alias)
-
-
-@pytest.mark.s3select
-def test_alias_cyclic_refernce():
-
-    number_of_rows = 10000
-    
-    # purpose of test: validate that the s3select engine is able to detect a cyclic reference between aliases.
-    csv_obj = create_random_csv_object(number_of_rows,10)
-
-    csv_obj_name = get_random_string()
-    bucket_name = get_new_bucket_name()
-
-    upload_object(bucket_name,csv_obj_name,csv_obj)
-
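-    # a3 depends on a4, a4 on a5 and a5 on a3, so alias resolution can never terminate; the engine is expected to give up after a bounded number of alias lookups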
-    res_s3select_alias = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select int(_1) as a1,int(_2) as a2, a1+a4 as a3, a5+a1 as a4, int(_3)+a3 as a5 from s3object;")  )
-
-    find_res = res_s3select_alias.find("number of calls exceed maximum size, probably a cyclic reference to alias")
-    
-    assert int(find_res) >= 0 
-
-@pytest.mark.s3select
-def test_datetime():
-
-    # purpose of test: validate that the date-time functionality is correct
-    # by creating the same groups with different (nested) function calls, which should produce the same result
-
-    csv_obj = create_csv_object_for_datetime(10000,1)
-
-    csv_obj_name = get_random_string()
-    bucket_name = get_new_bucket_name()
-
-    upload_object(bucket_name,csv_obj_name,csv_obj)
-
-    res_s3select_date_time = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(0) from  s3object where extract(year from to_timestamp(_1)) > 1950 and extract(year from to_timestamp(_1)) < 1960;')  )
-
-    res_s3select_substring = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(0) from  s3object where int(substring(_1,1,4))>1950 and int(substring(_1,1,4))<1960;')  )
-
-    s3select_assert_result( res_s3select_date_time, res_s3select_substring)
-
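-    # the 'x' pattern passed to to_string() is expected to render the timezone-hour offset, matching extract(timezone_hour ...) below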
-    res_s3select_date_time_to_string = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select cast(to_string(to_timestamp(_1), \'x\') as int) from  s3object;')  )
-
-    res_s3select_date_time_extract = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select extract(timezone_hour from to_timestamp(_1)) from  s3object;')  )
-
-    s3select_assert_result( res_s3select_date_time_to_string, res_s3select_date_time_extract )
-
-    res_s3select_date_time_to_timestamp = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select extract(month from to_timestamp(_1)) from s3object where extract(month from to_timestamp(_1)) = 5;')  )
-
-    res_s3select_substring = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select cast(substring(_1, 5, 2) as int) from s3object where _1 like \'____05%\';')  )
-
-    s3select_assert_result( res_s3select_date_time_to_timestamp, res_s3select_substring)
-
-@pytest.mark.s3select
-def test_true_false_datetime():
-
-    # purpose of test: validate that the date-time functionality is correct
-    # by creating the same groups with different (nested) function calls, which should produce the same result
-
-    csv_obj = create_csv_object_for_datetime(10000,1)
-
-    csv_obj_name = get_random_string()
-    bucket_name = get_new_bucket_name()
-
-    upload_object(bucket_name,csv_obj_name,csv_obj)
-
-    res_s3select_date_time = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(0) from  s3object where (extract(year from to_timestamp(_1)) > 1950) = true and (extract(year from to_timestamp(_1)) < 1960) = true;')  )
-
-    res_s3select_substring = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(0) from  s3object where int(substring(_1,1,4))>1950 and int(substring(_1,1,4))<1960;')  )
-
-    s3select_assert_result( res_s3select_date_time, res_s3select_substring)
-
-    res_s3select_date_time = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(0) from  s3object where  (date_diff(month,to_timestamp(_1),date_add(month,2,to_timestamp(_1)) ) = 2) = true;')  )
-
-    res_s3select_count = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(0) from  s3object;')  )
-
-    s3select_assert_result( res_s3select_date_time, res_s3select_count)
-
-    res_s3select_date_time = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(0) from  s3object where (date_diff(year,to_timestamp(_1),date_add(day, 366 ,to_timestamp(_1))) = 1) = true ;')  )
-
-    s3select_assert_result( res_s3select_date_time, res_s3select_count)
-
-    # validate that utcnow() integrates correctly with other date-time functions
-    res_s3select_date_time_utcnow = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(0) from  s3object where (date_diff(hour,utcnow(),date_add(day,1,utcnow())) = 24) = true ;')  )
-
-    s3select_assert_result( res_s3select_date_time_utcnow, res_s3select_count)
-
-@pytest.mark.s3select
-def test_csv_parser():
-
-    # purpose: test the default csv meta-characters (, \n " \); a returned value may itself contain meta-characters
-    # NOTE: the default meta-characters for s3select are also Python meta-characters, thus in one example a double \ is mandatory
-
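-    # the single row below yields 9 tokens: _1, _3, _4 and _9 are empty (returned as null), while _6, _7 and _8 contain commas and escaped quotes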
-    csv_obj = r',first,,,second,third="c31,c32,c33",forth="1,2,3,4",fifth=my_string=\"any_value\" \, my_other_string=\"aaaa\,bbb\" ,' + "\n"
-    csv_obj_name = get_random_string()
-    bucket_name = get_new_bucket_name()
-
-    upload_object(bucket_name,csv_obj_name,csv_obj)
-
-    # returned value contains a comma{,}
-    res_s3select_alias = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select _6 from s3object;")  ).replace("\n","")
-    s3select_assert_result( res_s3select_alias, 'third=c31,c32,c33')
-
-    # returned value contains a comma{,}
-    res_s3select_alias = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select _7 from s3object;")  ).replace("\n","")
-    s3select_assert_result( res_s3select_alias, 'forth=1,2,3,4')
-
-    # returned value contains comma{,} and quote{"}; the escape-rule{\} bypasses the quote{"}, and the escape{\} itself is removed.
-    res_s3select_alias = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select _8 from s3object;")  ).replace("\n","")
-    s3select_assert_result( res_s3select_alias, 'fifth=my_string="any_value" , my_other_string="aaaa,bbb" ')
-
-    # return NULL as first token
-    res_s3select_alias = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select _1 from s3object;")  ).replace("\n","")
-    s3select_assert_result( res_s3select_alias, 'null')
-
-    # return NULL in the middle of the line
-    res_s3select_alias = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select _3 from s3object;")  ).replace("\n","")
-    s3select_assert_result( res_s3select_alias, 'null')
-
-    # return NULL in the middle of the line (successive)
-    res_s3select_alias = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select _4 from s3object;")  ).replace("\n","")
-    s3select_assert_result( res_s3select_alias, 'null')
-
-    # return NULL at the end of the line
-    res_s3select_alias = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select _9 from s3object;")  ).replace("\n","")
-    s3select_assert_result( res_s3select_alias, 'null')
-
-@pytest.mark.s3select
-def test_csv_definition():
-
-    number_of_rows = 10000
-
-    # create an object with the pipe sign as field separator and tab as row delimiter.
-    csv_obj = create_random_csv_object(number_of_rows,10,"|","\t")
-
-    csv_obj_name = get_random_string()
-    bucket_name = get_new_bucket_name()
-
-    upload_object(bucket_name,csv_obj_name,csv_obj)
-   
-    # purpose of test: parse input with different csv definitions correctly
-    res = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,"select count(0) from s3object;","|","\t") ).replace(",","")
-
-    s3select_assert_result( number_of_rows, int(res))
-    
-    # assert according to the random-csv generator function
-    # purpose of test: validate that tokens are processed correctly
-    res_s3select = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,"select min(int(_1)),max(int(_2)),min(int(_3))+1 from s3object;","|","\t") ).replace("\n","")
-
-    min_1 = min ( create_list_of_int( 1 , csv_obj , "|","\t") )
-    max_2 = max ( create_list_of_int( 2 , csv_obj , "|","\t") )
-    min_3 = min ( create_list_of_int( 3 , csv_obj , "|","\t") ) + 1
-
-    __res = "{},{},{}".format(min_1,max_2,min_3)
-    s3select_assert_result( res_s3select, __res )
-
-
-@pytest.mark.s3select
-def test_schema_definition():
-
-    number_of_rows = 10000
-
-    # purpose of test is to validate functionality using csv header info
-    csv_obj = create_random_csv_object(number_of_rows,10,csv_schema="c1,c2,c3,c4,c5,c6,c7,c8,c9,c10")
-
-    csv_obj_name = get_random_string()
-    bucket_name = get_new_bucket_name()
-
-    upload_object(bucket_name,csv_obj_name,csv_obj)
-
-    # ignore the schema on the first line and retrieve values using generic column numbers
-    res_ignore = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,"select _1,_3 from s3object;",csv_header_info="IGNORE") ).replace("\n","")
-
-    # use the schema on the first line; the query uses the attached schema
-    res_use = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,"select c1,c3 from s3object;",csv_header_info="USE") ).replace("\n","")
-    # result of both queries should be the same
-    s3select_assert_result( res_ignore, res_use)
-
-    # use a column name that does not exist in the schema
-    res_multiple_defintion = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,"select c1,c10,int(c11) from s3object;",csv_header_info="USE") ).replace("\n","")
-
-    assert ((res_multiple_defintion.find("alias {c11} or column not exist in schema")) >= 0)
-
-    #find_processing_error = res_multiple_defintion.find("ProcessingTimeError")
-    assert ((res_multiple_defintion.find("ProcessingTimeError")) >= 0)
-
-    # alias-name is identical to column-name
-    res_multiple_defintion = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,"select int(c1)+int(c2) as c4,c4 from s3object;",csv_header_info="USE") ).replace("\n","")
-
-    assert ((res_multiple_defintion.find("multiple definition of column {c4} as schema-column and alias"))  >= 0)
-
-@pytest.mark.s3select
-def test_when_then_else_expressions():
-
-    csv_obj = create_random_csv_object(10000,10)
-
-    csv_obj_name = get_random_string()
-    bucket_name = get_new_bucket_name()
-
-    upload_object(bucket_name,csv_obj_name,csv_obj)
-
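-    # count how many rows fall into each CASE branch, then compare against equivalent where-clause counts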
-    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select case when cast(_1 as int)>100 and cast(_1 as int)<200 then "(100-200)" when cast(_1 as int)>200 and cast(_1 as int)<300 then "(200-300)" else "NONE" end from s3object;')  ).replace("\n","")
-
-    count1 = res_s3select.count("(100-200)")  
-
-    count2 = res_s3select.count("(200-300)") 
-
-    count3 = res_s3select.count("NONE")
-
-    res = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where  cast(_1 as int)>100 and cast(_1 as int)<200  ;')  ).replace("\n","")
-
-    res1 = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where  cast(_1 as int)>200 and cast(_1 as int)<300  ;')  ).replace("\n","")
-    
-    res2 = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where  cast(_1 as int)<=100 or cast(_1 as int)>=300 or cast(_1 as int)=200  ;')  ).replace("\n","")
-
-    s3select_assert_result( str(count1) , res)
-
-    s3select_assert_result( str(count2) , res1)
-
-    s3select_assert_result( str(count3) , res2)
-
-@pytest.mark.s3select
-def test_coalesce_expressions():
-
-    csv_obj = create_random_csv_object(10000,10)
-
-    csv_obj_name = get_random_string()
-    bucket_name = get_new_bucket_name()
-
-    upload_object(bucket_name,csv_obj_name,csv_obj)
-
-    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where char_length(_3)>2 and char_length(_4)>2 and cast(substring(_3,1,2) as int) = cast(substring(_4,1,2) as int);')  ).replace("\n","")  
-
-    res_null = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where cast(_3 as int)>99 and cast(_4 as int)>99 and coalesce(nullif(cast(substring(_3,1,2) as int),cast(substring(_4,1,2) as int)),7) = 7;' ) ).replace("\n","") 
-
-    s3select_assert_result( res_s3select, res_null)
-
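-    # nullif(x,x) is always null, so coalesce should skip both and fall through to _2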
-    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select coalesce(nullif(_5,_5),nullif(_1,_1),_2) from s3object;')  ).replace("\n","") 
-
-    res_coalesce = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select coalesce(_2) from s3object;')  ).replace("\n","")   
-
-    s3select_assert_result( res_s3select, res_coalesce)
-
-
-@pytest.mark.s3select
-def test_cast_expressions():
-
-    csv_obj = create_random_csv_object(10000,10)
-
-    csv_obj_name = get_random_string()
-    bucket_name = get_new_bucket_name()
-
-    upload_object(bucket_name,csv_obj_name,csv_obj)
-
-    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where cast(_3 as int)>999;')  ).replace("\n","")  
-
-    res = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where char_length(_3)>3;')  ).replace("\n","") 
-
-    s3select_assert_result( res_s3select, res)
-
-    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where cast(_3 as int)>99 and cast(_3 as int)<1000;')  ).replace("\n","")  
-
-    res = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where char_length(_3)=3;')  ).replace("\n","") 
-
-    s3select_assert_result( res_s3select, res)
-
-@pytest.mark.s3select
-def test_version():
-
-    return
-    number_of_rows = 1
-
-    # purpose of test: validate the version() function
-    csv_obj = create_random_csv_object(number_of_rows,10)
-
-    csv_obj_name = get_random_string()
-    bucket_name = get_new_bucket_name()
-
-    upload_object(bucket_name,csv_obj_name,csv_obj)
-
-    res_version = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,"select version() from s3object;") ).replace("\n","")
-
-    s3select_assert_result( res_version, "41.a," )
-
-@pytest.mark.s3select
-def test_trim_expressions():
-
-    csv_obj = create_random_csv_object_trim(10000,10)
-
-    csv_obj_name = get_random_string()
-    bucket_name = get_new_bucket_name()
-
-    upload_object(bucket_name,csv_obj_name,csv_obj)
-
-    res_s3select_trim = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where trim(_1) = "aeiou";')).replace("\n","")
-
-    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select count(*) from s3object where substring(_1 from 4 for 5) = "aeiou";')).replace("\n","")
-
-    s3select_assert_result( res_s3select_trim, res_s3select )
-
-    res_s3select_trim = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where trim(both from _1) = "aeiou";')).replace("\n","")
-
-    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select count(*) from s3object where substring(_1,4,5) = "aeiou";')).replace("\n","")
-
-    s3select_assert_result( res_s3select_trim, res_s3select )
-
-    res_s3select_trim = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where trim(trailing from _1) = "   aeiou";')).replace("\n","")
-
-    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select count(*) from s3object where substring(_1,4,5) = "aeiou";')).replace("\n","")
-
-    s3select_assert_result( res_s3select_trim, res_s3select )
-
-    res_s3select_trim = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where trim(leading from _1) = "aeiou    ";')).replace("\n","")
-
-    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select count(*) from s3object where substring(_1,4,5) = "aeiou";')).replace("\n","")
-
-    s3select_assert_result( res_s3select_trim, res_s3select )
-
-    res_s3select_trim = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where trim(trim(leading from _1)) = "aeiou";')).replace("\n","")
-
-    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select count(*) from s3object where substring(_1,4,5) = "aeiou";')).replace("\n","")
-
-    s3select_assert_result( res_s3select_trim, res_s3select )
-
-@pytest.mark.s3select
-def test_truefalse_trim_expressions():
-
-    csv_obj = create_random_csv_object_trim(10000,10)
-
-    csv_obj_name = get_random_string()
-    bucket_name = get_new_bucket_name()
-
-    upload_object(bucket_name,csv_obj_name,csv_obj)
-
-    res_s3select_trim = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where trim(_1) = "aeiou" = true;')).replace("\n","")
-
-    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select count(*) from s3object where substring(_1 from 4 for 5) = "aeiou";')).replace("\n","")
-
-    s3select_assert_result( res_s3select_trim, res_s3select )
-
-    res_s3select_trim = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where trim(both from _1) = "aeiou" = true;')).replace("\n","")
-
-    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select count(*) from s3object where substring(_1,4,5) = "aeiou";')).replace("\n","")
-
-    s3select_assert_result( res_s3select_trim, res_s3select )
-
-    res_s3select_trim = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where trim(trailing from _1) = "   aeiou" = true;')).replace("\n","")
-
-    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select count(*) from s3object where substring(_1,4,5) = "aeiou";')).replace("\n","")
-
-    s3select_assert_result( res_s3select_trim, res_s3select )
-
-    res_s3select_trim = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where trim(leading from _1) = "aeiou    " = true;')).replace("\n","")
-
-    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select count(*) from s3object where substring(_1,4,5) = "aeiou";')).replace("\n","")
-
-    s3select_assert_result( res_s3select_trim, res_s3select )
-
-    res_s3select_trim = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where trim(trim(leading from _1)) = "aeiou" = true;')).replace("\n","")
-
-    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select count(*) from s3object where substring(_1,4,5) = "aeiou";')).replace("\n","")
-
-    s3select_assert_result( res_s3select_trim, res_s3select )
-
-@pytest.mark.s3select
-def test_escape_expressions():
-
-    csv_obj = create_random_csv_object_escape(10000,10)
-
-    csv_obj_name = get_random_string()
-    bucket_name = get_new_bucket_name()
-
-    upload_object(bucket_name,csv_obj_name,csv_obj)
-
-    res_s3select_escape = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where _1 like "%_ar" escape "%";')).replace("\n","")
-
-    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select count(*) from s3object where substring(_1,char_length(_1),1) = "r" and substring(_1,char_length(_1)-1,1) = "a" and substring(_1,char_length(_1)-2,1) = "_";')).replace("\n","")
-
-    s3select_assert_result( res_s3select_escape, res_s3select )
-
-    res_s3select_escape = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where _1 like "%aeio$_" escape "$";')).replace("\n","")
-
-    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select count(*) from s3object where substring(_1,1,5) = "aeio_";')).replace("\n","")
-
-    s3select_assert_result( res_s3select_escape, res_s3select )
-
-@pytest.mark.s3select
-def test_case_value_expressions():
-
-    csv_obj = create_random_csv_object(10000,10)
-
-    csv_obj_name = get_random_string()
-    bucket_name = get_new_bucket_name()
-
-    upload_object(bucket_name,csv_obj_name,csv_obj)
-
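-    # the value form of CASE (case X when Y then ...) should behave like the searched form with an explicit equality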
-    res_s3select_case = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select case cast(_1 as int) when cast(_2 as int) then "case_1_1" else "case_2_2" end from s3object;')).replace("\n","")
-
-    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select case when cast(_1 as int) = cast(_2 as int) then "case_1_1" else "case_2_2" end from s3object;')).replace("\n","")
-
-    s3select_assert_result( res_s3select_case, res_s3select )
-
-@pytest.mark.s3select
-def test_bool_cast_expressions():
-
-    csv_obj = create_random_csv_object(10000,10)
-
-    csv_obj_name = get_random_string()
-    bucket_name = get_new_bucket_name()
-
-    upload_object(bucket_name,csv_obj_name,csv_obj)
-
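-    # casting an int to bool yields true for any non-zero value, so both counts should agree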
-    res_s3select_cast = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where cast(int(_1) as bool) = true ;')).replace("\n","")
-
-    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select count(*) from s3object where cast(_1 as int) != 0 ;')).replace("\n","")
-
-    s3select_assert_result( res_s3select_cast, res_s3select )
-
-@pytest.mark.s3select
-def test_progress_expressions():
-
-    csv_obj = create_random_csv_object(1000000,10)
-
-    csv_obj_name = get_random_string()
-    bucket_name = get_new_bucket_name()
-
-    upload_object(bucket_name,csv_obj_name,csv_obj)
-
-    obj_size = len(csv_obj.encode('utf-8'))
-
-    result_status = {}
-    result_size = 0
-
-    res_s3select_response,result_status = run_s3select(bucket_name,csv_obj_name,"select sum(int(_1)) from s3object;",progress = True)
-
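-    # with progress enabled the call returns the raw event stream; sum the Records payload sizes and validate them against the Progress/Stats/End events below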
-    for rec in res_s3select_response:
-        result_size += len(rec['Payload'])
-
-    records_payload_size = result_size
-   
-    # To do: Validate bytes processed after supporting compressed data
-    s3select_assert_result(obj_size, result_status['Progress']['Details']['BytesScanned'])
-    s3select_assert_result(records_payload_size, result_status['Progress']['Details']['BytesReturned'])
-
-    # stats response payload validation
-    s3select_assert_result(obj_size, result_status['Stats']['Details']['BytesScanned'])
-    s3select_assert_result(records_payload_size, result_status['Stats']['Details']['BytesReturned'])
-
-    # end response
-    s3select_assert_result({}, result_status['End'])
-
-@pytest.mark.s3select
-def test_output_serial_expressions():
-    return # TODO fix test
-
-    csv_obj = create_random_csv_object(10000,10)
-
-    csv_obj_name = get_random_string()
-    bucket_name = get_new_bucket_name()
-
-    upload_object(bucket_name,csv_obj_name,csv_obj)
-
-    res_s3select_1 = remove_xml_tags_from_result(  run_s3select_output(bucket_name,csv_obj_name,"select _1, _2 from s3object where nullif(_1,_2) is null ;", "ALWAYS")  ).replace("\n",",").replace(",","")
-
-    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select _1, _2 from s3object where _1 = _2 ;")  ).replace("\n",",")
-
-    res_s3select_list = res_s3select.split(',')
-
-    res_s3select_list.pop()
-
-    res_s3select_final = (''.join('"' + item + '"' for item in res_s3select_list))
-
-    s3select_assert_result( '""'+res_s3select_1+'""', res_s3select_final)
-
-
-    res_s3select_in = remove_xml_tags_from_result(  run_s3select_output(bucket_name,csv_obj_name,'select int(_1) from s3object where (int(_1) in(int(_2)));', "ASNEEDED", '$', '#')).replace("\n","#") ## TODO why \n appears in output?
-
-    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select int(_1) from s3object where int(_1) = int(_2);')).replace("\n","#")
-    
-    res_s3select_list = res_s3select.split('#')
-
-    res_s3select_list.pop()
-
-    res_s3select_final = (''.join(item + '#' for item in res_s3select_list))
-
-
-    s3select_assert_result(res_s3select_in , res_s3select_final )
-
-
-    res_s3select_quot = remove_xml_tags_from_result(  run_s3select_output(bucket_name,csv_obj_name,'select int(_1) from s3object where (int(_1) in(int(_2)));', "ALWAYS", '$', '#')).replace("\n","")
-
-    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select int(_1) from s3object where int(_1) = int(_2);')).replace("\n","#")
-    res_s3select_list = res_s3select.split('#')
-
-    res_s3select_list.pop()
-
-    res_s3select_final = (''.join('"' + item + '"' + '#' for item in res_s3select_list))
-
-    s3select_assert_result( '""#'+res_s3select_quot+'""#', res_s3select_final )
diff --git a/s3tests_boto3/functional/test_sns.py b/s3tests_boto3/functional/test_sns.py
deleted file mode 100644 (file)
index 360f14e..0000000
+++ /dev/null
@@ -1,159 +0,0 @@
-import json
-import pytest
-from botocore.exceptions import ClientError
-from . import (
-    configfile,
-    get_iam_root_client,
-    get_iam_alt_root_client,
-    get_new_bucket_name,
-    get_prefix,
-    nuke_prefixed_buckets,
-)
-from .iam import iam_root, iam_alt_root
-from .utils import assert_raises, _get_status_and_error_code
-
-def get_new_topic_name():
-    return get_new_bucket_name()
-
-def nuke_topics(client, prefix):
-    p = client.get_paginator('list_topics')
-    for response in p.paginate():
-        for topic in response['Topics']:
-            arn = topic['TopicArn']
-            if prefix not in arn:
-                continue
-            try:
-                client.delete_topic(TopicArn=arn)
-            except:
-                pass
-
-@pytest.fixture
-def sns(iam_root):
-    client = get_iam_root_client(service_name='sns')
-    yield client
-    nuke_topics(client, get_prefix())
-
-@pytest.fixture
-def sns_alt(iam_alt_root):
-    client = get_iam_alt_root_client(service_name='sns')
-    yield client
-    nuke_topics(client, get_prefix())
-
-@pytest.fixture
-def s3(iam_root):
-    client = get_iam_root_client(service_name='s3')
-    yield client
-    nuke_prefixed_buckets(get_prefix(), client)
-
-@pytest.fixture
-def s3_alt(iam_alt_root):
-    client = get_iam_alt_root_client(service_name='s3')
-    yield client
-    nuke_prefixed_buckets(get_prefix(), client)
-
-
-@pytest.mark.iam_account
-@pytest.mark.sns
-def test_account_topic(sns):
-    name = get_new_topic_name()
-
-    response = sns.create_topic(Name=name)
-    arn = response['TopicArn']
-    assert arn.startswith('arn:aws:sns:')
-    assert arn.endswith(f':{name}')
-
-    response = sns.list_topics()
-    assert arn in [p['TopicArn'] for p in response['Topics']]
-
-    sns.set_topic_attributes(TopicArn=arn, AttributeName='Policy', AttributeValue='')
-
-    response = sns.get_topic_attributes(TopicArn=arn)
-    assert 'Attributes' in response
-
-    sns.delete_topic(TopicArn=arn)
-
-    response = sns.list_topics()
-    assert arn not in [p['TopicArn'] for p in response['Topics']]
-
-    with pytest.raises(sns.exceptions.NotFoundException):
-        sns.get_topic_attributes(TopicArn=arn)
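-    # deleting an already-deleted topic is expected to succeed (delete is idempotent)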
-    sns.delete_topic(TopicArn=arn)
-
-@pytest.mark.iam_account
-@pytest.mark.sns
-def test_cross_account_topic(sns, sns_alt):
-    name = get_new_topic_name()
-    arn = sns.create_topic(Name=name)['TopicArn']
-
-    # not visible to any of the alt user's apis
-    with pytest.raises(sns.exceptions.NotFoundException):
-        sns_alt.get_topic_attributes(TopicArn=arn)
-    with pytest.raises(sns.exceptions.NotFoundException):
-        sns_alt.set_topic_attributes(TopicArn=arn, AttributeName='Policy', AttributeValue='')
-
-    # delete returns success
-    sns_alt.delete_topic(TopicArn=arn)
-
-    response = sns_alt.list_topics()
-    assert arn not in [p['TopicArn'] for p in response['Topics']]
-
-@pytest.mark.iam_account
-@pytest.mark.sns
-def test_account_topic_publish(sns, s3):
-    name = get_new_topic_name()
-
-    response = sns.create_topic(Name=name)
-    topic_arn = response['TopicArn']
-
-    bucket = get_new_bucket_name()
-    s3.create_bucket(Bucket=bucket)
-
-    config = {'TopicConfigurations': [{
-        'Id': 'id',
-        'TopicArn': topic_arn,
-        'Events': [ 's3:ObjectCreated:*' ],
-        }]}
-    s3.put_bucket_notification_configuration(
-            Bucket=bucket, NotificationConfiguration=config)
-
-@pytest.mark.iam_account
-@pytest.mark.iam_cross_account
-@pytest.mark.sns
-def test_cross_account_topic_publish(sns, s3_alt, iam_alt_root):
-    name = get_new_topic_name()
-
-    response = sns.create_topic(Name=name)
-    topic_arn = response['TopicArn']
-
-    bucket = get_new_bucket_name()
-    s3_alt.create_bucket(Bucket=bucket)
-
-    config = {'TopicConfigurations': [{
-        'Id': 'id',
-        'TopicArn': topic_arn,
-        'Events': [ 's3:ObjectCreated:*' ],
-        }]}
-
-    # expect AccessDenied because no resource policy allows cross-account access
-    e = assert_raises(ClientError, s3_alt.put_bucket_notification_configuration,
-                      Bucket=bucket, NotificationConfiguration=config)
-    status, error_code = _get_status_and_error_code(e.response)
-    assert status == 403
-    assert error_code == 'AccessDenied'
-
-    # add topic policy to allow the alt user
-    alt_principal = iam_alt_root.get_user()['User']['Arn']
-    policy = json.dumps({
-        'Version': '2012-10-17',
-        'Statement': [{
-            'Effect': 'Allow',
-            'Principal': {'AWS': alt_principal},
-            'Action': 'sns:Publish',
-            'Resource': topic_arn
-            }]
-        })
-    sns.set_topic_attributes(TopicArn=topic_arn, AttributeName='Policy',
-                             AttributeValue=policy)
-
-    s3_alt.put_bucket_notification_configuration(
-            Bucket=bucket, NotificationConfiguration=config)
diff --git a/s3tests_boto3/functional/test_sts.py b/s3tests_boto3/functional/test_sts.py
deleted file mode 100644 (file)
index b13f56d..0000000
+++ /dev/null
@@ -1,2071 +0,0 @@
-import boto3
-import botocore.session
-from botocore.exceptions import ClientError
-from botocore.exceptions import ParamValidationError
-import pytest
-import isodate
-import email.utils
-import datetime
-import threading
-import re
-import pytz
-from collections import OrderedDict
-import requests
-import json
-import base64
-import hmac
-import hashlib
-import xml.etree.ElementTree as ET
-import time
-import operator
-import os
-import string
-import random
-import socket
-import ssl
-import logging
-from collections import namedtuple
-
-from email.header import decode_header
-
-from . import(
-    configfile,
-    setup_teardown,
-    get_iam_client,
-    get_sts_client,
-    get_client,
-    get_alt_user_id,
-    get_config_endpoint,
-    get_new_bucket_name,
-    get_parameter_name,
-    get_main_aws_access_key,
-    get_main_aws_secret_key,
-    get_thumbprint,
-    get_aud,
-    get_token,
-    get_realm_name,
-    check_webidentity,
-    get_iam_access_key,
-    get_iam_secret_key,
-    get_sub,
-    get_azp,
-    get_user_token
-    )
-
-log = logging.getLogger(__name__)
-
-def create_role(iam_client,path,rolename,policy_document,description,sessionduration,permissionboundary,tag_list=None):
-    role_err=None
-    role_response = None
-    if rolename is None:
-        rolename=get_parameter_name()
-    if tag_list is None:
-        tag_list = []
-    try:
-        role_response = iam_client.create_role(Path=path,RoleName=rolename,AssumeRolePolicyDocument=policy_document,Tags=tag_list)
-    except ClientError as e:
-       role_err = e.response['Code']
-    return (role_err,role_response,rolename)
-
-def put_role_policy(iam_client,rolename,policyname,role_policy):
-    role_err=None
-    role_response = None
-    if policyname is None:
-        policyname=get_parameter_name() 
-    try:
-        role_response = iam_client.put_role_policy(RoleName=rolename,PolicyName=policyname,PolicyDocument=role_policy)
-    except ClientError as e:
-       role_err = e.response['Code']
-    return (role_err,role_response)
-
-def put_user_policy(iam_client,username,policyname,policy_document):
-    role_err=None
-    role_response = None
-    if policyname is None:
-        policyname=get_parameter_name()
-    try:
-        role_response = iam_client.put_user_policy(UserName=username,PolicyName=policyname,PolicyDocument=policy_document)
-    except ClientError as e:
-        role_err = e.response['Code']
-    return (role_err,role_response,policyname)
-
-def get_s3_client_using_iam_creds():
-    iam_access_key = get_iam_access_key()
-    iam_secret_key = get_iam_secret_key()
-    default_endpoint = get_config_endpoint()
-
-    s3_client_iam_creds = boto3.client('s3',
-                              aws_access_key_id = iam_access_key,
-                              aws_secret_access_key = iam_secret_key,
-                              endpoint_url=default_endpoint,
-                              region_name='',
-                          )
-
-    return s3_client_iam_creds
-
-def create_oidc_provider(iam_client, url, clientidlist, thumbprintlist):
-    oidc_arn = None
-    oidc_error = None
-    clientids = []
-    if clientidlist is None:
-        clientidlist=clientids
-    try:
-        oidc_response = iam_client.create_open_id_connect_provider(
-            Url=url,
-            ClientIDList=clientidlist,
-            ThumbprintList=thumbprintlist,
-        )
-        oidc_arn = oidc_response['OpenIDConnectProviderArn']
-        print (oidc_arn)
-    except ClientError as e:
-        oidc_error = e.response['Code']
-        print (oidc_error)
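-        # creation failed (for example because the provider already exists); strip the url scheme, derive the provider arn and try to look up the existing provider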
-        try:
-            oidc_error = None
-            print (url)
-            if url.startswith('http://'):
-                url = url[len('http://'):]
-            elif url.startswith('https://'):
-                url = url[len('https://'):]
-            elif url.startswith('www.'):
-                url = url[len('www.'):]
-            oidc_arn = 'arn:aws:iam:::oidc-provider/{}'.format(url)
-            print (url)
-            print (oidc_arn)
-            oidc_response = iam_client.get_open_id_connect_provider(OpenIDConnectProviderArn=oidc_arn)
-        except ClientError as e:
-            oidc_arn = None
-    return (oidc_arn, oidc_error)
-
-def get_s3_resource_using_iam_creds():
-    iam_access_key = get_iam_access_key()
-    iam_secret_key = get_iam_secret_key()
-    default_endpoint = get_config_endpoint()
-
-    s3_res_iam_creds = boto3.resource('s3',
-                              aws_access_key_id = iam_access_key,
-                              aws_secret_access_key = iam_secret_key,
-                              endpoint_url=default_endpoint,
-                              region_name='',
-                          )
-
-    return s3_res_iam_creds
-
-@pytest.mark.test_of_sts
-@pytest.mark.fails_on_dbstore
-def test_get_session_token():
-    iam_client=get_iam_client()
-    sts_client=get_sts_client()
-    sts_user_id=get_alt_user_id()
-    default_endpoint=get_config_endpoint()
-    
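-    # inline policy: when the request is not STS-authenticated (sts:authentication is false), deny s3:* but allow sts:GetSessionToken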
-    user_policy = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Deny\",\"Action\":\"s3:*\",\"Resource\":[\"*\"],\"Condition\":{\"BoolIfExists\":{\"sts:authentication\":\"false\"}}},{\"Effect\":\"Allow\",\"Action\":\"sts:GetSessionToken\",\"Resource\":\"*\",\"Condition\":{\"BoolIfExists\":{\"sts:authentication\":\"false\"}}}]}"
-    (resp_err,resp,policy_name)=put_user_policy(iam_client,sts_user_id,None,user_policy)
-    assert resp['ResponseMetadata']['HTTPStatusCode'] == 200
-    
-    response=sts_client.get_session_token()
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-    
-    s3_client=boto3.client('s3',
-                aws_access_key_id = response['Credentials']['AccessKeyId'],
-               aws_secret_access_key = response['Credentials']['SecretAccessKey'],
-                aws_session_token = response['Credentials']['SessionToken'],
-               endpoint_url=default_endpoint,
-               region_name='',
-               )
-    bucket_name = get_new_bucket_name()
-    try:
-        s3bucket = s3_client.create_bucket(Bucket=bucket_name)
-        assert s3bucket['ResponseMetadata']['HTTPStatusCode'] == 200
-        finish=s3_client.delete_bucket(Bucket=bucket_name)
-    finally: # clean up user policy even if create_bucket/delete_bucket fails
-        iam_client.delete_user_policy(UserName=sts_user_id,PolicyName=policy_name)
-
-@pytest.mark.test_of_sts
-@pytest.mark.fails_on_dbstore
-def test_assume_role_allow():
-    iam_client=get_iam_client()    
-    sts_client=get_sts_client()
-    sts_user_id=get_alt_user_id()
-    default_endpoint=get_config_endpoint()
-    role_session_name=get_parameter_name()
-    
-    policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"AWS\":[\"arn:aws:iam:::user/"+sts_user_id+"\"]},\"Action\":[\"sts:AssumeRole\"]}]}"
-    (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
-    if role_response:
-        assert role_response['Role']['Arn'] == 'arn:aws:iam:::role/'+general_role_name+''
-    else:
-        assert False, role_error
-    
-    role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":\"arn:aws:s3:::*\"}}"
-    (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy)
-    if response:
-        assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-    else:
-        assert False, role_err
-    
-    resp=sts_client.assume_role(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name)
-    assert resp['ResponseMetadata']['HTTPStatusCode'] == 200
-    
-    s3_client = boto3.client('s3',
-               aws_access_key_id = resp['Credentials']['AccessKeyId'],
-               aws_secret_access_key = resp['Credentials']['SecretAccessKey'],
-               aws_session_token = resp['Credentials']['SessionToken'],
-               endpoint_url=default_endpoint,
-               region_name='',
-               )
-    bucket_name = get_new_bucket_name()
-    s3bucket = s3_client.create_bucket(Bucket=bucket_name)
-    assert s3bucket['ResponseMetadata']['HTTPStatusCode'] == 200
-    bkt = s3_client.delete_bucket(Bucket=bucket_name)
-    assert bkt['ResponseMetadata']['HTTPStatusCode'] == 204
-
-@pytest.mark.test_of_sts
-@pytest.mark.fails_on_dbstore
-def test_assume_role_deny():
-    s3bucket_error=None
-    iam_client=get_iam_client()    
-    sts_client=get_sts_client()
-    sts_user_id=get_alt_user_id()
-    default_endpoint=get_config_endpoint()
-    role_session_name=get_parameter_name()
-    
-    policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"AWS\":[\"arn:aws:iam:::user/"+sts_user_id+"\"]},\"Action\":[\"sts:AssumeRole\"]}]}"
-    (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
-    if role_response:
-        assert role_response['Role']['Arn'] == 'arn:aws:iam:::role/'+general_role_name+''
-    else:
-        assert False, role_error
-    
-    role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Deny\",\"Action\":\"s3:*\",\"Resource\":\"arn:aws:s3:::*\"}}"
-    (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy)
-    if response:
-        assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-    else:
-        assert False, role_err
-    
-    resp=sts_client.assume_role(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name)
-    assert resp['ResponseMetadata']['HTTPStatusCode'] == 200
-    
-    s3_client = boto3.client('s3',
-               aws_access_key_id = resp['Credentials']['AccessKeyId'],
-               aws_secret_access_key = resp['Credentials']['SecretAccessKey'],
-               aws_session_token = resp['Credentials']['SessionToken'],
-               endpoint_url=default_endpoint,
-               region_name='',
-               )
-    bucket_name = get_new_bucket_name()
-    try:
-        s3bucket = s3_client.create_bucket(Bucket=bucket_name)
-    except ClientError as e:
-        s3bucket_error = e.response.get("Error", {}).get("Code")
-    assert s3bucket_error == 'AccessDenied'
-
-@pytest.mark.test_of_sts
-@pytest.mark.fails_on_dbstore
-def test_assume_role_creds_expiry():
-    iam_client=get_iam_client()    
-    sts_client=get_sts_client()
-    sts_user_id=get_alt_user_id()
-    default_endpoint=get_config_endpoint()
-    role_session_name=get_parameter_name()
-    
-    policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"AWS\":[\"arn:aws:iam:::user/"+sts_user_id+"\"]},\"Action\":[\"sts:AssumeRole\"]}]}"
-    (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
-    if role_response:
-        assert role_response['Role']['Arn'] == 'arn:aws:iam:::role/'+general_role_name+''
-    else:
-        assert False, role_error
-    
-    role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":\"arn:aws:s3:::*\"}}"
-    (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy)
-    if response:
-        assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-    else:
-        assert False, role_err
-    
-    resp=sts_client.assume_role(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,DurationSeconds=900)
-    assert resp['ResponseMetadata']['HTTPStatusCode'] == 200
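-    # wait out the 900-second session duration so the temporary credentials expire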
-    time.sleep(900)
-    
-    s3_client = boto3.client('s3',
-               aws_access_key_id = resp['Credentials']['AccessKeyId'],
-               aws_secret_access_key = resp['Credentials']['SecretAccessKey'],
-               aws_session_token = resp['Credentials']['SessionToken'],
-               endpoint_url=default_endpoint,
-               region_name='',
-               )
-    bucket_name = get_new_bucket_name()
-    s3bucket_error = None
-    try:
-        s3bucket = s3_client.create_bucket(Bucket=bucket_name)
-    except ClientError as e:
-        s3bucket_error = e.response.get("Error", {}).get("Code")
-    assert s3bucket_error == 'AccessDenied'
-
-@pytest.mark.test_of_sts
-@pytest.mark.fails_on_dbstore
-def test_assume_role_deny_head_nonexistent():
-    # create a bucket with the normal s3 client
-    bucket_name = get_new_bucket_name()
-    get_client().create_bucket(Bucket=bucket_name)
-
-    iam_client=get_iam_client()
-    sts_client=get_sts_client()
-    sts_user_id=get_alt_user_id()
-    default_endpoint=get_config_endpoint()
-    role_session_name=get_parameter_name()
-
-    policy_document = '{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"AWS":["arn:aws:iam:::user/'+sts_user_id+'"]},"Action":["sts:AssumeRole"]}]}'
-    (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
-    if role_response:
-        assert role_response['Role']['Arn'] == 'arn:aws:iam:::role/'+general_role_name
-    else:
-        assert False, role_error
-
-    # allow GetObject but deny ListBucket
-    role_policy = '{"Version":"2012-10-17","Statement":{"Effect":"Allow","Action":"s3:GetObject","Principal":"*","Resource":"arn:aws:s3:::*"}}'
-    (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy)
-    if response:
-        assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-    else:
-        assert False, role_err
-
-    resp=sts_client.assume_role(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name)
-    assert resp['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    s3_client = boto3.client('s3',
-               aws_access_key_id = resp['Credentials']['AccessKeyId'],
-               aws_secret_access_key = resp['Credentials']['SecretAccessKey'],
-               aws_session_token = resp['Credentials']['SessionToken'],
-               endpoint_url=default_endpoint,
-               region_name='')
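-    # without s3:ListBucket permission, HEAD on a missing key is expected to return 403 instead of 404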
-    status=200
-    try:
-        s3_client.head_object(Bucket=bucket_name, Key='nonexistent')
-    except ClientError as e:
-        status = e.response['ResponseMetadata']['HTTPStatusCode']
-    assert status == 403
-
-@pytest.mark.test_of_sts
-@pytest.mark.fails_on_dbstore
-def test_assume_role_allow_head_nonexistent():
-    # create a bucket with the normal s3 client
-    bucket_name = get_new_bucket_name()
-    get_client().create_bucket(Bucket=bucket_name)
-
-    iam_client=get_iam_client()
-    sts_client=get_sts_client()
-    sts_user_id=get_alt_user_id()
-    default_endpoint=get_config_endpoint()
-    role_session_name=get_parameter_name()
-
-    policy_document = '{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"AWS":["arn:aws:iam:::user/'+sts_user_id+'"]},"Action":["sts:AssumeRole"]}]}'
-    (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
-    if role_response:
-        assert role_response['Role']['Arn'] == 'arn:aws:iam:::role/'+general_role_name
-    else:
-        assert False, role_error
-
-    # allow GetObject and ListBucket
-    role_policy = '{"Version":"2012-10-17","Statement":{"Effect":"Allow","Action":["s3:GetObject","s3:ListBucket"],"Principal":"*","Resource":"arn:aws:s3:::*"}}'
-    (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy)
-    if response:
-        assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-    else:
-        assert False, role_err
-
-    resp=sts_client.assume_role(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name)
-    assert resp['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    s3_client = boto3.client('s3',
-               aws_access_key_id = resp['Credentials']['AccessKeyId'],
-               aws_secret_access_key = resp['Credentials']['SecretAccessKey'],
-               aws_session_token = resp['Credentials']['SessionToken'],
-               endpoint_url=default_endpoint,
-               region_name='')
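-    # with s3:ListBucket allowed, HEAD on a missing key should return the usual 404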
-    status=200
-    try:
-        s3_client.head_object(Bucket=bucket_name, Key='nonexistent')
-    except ClientError as e:
-        status = e.response['ResponseMetadata']['HTTPStatusCode']
-    assert status == 404
-
-
-@pytest.mark.webidentity_test
-@pytest.mark.token_claims_trust_policy_test
-@pytest.mark.fails_on_dbstore
-def test_assume_role_with_web_identity():
-    check_webidentity()
-    iam_client=get_iam_client()    
-    sts_client=get_sts_client()
-    default_endpoint=get_config_endpoint()
-    role_session_name=get_parameter_name()
-    thumbprint=get_thumbprint()
-    aud=get_aud()
-    token=get_token()
-    realm=get_realm_name()
-    
-    oidc_response = iam_client.create_open_id_connect_provider(
-    Url='http://localhost:8080/auth/realms/{}'.format(realm),
-    ThumbprintList=[
-        thumbprint,
-    ],
-    )
-    
-    policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_response["OpenIDConnectProviderArn"]+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\"],\"Condition\":{\"StringEquals\":{\"localhost:8080/auth/realms/"+realm+":app_id\":\""+aud+"\"}}}]}"
-    (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
-    assert role_response['Role']['Arn'] == 'arn:aws:iam:::role/'+general_role_name+''
-    
-    role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":\"arn:aws:s3:::*\"}}"
-    (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy)
-    if response:
-        assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-    else:
-        assert False, role_err
-    
-    resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=token)
-    assert resp['ResponseMetadata']['HTTPStatusCode'] == 200
-    
-    s3_client = boto3.client('s3',
-               aws_access_key_id = resp['Credentials']['AccessKeyId'],
-               aws_secret_access_key = resp['Credentials']['SecretAccessKey'],
-               aws_session_token = resp['Credentials']['SessionToken'],
-               endpoint_url=default_endpoint,
-               region_name='',
-               )
-    bucket_name = get_new_bucket_name()
-    s3bucket = s3_client.create_bucket(Bucket=bucket_name)
-    assert s3bucket['ResponseMetadata']['HTTPStatusCode'] == 200
-    bkt = s3_client.delete_bucket(Bucket=bucket_name)
-    assert bkt['ResponseMetadata']['HTTPStatusCode'] == 204
-    
-    oidc_remove=iam_client.delete_open_id_connect_provider(
-    OpenIDConnectProviderArn=oidc_response["OpenIDConnectProviderArn"]
-    )
-
-'''
-@pytest.mark.webidentity_test
-def test_assume_role_with_web_identity_invalid_webtoken():
-    resp_error=None
-    iam_client=get_iam_client()
-    sts_client=get_sts_client()
-    default_endpoint=get_config_endpoint()
-    role_session_name=get_parameter_name()
-    thumbprint=get_thumbprint()
-    aud=get_aud()
-    token=get_token()
-    realm=get_realm_name()
-
-    oidc_response = iam_client.create_open_id_connect_provider(
-    Url='http://localhost:8080/auth/realms/{}'.format(realm),
-    ThumbprintList=[
-        thumbprint,
-    ],
-    )
-
-    policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_response["OpenIDConnectProviderArn"]+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\"],\"Condition\":{\"StringEquals\":{\"localhost:8080/auth/realms/"+realm+":app_id\":\""+aud+"\"}}}]}"
-    (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
-    assert role_response['Role']['Arn'] == 'arn:aws:iam:::role/'+general_role_name+''
-
-    role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":\"arn:aws:s3:::*\"}}"
-    (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    resp=""
-    try:
-        resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken='abcdef')
-    except InvalidIdentityTokenException as e:
-        log.debug('{}'.format(resp))
-        log.debug('{}'.format(e.response.get("Error", {}).get("Code")))
-        log.debug('{}'.format(e))
-        resp_error = e.response.get("Error", {}).get("Code")
-    assert resp_error == 'AccessDenied'
-
-    oidc_remove=iam_client.delete_open_id_connect_provider(
-    OpenIDConnectProviderArn=oidc_response["OpenIDConnectProviderArn"]
-    )
-'''
-
-#######################
-# Session Policy Tests
-#######################
-
-@pytest.mark.webidentity_test
-@pytest.mark.session_policy
-@pytest.mark.fails_on_dbstore
-def test_session_policy_check_on_different_buckets():
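-    # effective permissions are the intersection of the role policy (test2 only) and the
-    # session policy (test1 only), so CreateBucket is denied for both buckets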
-    check_webidentity()
-    iam_client=get_iam_client()
-    sts_client=get_sts_client()
-    default_endpoint=get_config_endpoint()
-    role_session_name=get_parameter_name()
-    thumbprint=get_thumbprint()
-    aud=get_aud()
-    token=get_token()
-    realm=get_realm_name()
-
-    url = 'http://localhost:8080/auth/realms/{}'.format(realm)
-    thumbprintlist = [thumbprint]
-    (oidc_arn,oidc_error) = create_oidc_provider(iam_client, url, None, thumbprintlist)
-    if oidc_error is not None:
-        raise RuntimeError('Unable to create/get openid connect provider {}'.format(oidc_error))
-
-    policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_arn+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\"],\"Condition\":{\"StringEquals\":{\"localhost:8080/auth/realms/"+realm+":app_id\":\""+aud+"\"}}}]}"
-    (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
-    assert role_response['Role']['Arn'] == 'arn:aws:iam:::role/'+general_role_name+''
-
-    role_policy_new = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":[\"arn:aws:s3:::test2\",\"arn:aws:s3:::test2/*\"]}}"
-
-    (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy_new)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    session_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":[\"s3:GetObject\",\"s3:PutObject\"],\"Resource\":[\"arn:aws:s3:::test1\",\"arn:aws:s3:::test1/*\"]}}"
-
-    resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=token,Policy=session_policy)
-    assert resp['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    s3_client = boto3.client('s3',
-                aws_access_key_id = resp['Credentials']['AccessKeyId'],
-                aws_secret_access_key = resp['Credentials']['SecretAccessKey'],
-                aws_session_token = resp['Credentials']['SessionToken'],
-                endpoint_url=default_endpoint,
-                region_name='',
-                )
-
-    bucket_name_1 = 'test1'
-    s3bucket_error = None
-    try:
-        s3bucket = s3_client.create_bucket(Bucket=bucket_name_1)
-    except ClientError as e:
-        s3bucket_error = e.response.get("Error", {}).get("Code")
-    assert s3bucket_error == 'AccessDenied'
-
-    bucket_name_2 = 'test2'
-    s3bucket_error = None
-    try:
-        s3bucket = s3_client.create_bucket(Bucket=bucket_name_2)
-    except ClientError as e:
-        s3bucket_error = e.response.get("Error", {}).get("Code")
-    assert s3bucket_error == 'AccessDenied'
-
-    bucket_body = 'please-write-something'
-    s3_put_obj_error = None
-    try:
-        s3_put_obj = s3_client.put_object(Body=bucket_body, Bucket=bucket_name_1, Key="test-1.txt")
-    except ClientError as e:
-        s3_put_obj_error = e.response.get("Error", {}).get("Code")
-    assert s3_put_obj_error == 'NoSuchBucket'
-
-    oidc_remove=iam_client.delete_open_id_connect_provider(
-    OpenIDConnectProviderArn=oidc_arn
-    )
-
-
-@pytest.mark.webidentity_test
-@pytest.mark.session_policy
-@pytest.mark.fails_on_dbstore
-def test_session_policy_check_on_same_bucket():
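-    # role policy allows s3:* and the session policy allows Get/PutObject on test1, so PutObject to test1 succeeds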
-    check_webidentity()
-    iam_client=get_iam_client()
-    sts_client=get_sts_client()
-    default_endpoint=get_config_endpoint()
-    role_session_name=get_parameter_name()
-    thumbprint=get_thumbprint()
-    aud=get_aud()
-    token=get_token()
-    realm=get_realm_name()
-
-    url = 'http://localhost:8080/auth/realms/{}'.format(realm)
-    thumbprintlist = [thumbprint]
-    (oidc_arn,oidc_error) = create_oidc_provider(iam_client, url, None, thumbprintlist)
-    if oidc_error is not None:
-        raise RuntimeError('Unable to create/get openid connect provider {}'.format(oidc_error))
-
-    policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_arn+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\"],\"Condition\":{\"StringEquals\":{\"localhost:8080/auth/realms/"+realm+":app_id\":\""+aud+"\"}}}]}"
-    (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
-    assert role_response['Role']['Arn'] == 'arn:aws:iam:::role/'+general_role_name+''
-
-    role_policy_new = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":[\"*\"]}}"
-
-    (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy_new)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    s3_client_iam_creds = get_s3_client_using_iam_creds()
-
-    bucket_name_1 = 'test1'
-    s3bucket = s3_client_iam_creds.create_bucket(Bucket=bucket_name_1)
-    assert s3bucket['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    session_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":[\"s3:GetObject\",\"s3:PutObject\"],\"Resource\":[\"arn:aws:s3:::test1\",\"arn:aws:s3:::test1/*\"]}}"
-
-    resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=token,Policy=session_policy)
-    assert resp['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    s3_client = boto3.client('s3',
-                aws_access_key_id = resp['Credentials']['AccessKeyId'],
-                aws_secret_access_key = resp['Credentials']['SecretAccessKey'],
-                aws_session_token = resp['Credentials']['SessionToken'],
-                endpoint_url=default_endpoint,
-                region_name='',
-                )
-
-    bucket_body = 'this is a test file'
-    s3_put_obj = s3_client.put_object(Body=bucket_body, Bucket=bucket_name_1, Key="test-1.txt")
-    assert s3_put_obj['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    oidc_remove=iam_client.delete_open_id_connect_provider(
-    OpenIDConnectProviderArn=oidc_arn
-    )
-
-
-@pytest.mark.webidentity_test
-@pytest.mark.session_policy
-@pytest.mark.fails_on_dbstore
-def test_session_policy_check_put_obj_denial():
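-    # the session policy grants only s3:GetObject, so PutObject is denied even though the role policy allows s3:*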
-    check_webidentity()
-    iam_client=get_iam_client()
-    iam_access_key=get_iam_access_key()
-    iam_secret_key=get_iam_secret_key()
-    sts_client=get_sts_client()
-    default_endpoint=get_config_endpoint()
-    role_session_name=get_parameter_name()
-    thumbprint=get_thumbprint()
-    aud=get_aud()
-    token=get_token()
-    realm=get_realm_name()
-
-    url = 'http://localhost:8080/auth/realms/{}'.format(realm)
-    thumbprintlist = [thumbprint]
-    (oidc_arn,oidc_error) = create_oidc_provider(iam_client, url, None, thumbprintlist)
-    if oidc_error is not None:
-        raise RuntimeError('Unable to create/get openid connect provider {}'.format(oidc_error))
-
-    policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_arn+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\"],\"Condition\":{\"StringEquals\":{\"localhost:8080/auth/realms/"+realm+":app_id\":\""+aud+"\"}}}]}"
-    (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
-    assert role_response['Role']['Arn'] == 'arn:aws:iam:::role/'+general_role_name+''
-
-    role_policy_new = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":[\"*\"]}}"
-
-    (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy_new)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    s3_client_iam_creds = get_s3_client_using_iam_creds()
-
-    bucket_name_1 = 'test1'
-    s3bucket = s3_client_iam_creds.create_bucket(Bucket=bucket_name_1)
-    assert s3bucket['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    session_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":[\"s3:GetObject\"],\"Resource\":[\"arn:aws:s3:::test1\",\"arn:aws:s3:::test1/*\"]}}"
-
-    resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=token,Policy=session_policy)
-    assert resp['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    s3_client = boto3.client('s3',
-                aws_access_key_id = resp['Credentials']['AccessKeyId'],
-                aws_secret_access_key = resp['Credentials']['SecretAccessKey'],
-                aws_session_token = resp['Credentials']['SessionToken'],
-                endpoint_url=default_endpoint,
-                region_name='',
-                )
-
-    bucket_body = 'this is a test file'
-    s3_put_obj_error = None
-    try:
-        s3_put_obj = s3_client.put_object(Body=bucket_body, Bucket=bucket_name_1, Key="test-1.txt")
-    except ClientError as e:
-        s3_put_obj_error = e.response.get("Error", {}).get("Code")
-    assert s3_put_obj_error == 'AccessDenied'
-
-    oidc_remove=iam_client.delete_open_id_connect_provider(
-    OpenIDConnectProviderArn=oidc_arn
-    )
-
-
-@pytest.mark.webidentity_test
-@pytest.mark.session_policy
-@pytest.mark.fails_on_dbstore
-def test_swapping_role_policy_and_session_policy():
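-    # narrow role policy (Get/PutObject on test1) combined with a broad s3:* session policy;
-    # the intersection still allows PutObject on test1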
-    check_webidentity()
-    iam_client=get_iam_client()
-    iam_access_key=get_iam_access_key()
-    iam_secret_key=get_iam_secret_key()
-    sts_client=get_sts_client()
-    default_endpoint=get_config_endpoint()
-    role_session_name=get_parameter_name()
-    thumbprint=get_thumbprint()
-    aud=get_aud()
-    token=get_token()
-    realm=get_realm_name()
-
-    url = 'http://localhost:8080/auth/realms/{}'.format(realm)
-    thumbprintlist = [thumbprint]
-    (oidc_arn,oidc_error) = create_oidc_provider(iam_client, url, None, thumbprintlist)
-    if oidc_error is not None:
-        raise RuntimeError('Unable to create/get openid connect provider {}'.format(oidc_error))
-
-    policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_arn+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\"],\"Condition\":{\"StringEquals\":{\"localhost:8080/auth/realms/"+realm+":app_id\":\""+aud+"\"}}}]}"
-    (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
-    assert role_response['Role']['Arn'] == 'arn:aws:iam:::role/'+general_role_name+''
-
-    role_policy_new = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":[\"s3:GetObject\",\"s3:PutObject\"],\"Resource\":[\"arn:aws:s3:::test1\",\"arn:aws:s3:::test1/*\"]}}"
-
-    (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy_new)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    s3_client_iam_creds = get_s3_client_using_iam_creds()
-
-    bucket_name_1 = 'test1'
-    s3bucket = s3_client_iam_creds.create_bucket(Bucket=bucket_name_1)
-    assert s3bucket['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    session_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":[\"*\"]}}"
-
-    resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=token,Policy=session_policy)
-    assert resp['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    s3_client = boto3.client('s3',
-                aws_access_key_id = resp['Credentials']['AccessKeyId'],
-                aws_secret_access_key = resp['Credentials']['SecretAccessKey'],
-                aws_session_token = resp['Credentials']['SessionToken'],
-                endpoint_url=default_endpoint,
-                region_name='',
-                )
-    bucket_body = 'this is a test file'
-    s3_put_obj = s3_client.put_object(Body=bucket_body, Bucket=bucket_name_1, Key="test-1.txt")
-    assert s3_put_obj['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    oidc_remove=iam_client.delete_open_id_connect_provider(
-    OpenIDConnectProviderArn=oidc_arn
-    )
-
-@pytest.mark.webidentity_test
-@pytest.mark.session_policy
-@pytest.mark.fails_on_dbstore
-def test_session_policy_check_different_op_permissions():
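-    # role policy allows only PutObject and the session policy only GetObject; the intersection is empty, so PutObject is denied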
-    check_webidentity()
-    iam_client=get_iam_client()
-    iam_access_key=get_iam_access_key()
-    iam_secret_key=get_iam_secret_key()
-    sts_client=get_sts_client()
-    default_endpoint=get_config_endpoint()
-    role_session_name=get_parameter_name()
-    thumbprint=get_thumbprint()
-    aud=get_aud()
-    token=get_token()
-    realm=get_realm_name()
-
-    url = 'http://localhost:8080/auth/realms/{}'.format(realm)
-    thumbprintlist = [thumbprint]
-    (oidc_arn,oidc_error) = create_oidc_provider(iam_client, url, None, thumbprintlist)
-    if oidc_error is not None:
-        raise RuntimeError('Unable to create/get openid connect provider {}'.format(oidc_error))
-
-    policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_arn+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\"],\"Condition\":{\"StringEquals\":{\"localhost:8080/auth/realms/"+realm+":app_id\":\""+aud+"\"}}}]}"
-    (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
-    assert role_response['Role']['Arn'] == 'arn:aws:iam:::role/'+general_role_name+''
-
-    role_policy_new = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":[\"s3:PutObject\"],\"Resource\":[\"arn:aws:s3:::test1\",\"arn:aws:s3:::test1/*\"]}}"
-
-    (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy_new)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    s3_client_iam_creds = get_s3_client_using_iam_creds()
-
-    bucket_name_1 = 'test1'
-    s3bucket = s3_client_iam_creds.create_bucket(Bucket=bucket_name_1)
-    assert s3bucket['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    session_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":[\"s3:GetObject\"],\"Resource\":[\"arn:aws:s3:::test1\",\"arn:aws:s3:::test1/*\"]}}"
-
-    resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=token,Policy=session_policy)
-    assert resp['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    s3_client = boto3.client('s3',
-                aws_access_key_id = resp['Credentials']['AccessKeyId'],
-                aws_secret_access_key = resp['Credentials']['SecretAccessKey'],
-                aws_session_token = resp['Credentials']['SessionToken'],
-                endpoint_url=default_endpoint,
-                region_name='',
-                )
-
-    bucket_body = 'this is a test file'
-    s3_put_obj_error = None
-    try:
-        s3_put_obj = s3_client.put_object(Body=bucket_body, Bucket=bucket_name_1, Key="test-1.txt")
-    except ClientError as e:
-        s3_put_obj_error = e.response.get("Error", {}).get("Code")
-    assert s3_put_obj_error == 'AccessDenied'
-
-    oidc_remove=iam_client.delete_open_id_connect_provider(
-    OpenIDConnectProviderArn=oidc_arn
-    )
-
-
-@pytest.mark.webidentity_test
-@pytest.mark.session_policy
-@pytest.mark.fails_on_dbstore
-def test_session_policy_check_with_deny_effect():
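-    # an explicit Deny in the role policy overrides the session policy's Allow, so PutObject is denied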
-    check_webidentity()
-    iam_client=get_iam_client()
-    iam_access_key=get_iam_access_key()
-    iam_secret_key=get_iam_secret_key()
-    sts_client=get_sts_client()
-    default_endpoint=get_config_endpoint()
-    role_session_name=get_parameter_name()
-    thumbprint=get_thumbprint()
-    aud=get_aud()
-    token=get_token()
-    realm=get_realm_name()
-
-    url = 'http://localhost:8080/auth/realms/{}'.format(realm)
-    thumbprintlist = [thumbprint]
-    (oidc_arn,oidc_error) = create_oidc_provider(iam_client, url, None, thumbprintlist)
-    if oidc_error is not None:
-        raise RuntimeError('Unable to create/get openid connect provider {}'.format(oidc_error))
-
-    policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_arn+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\"],\"Condition\":{\"StringEquals\":{\"localhost:8080/auth/realms/"+realm+":app_id\":\""+aud+"\"}}}]}"
-    (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
-    assert role_response['Role']['Arn'] == 'arn:aws:iam:::role/'+general_role_name+''
-
-    role_policy_new = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Deny\",\"Action\":\"s3:*\",\"Resource\":[\"*\"]}}"
-
-    (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy_new)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    s3_client_iam_creds = get_s3_client_using_iam_creds()
-
-    bucket_name_1 = 'test1'
-    s3bucket = s3_client_iam_creds.create_bucket(Bucket=bucket_name_1)
-    assert s3bucket['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    session_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":[\"s3:PutObject\"],\"Resource\":[\"arn:aws:s3:::test1\",\"arn:aws:s3:::test1/*\"]}}"
-
-    resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=token,Policy=session_policy)
-    assert resp['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    s3_client = boto3.client('s3',
-                aws_access_key_id = resp['Credentials']['AccessKeyId'],
-                aws_secret_access_key = resp['Credentials']['SecretAccessKey'],
-                aws_session_token = resp['Credentials']['SessionToken'],
-                endpoint_url=default_endpoint,
-                region_name='',
-                )
-    bucket_body = 'this is a test file'
-    s3_put_obj_error = None
-    try:
-        s3_put_obj = s3_client.put_object(Body=bucket_body, Bucket=bucket_name_1, Key="test-1.txt")
-    except ClientError as e:
-        s3_put_obj_error = e.response.get("Error", {}).get("Code")
-    assert s3_put_obj_error == 'AccessDenied'
-
-    oidc_remove=iam_client.delete_open_id_connect_provider(
-    OpenIDConnectProviderArn=oidc_arn
-    )
-
-
-@pytest.mark.webidentity_test
-@pytest.mark.session_policy
-@pytest.mark.fails_on_dbstore
-def test_session_policy_check_with_deny_on_same_op():
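-    # an explicit Deny in the session policy overrides the role policy's Allow for the same action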
-    check_webidentity()
-    iam_client=get_iam_client()
-    iam_access_key=get_iam_access_key()
-    iam_secret_key=get_iam_secret_key()
-    sts_client=get_sts_client()
-    default_endpoint=get_config_endpoint()
-    role_session_name=get_parameter_name()
-    thumbprint=get_thumbprint()
-    aud=get_aud()
-    token=get_token()
-    realm=get_realm_name()
-
-    url = 'http://localhost:8080/auth/realms/{}'.format(realm)
-    thumbprintlist = [thumbprint]
-    (oidc_arn,oidc_error) = create_oidc_provider(iam_client, url, None, thumbprintlist)
-    if oidc_error is not None:
-        raise RuntimeError('Unable to create/get openid connect provider {}'.format(oidc_error))
-
-    policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_arn+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\"],\"Condition\":{\"StringEquals\":{\"localhost:8080/auth/realms/"+realm+":app_id\":\""+aud+"\"}}}]}"
-    (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
-    assert role_response['Role']['Arn'] == 'arn:aws:iam:::role/'+general_role_name+''
-
-    role_policy_new = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":[\"s3:PutObject\"],\"Resource\":[\"arn:aws:s3:::test1\",\"arn:aws:s3:::test1/*\"]}}"
-
-    (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy_new)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    s3_client_iam_creds = get_s3_client_using_iam_creds()
-
-    bucket_name_1 = 'test1'
-    s3bucket = s3_client_iam_creds.create_bucket(Bucket=bucket_name_1)
-    assert s3bucket['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    session_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Deny\",\"Action\":[\"s3:PutObject\"],\"Resource\":[\"arn:aws:s3:::test1\",\"arn:aws:s3:::test1/*\"]}}"
-
-    resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=token,Policy=session_policy)
-    assert resp['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    s3_client = boto3.client('s3',
-                aws_access_key_id = resp['Credentials']['AccessKeyId'],
-                aws_secret_access_key = resp['Credentials']['SecretAccessKey'],
-                aws_session_token = resp['Credentials']['SessionToken'],
-                endpoint_url=default_endpoint,
-                region_name='',
-                )
-
-    bucket_body = 'this is a test file'
-    s3_put_obj_error = None
-    try:
-        s3_put_obj = s3_client.put_object(Body=bucket_body, Bucket=bucket_name_1, Key="test-1.txt")
-    except ClientError as e:
-        s3_put_obj_error = e.response.get("Error", {}).get("Code")
-    assert s3_put_obj_error == 'AccessDenied'
-
-    oidc_remove=iam_client.delete_open_id_connect_provider(
-    OpenIDConnectProviderArn=oidc_arn
-    )
-
-@pytest.mark.webidentity_test
-@pytest.mark.session_policy
-@pytest.mark.fails_on_dbstore
-def test_session_policy_bucket_policy_role_arn():
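-    # bucket policy grants Get/PutObject to the role ARN; the session policy allows only PutObject, so GetObject is still denied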
-    check_webidentity()
-    iam_client=get_iam_client()
-    sts_client=get_sts_client()
-    default_endpoint=get_config_endpoint()
-    role_session_name=get_parameter_name()
-    thumbprint=get_thumbprint()
-    aud=get_aud()
-    token=get_token()
-    realm=get_realm_name()
-
-    url = 'http://localhost:8080/auth/realms/{}'.format(realm)
-    thumbprintlist = [thumbprint]
-    (oidc_arn,oidc_error) = create_oidc_provider(iam_client, url, None, thumbprintlist)
-    if oidc_error is not None:
-        raise RuntimeError('Unable to create/get openid connect provider {}'.format(oidc_error))
-
-    policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_arn+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\"],\"Condition\":{\"StringEquals\":{\"localhost:8080/auth/realms/"+realm+":app_id\":\""+aud+"\"}}}]}"
-    (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
-    assert role_response['Role']['Arn'] == 'arn:aws:iam:::role/'+general_role_name+''
-    role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":[\"*\"]}}"
-
-    (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    s3client_iamcreds = get_s3_client_using_iam_creds()
-    bucket_name_1 = 'test1'
-    s3bucket = s3client_iamcreds.create_bucket(Bucket=bucket_name_1)
-    assert s3bucket['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    resource1 = "arn:aws:s3:::" + bucket_name_1
-    resource2 = "arn:aws:s3:::" + bucket_name_1 + "/*"
-    rolearn = "arn:aws:iam:::role/" + general_role_name
-    bucket_policy = json.dumps(
-    {
-        "Version": "2012-10-17",
-        "Statement": [{
-        "Effect": "Allow",
-        "Principal": {"AWS": "{}".format(rolearn)},
-        "Action": ["s3:GetObject","s3:PutObject"],
-        "Resource": [
-            "{}".format(resource1),
-            "{}".format(resource2)
-          ]
-        }]
-     })
-    s3client_iamcreds.put_bucket_policy(Bucket=bucket_name_1, Policy=bucket_policy)
-    session_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":[\"s3:PutObject\"],\"Resource\":[\"arn:aws:s3:::test1\",\"arn:aws:s3:::test1/*\"]}}"
-
-    resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=token,Policy=session_policy)
-    assert resp['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    s3_client = boto3.client('s3',
-                aws_access_key_id = resp['Credentials']['AccessKeyId'],
-                aws_secret_access_key = resp['Credentials']['SecretAccessKey'],
-                aws_session_token = resp['Credentials']['SessionToken'],
-                endpoint_url=default_endpoint,
-                region_name='',
-                )
-    bucket_body = 'this is a test file'
-    s3_put_obj = s3_client.put_object(Body=bucket_body, Bucket=bucket_name_1, Key="test-1.txt")
-    assert s3_put_obj['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    s3object_error = None
-    try:
-        obj = s3_client.get_object(Bucket=bucket_name_1, Key="test-1.txt")
-    except ClientError as e:
-        s3object_error = e.response.get("Error", {}).get("Code")
-    assert s3object_error == 'AccessDenied'
-
-    oidc_remove=iam_client.delete_open_id_connect_provider(
-    OpenIDConnectProviderArn=oidc_arn
-    )
-
-@pytest.mark.webidentity_test
-@pytest.mark.session_policy
-@pytest.mark.fails_on_dbstore
-def test_session_policy_bucket_policy_session_arn():
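-    # bucket policy grants Get/PutObject to the assumed-role session ARN, so GetObject succeeds
-    # even though the session policy allows only PutObject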
-    check_webidentity()
-    iam_client=get_iam_client()
-    sts_client=get_sts_client()
-    default_endpoint=get_config_endpoint()
-    role_session_name=get_parameter_name()
-    thumbprint=get_thumbprint()
-    aud=get_aud()
-    token=get_token()
-    realm=get_realm_name()
-
-    url = 'http://localhost:8080/auth/realms/{}'.format(realm)
-    thumbprintlist = [thumbprint]
-    (oidc_arn,oidc_error) = create_oidc_provider(iam_client, url, None, thumbprintlist)
-    if oidc_error is not None:
-        raise RuntimeError('Unable to create/get openid connect provider {}'.format(oidc_error))
-
-    policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_arn+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\"],\"Condition\":{\"StringEquals\":{\"localhost:8080/auth/realms/"+realm+":app_id\":\""+aud+"\"}}}]}"
-    (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
-    assert role_response['Role']['Arn'] == 'arn:aws:iam:::role/'+general_role_name+''
-    role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":[\"*\"]}}"
-
-    (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    s3client_iamcreds = get_s3_client_using_iam_creds()
-    bucket_name_1 = 'test1'
-    s3bucket = s3client_iamcreds.create_bucket(Bucket=bucket_name_1)
-    assert s3bucket['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    resource1 = "arn:aws:s3:::" + bucket_name_1
-    resource2 = "arn:aws:s3:::" + bucket_name_1 + "/*"
-    rolesessionarn = "arn:aws:iam:::assumed-role/" + general_role_name + "/" + role_session_name
-    bucket_policy = json.dumps(
-    {
-        "Version": "2012-10-17",
-        "Statement": [{
-        "Effect": "Allow",
-        "Principal": {"AWS": "{}".format(rolesessionarn)},
-        "Action": ["s3:GetObject","s3:PutObject"],
-        "Resource": [
-            "{}".format(resource1),
-            "{}".format(resource2)
-          ]
-        }]
-    })
-    s3client_iamcreds.put_bucket_policy(Bucket=bucket_name_1, Policy=bucket_policy)
-    session_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":[\"s3:PutObject\"],\"Resource\":[\"arn:aws:s3:::test1\",\"arn:aws:s3:::test1/*\"]}}"
-
-    resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=token,Policy=session_policy)
-    assert resp['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    s3_client = boto3.client('s3',
-                aws_access_key_id = resp['Credentials']['AccessKeyId'],
-                aws_secret_access_key = resp['Credentials']['SecretAccessKey'],
-                aws_session_token = resp['Credentials']['SessionToken'],
-                endpoint_url=default_endpoint,
-                region_name='',
-                )
-    bucket_body = 'this is a test file'
-    s3_put_obj = s3_client.put_object(Body=bucket_body, Bucket=bucket_name_1, Key="test-1.txt")
-    assert s3_put_obj['ResponseMetadata']['HTTPStatusCode'] == 200
-
-
-    s3_get_obj = s3_client.get_object(Bucket=bucket_name_1, Key="test-1.txt")
-    assert s3_get_obj['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    oidc_remove=iam_client.delete_open_id_connect_provider(
-    OpenIDConnectProviderArn=oidc_arn
-    )
-
-@pytest.mark.webidentity_test
-@pytest.mark.session_policy
-@pytest.mark.fails_on_dbstore
-def test_session_policy_copy_object():
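-    # copy an object within the same bucket using credentials restricted by a PutObject-only
-    # session policy and a bucket policy that grants Get/PutObject to the session ARN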
-    check_webidentity()
-    iam_client=get_iam_client()
-    sts_client=get_sts_client()
-    default_endpoint=get_config_endpoint()
-    role_session_name=get_parameter_name()
-    thumbprint=get_thumbprint()
-    aud=get_aud()
-    token=get_token()
-    realm=get_realm_name()
-
-    url = 'http://localhost:8080/auth/realms/{}'.format(realm)
-    thumbprintlist = [thumbprint]
-    (oidc_arn,oidc_error) = create_oidc_provider(iam_client, url, None, thumbprintlist)
-    if oidc_error is not None:
-        raise RuntimeError('Unable to create/get openid connect provider {}'.format(oidc_error))
-
-    policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_arn+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\"],\"Condition\":{\"StringEquals\":{\"localhost:8080/auth/realms/"+realm+":app_id\":\""+aud+"\"}}}]}"
-    (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
-    assert role_response['Role']['Arn'] == 'arn:aws:iam:::role/'+general_role_name+''
-    role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":[\"*\"]}}"
-
-    (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    s3client_iamcreds = get_s3_client_using_iam_creds()
-    bucket_name_1 = 'test1'
-    s3bucket = s3client_iamcreds.create_bucket(Bucket=bucket_name_1)
-    assert s3bucket['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    resource1 = "arn:aws:s3:::" + bucket_name_1
-    resource2 = "arn:aws:s3:::" + bucket_name_1 + "/*"
-    rolesessionarn = "arn:aws:iam:::assumed-role/" + general_role_name + "/" + role_session_name
-    print(rolesessionarn)
-    bucket_policy = json.dumps(
-    {
-        "Version": "2012-10-17",
-        "Statement": [{
-        "Effect": "Allow",
-        "Principal": {"AWS": "{}".format(rolesessionarn)},
-        "Action": ["s3:GetObject","s3:PutObject"],
-        "Resource": [
-            "{}".format(resource1),
-            "{}".format(resource2)
-          ]
-        }]
-     })
-    s3client_iamcreds.put_bucket_policy(Bucket=bucket_name_1, Policy=bucket_policy)
-    session_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":[\"s3:PutObject\"],\"Resource\":[\"arn:aws:s3:::test1\",\"arn:aws:s3:::test1/*\"]}}"
-
-    resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=token,Policy=session_policy)
-    assert resp['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    s3_client = boto3.client('s3',
-                aws_access_key_id = resp['Credentials']['AccessKeyId'],
-                aws_secret_access_key = resp['Credentials']['SecretAccessKey'],
-                aws_session_token = resp['Credentials']['SessionToken'],
-                endpoint_url=default_endpoint,
-                region_name='',
-                )
-    bucket_body = 'this is a test file'
-    s3_put_obj = s3_client.put_object(Body=bucket_body, Bucket=bucket_name_1, Key="test-1.txt")
-    assert s3_put_obj['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    copy_source = {
-    'Bucket': bucket_name_1,
-    'Key': 'test-1.txt'
-    }
-
-    s3_client.copy(copy_source, bucket_name_1, "test-2.txt")
-
-    s3_get_obj = s3_client.get_object(Bucket=bucket_name_1, Key="test-2.txt")
-    assert s3_get_obj['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    oidc_remove=iam_client.delete_open_id_connect_provider(
-    OpenIDConnectProviderArn=oidc_arn
-    )
-
-@pytest.mark.webidentity_test
-@pytest.mark.session_policy
-@pytest.mark.fails_on_dbstore
-def test_session_policy_no_bucket_role_policy():
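-    # no role policy or bucket policy is attached, so the session policy alone cannot grant access and PutObject is denied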
-    check_webidentity()
-    iam_client=get_iam_client()
-    sts_client=get_sts_client()
-    default_endpoint=get_config_endpoint()
-    role_session_name=get_parameter_name()
-    thumbprint=get_thumbprint()
-    aud=get_aud()
-    token=get_token()
-    realm=get_realm_name()
-
-    url = 'http://localhost:8080/auth/realms/{}'.format(realm)
-    thumbprintlist = [thumbprint]
-    (oidc_arn,oidc_error) = create_oidc_provider(iam_client, url, None, thumbprintlist)
-    if oidc_error is not None:
-        raise RuntimeError('Unable to create/get openid connect provider {}'.format(oidc_error))
-
-    policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_arn+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\"],\"Condition\":{\"StringEquals\":{\"localhost:8080/auth/realms/"+realm+":app_id\":\""+aud+"\"}}}]}"
-    (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
-    assert role_response['Role']['Arn'] == 'arn:aws:iam:::role/'+general_role_name+''
-
-    s3client_iamcreds = get_s3_client_using_iam_creds()
-    bucket_name_1 = 'test1'
-    s3bucket = s3client_iamcreds.create_bucket(Bucket=bucket_name_1)
-    assert s3bucket['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    session_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":[\"s3:PutObject\",\"s3:GetObject\"],\"Resource\":[\"arn:aws:s3:::test1\",\"arn:aws:s3:::test1/*\"]}}"
-
-    resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=token,Policy=session_policy)
-    assert resp['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    s3_client = boto3.client('s3',
-                aws_access_key_id = resp['Credentials']['AccessKeyId'],
-                aws_secret_access_key = resp['Credentials']['SecretAccessKey'],
-                aws_session_token = resp['Credentials']['SessionToken'],
-                endpoint_url=default_endpoint,
-                region_name='',
-                )
-    bucket_body = 'this is a test file'
-    s3putobj_error = None
-    try:
-        s3_put_obj = s3_client.put_object(Body=bucket_body, Bucket=bucket_name_1, Key="test-1.txt")
-    except ClientError as e:
-        s3putobj_error = e.response.get("Error", {}).get("Code")
-    assert s3putobj_error == 'AccessDenied'
-
-    oidc_remove=iam_client.delete_open_id_connect_provider(
-    OpenIDConnectProviderArn=oidc_arn
-    )
-
-@pytest.mark.webidentity_test
-@pytest.mark.session_policy
-@pytest.mark.fails_on_dbstore
-def test_session_policy_bucket_policy_deny():
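-    # an explicit Deny in the bucket policy overrides the Allows in the role and session policies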
-    check_webidentity()
-    iam_client=get_iam_client()
-    sts_client=get_sts_client()
-    default_endpoint=get_config_endpoint()
-    role_session_name=get_parameter_name()
-    thumbprint=get_thumbprint()
-    aud=get_aud()
-    token=get_token()
-    realm=get_realm_name()
-
-    url = 'http://localhost:8080/auth/realms/{}'.format(realm)
-    thumbprintlist = [thumbprint]
-    (oidc_arn,oidc_error) = create_oidc_provider(iam_client, url, None, thumbprintlist)
-    if oidc_error is not None:
-        raise RuntimeError('Unable to create/get openid connect provider {}'.format(oidc_error))
-
-    policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_arn+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\"],\"Condition\":{\"StringEquals\":{\"localhost:8080/auth/realms/"+realm+":app_id\":\""+aud+"\"}}}]}"
-    (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
-    assert role_response['Role']['Arn'] == 'arn:aws:iam:::role/'+general_role_name+''
-    role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":[\"*\"]}}"
-
-    (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    s3client_iamcreds = get_s3_client_using_iam_creds()
-    bucket_name_1 = 'test1'
-    s3bucket = s3client_iamcreds.create_bucket(Bucket=bucket_name_1)
-    assert s3bucket['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    resource1 = "arn:aws:s3:::" + bucket_name_1
-    resource2 = "arn:aws:s3:::" + bucket_name_1 + "/*"
-    rolesessionarn = "arn:aws:iam:::assumed-role/" + general_role_name + "/" + role_session_name
-    bucket_policy = json.dumps(
-    {
-        "Version": "2012-10-17",
-        "Statement": [{
-        "Effect": "Deny",
-        "Principal": {"AWS": "{}".format(rolesessionarn)},
-        "Action": ["s3:GetObject","s3:PutObject"],
-        "Resource": [
-            "{}".format(resource1),
-            "{}".format(resource2)
-          ]
-        }]
-    })
-    s3client_iamcreds.put_bucket_policy(Bucket=bucket_name_1, Policy=bucket_policy)
-    session_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":[\"s3:PutObject\"],\"Resource\":[\"arn:aws:s3:::test1\",\"arn:aws:s3:::test1/*\"]}}"
-
-    resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=token,Policy=session_policy)
-    assert resp['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    s3_client = boto3.client('s3',
-                aws_access_key_id = resp['Credentials']['AccessKeyId'],
-                aws_secret_access_key = resp['Credentials']['SecretAccessKey'],
-                aws_session_token = resp['Credentials']['SessionToken'],
-                endpoint_url=default_endpoint,
-                region_name='',
-                )
-    bucket_body = 'this is a test file'
-
-    s3putobj_error = None
-    try:
-        s3_put_obj = s3_client.put_object(Body=bucket_body, Bucket=bucket_name_1, Key="test-1.txt")
-    except ClientError as e:
-        s3putobj_error = e.response.get("Error", {}).get("Code")
-    assert s3putobj_error == 'AccessDenied'
-
-    oidc_remove=iam_client.delete_open_id_connect_provider(
-    OpenIDConnectProviderArn=oidc_arn
-    )
-
-@pytest.mark.webidentity_test
-@pytest.mark.token_claims_trust_policy_test
-@pytest.mark.fails_on_dbstore
-def test_assume_role_with_web_identity_with_sub():
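-    # trust policy matches on the web identity token's 'sub' claim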
-    check_webidentity()
-    iam_client=get_iam_client()
-    sts_client=get_sts_client()
-    default_endpoint=get_config_endpoint()
-    role_session_name=get_parameter_name()
-    thumbprint=get_thumbprint()
-    sub=get_sub()
-    token=get_token()
-    realm=get_realm_name()
-
-    oidc_response = iam_client.create_open_id_connect_provider(
-    Url='http://localhost:8080/auth/realms/{}'.format(realm),
-    ThumbprintList=[
-        thumbprint,
-    ],
-    )
-
-    policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_response["OpenIDConnectProviderArn"]+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\"],\"Condition\":{\"StringEquals\":{\"localhost:8080/auth/realms/"+realm+":sub\":\""+sub+"\"}}}]}"
-    (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
-    assert role_response['Role']['Arn'] == 'arn:aws:iam:::role/'+general_role_name+''
-
-    role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":\"arn:aws:s3:::*\"}}"
-    (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=token)
-    assert resp['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    s3_client = boto3.client('s3',
-        aws_access_key_id = resp['Credentials']['AccessKeyId'],
-        aws_secret_access_key = resp['Credentials']['SecretAccessKey'],
-        aws_session_token = resp['Credentials']['SessionToken'],
-        endpoint_url=default_endpoint,
-        region_name='',
-        )
-    bucket_name = get_new_bucket_name()
-    s3bucket = s3_client.create_bucket(Bucket=bucket_name)
-    assert s3bucket['ResponseMetadata']['HTTPStatusCode'] == 200
-    bkt = s3_client.delete_bucket(Bucket=bucket_name)
-    assert bkt['ResponseMetadata']['HTTPStatusCode'] == 204
-
-    oidc_remove=iam_client.delete_open_id_connect_provider(
-    OpenIDConnectProviderArn=oidc_response["OpenIDConnectProviderArn"]
-    )
-
-@pytest.mark.webidentity_test
-@pytest.mark.token_claims_trust_policy_test
-@pytest.mark.fails_on_dbstore
-def test_assume_role_with_web_identity_with_azp():
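-    # trust policy matches on the web identity token's 'azp' claim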
-    check_webidentity()
-    iam_client=get_iam_client()
-    sts_client=get_sts_client()
-    default_endpoint=get_config_endpoint()
-    role_session_name=get_parameter_name()
-    thumbprint=get_thumbprint()
-    azp=get_azp()
-    token=get_token()
-    realm=get_realm_name()
-
-    oidc_response = iam_client.create_open_id_connect_provider(
-    Url='http://localhost:8080/auth/realms/{}'.format(realm),
-    ThumbprintList=[
-        thumbprint,
-    ],
-    )
-
-    policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_response["OpenIDConnectProviderArn"]+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\"],\"Condition\":{\"StringEquals\":{\"localhost:8080/auth/realms/"+realm+":azp\":\""+azp+"\"}}}]}"
-    (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
-    assert role_response['Role']['Arn'] == 'arn:aws:iam:::role/'+general_role_name+''
-
-    role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":\"arn:aws:s3:::*\"}}"
-    (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=token)
-    assert resp['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    s3_client = boto3.client('s3',
-        aws_access_key_id = resp['Credentials']['AccessKeyId'],
-        aws_secret_access_key = resp['Credentials']['SecretAccessKey'],
-        aws_session_token = resp['Credentials']['SessionToken'],
-        endpoint_url=default_endpoint,
-        region_name='',
-        )
-    bucket_name = get_new_bucket_name()
-    s3bucket = s3_client.create_bucket(Bucket=bucket_name)
-    assert s3bucket['ResponseMetadata']['HTTPStatusCode'] == 200
-    bkt = s3_client.delete_bucket(Bucket=bucket_name)
-    assert bkt['ResponseMetadata']['HTTPStatusCode'] == 204
-
-    oidc_remove=iam_client.delete_open_id_connect_provider(
-    OpenIDConnectProviderArn=oidc_response["OpenIDConnectProviderArn"]
-    )
-
-@pytest.mark.webidentity_test
-@pytest.mark.abac_test
-@pytest.mark.token_request_tag_trust_policy_test
-@pytest.mark.fails_on_dbstore
-def test_assume_role_with_web_identity_with_request_tag():
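-    # trust policy requires sts:TagSession and a Department=Engineering request tag carried in the web identity token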
-    check_webidentity()
-    iam_client=get_iam_client()
-    sts_client=get_sts_client()
-    default_endpoint=get_config_endpoint()
-    role_session_name=get_parameter_name()
-    thumbprint=get_thumbprint()
-    user_token=get_user_token()
-    realm=get_realm_name()
-
-    oidc_response = iam_client.create_open_id_connect_provider(
-    Url='http://localhost:8080/auth/realms/{}'.format(realm),
-    ThumbprintList=[
-        thumbprint,
-    ],
-    )
-
-    policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_response["OpenIDConnectProviderArn"]+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\",\"sts:TagSession\"],\"Condition\":{\"StringEquals\":{\"aws:RequestTag/Department\":\"Engineering\"}}}]}"
-    (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
-    assert role_response['Role']['Arn'] == 'arn:aws:iam:::role/'+general_role_name+''
-
-    role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":\"arn:aws:s3:::*\"}}"
-    (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=user_token)
-    assert resp['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    s3_client = boto3.client('s3',
-        aws_access_key_id = resp['Credentials']['AccessKeyId'],
-        aws_secret_access_key = resp['Credentials']['SecretAccessKey'],
-        aws_session_token = resp['Credentials']['SessionToken'],
-        endpoint_url=default_endpoint,
-        region_name='',
-        )
-    bucket_name = get_new_bucket_name()
-    s3bucket = s3_client.create_bucket(Bucket=bucket_name)
-    assert s3bucket['ResponseMetadata']['HTTPStatusCode'] == 200
-    bkt = s3_client.delete_bucket(Bucket=bucket_name)
-    assert bkt['ResponseMetadata']['HTTPStatusCode'] == 204
-
-    oidc_remove=iam_client.delete_open_id_connect_provider(
-    OpenIDConnectProviderArn=oidc_response["OpenIDConnectProviderArn"]
-    )
-
-@pytest.mark.webidentity_test
-@pytest.mark.abac_test
-@pytest.mark.token_principal_tag_role_policy_test
-@pytest.mark.fails_on_dbstore
-def test_assume_role_with_web_identity_with_principal_tag():
-    check_webidentity()
-    iam_client=get_iam_client()
-    sts_client=get_sts_client()
-    default_endpoint=get_config_endpoint()
-    role_session_name=get_parameter_name()
-    thumbprint=get_thumbprint()
-    user_token=get_user_token()
-    realm=get_realm_name()
-
-    oidc_response = iam_client.create_open_id_connect_provider(
-    Url='http://localhost:8080/auth/realms/{}'.format(realm),
-    ThumbprintList=[
-        thumbprint,
-    ],
-    )
-
-    policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_response["OpenIDConnectProviderArn"]+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\",\"sts:TagSession\"],\"Condition\":{\"StringEquals\":{\"aws:RequestTag/Department\":\"Engineering\"}}}]}"
-    (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
-    assert role_response['Role']['Arn'] == 'arn:aws:iam:::role/'+general_role_name+''
-
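-    # role policy: allow s3:* only while the assumed session's principal tag Department equals Engineering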
-    role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":\"arn:aws:s3:::*\",\"Condition\":{\"StringEquals\":{\"aws:PrincipalTag/Department\":\"Engineering\"}}}}"
-    (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=user_token)
-    assert resp['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    s3_client = boto3.client('s3',
-        aws_access_key_id = resp['Credentials']['AccessKeyId'],
-        aws_secret_access_key = resp['Credentials']['SecretAccessKey'],
-        aws_session_token = resp['Credentials']['SessionToken'],
-        endpoint_url=default_endpoint,
-        region_name='',
-        )
-    bucket_name = get_new_bucket_name()
-    s3bucket = s3_client.create_bucket(Bucket=bucket_name)
-    assert s3bucket['ResponseMetadata']['HTTPStatusCode'] == 200
-    bkt = s3_client.delete_bucket(Bucket=bucket_name)
-    assert bkt['ResponseMetadata']['HTTPStatusCode'] == 204
-
-    oidc_remove=iam_client.delete_open_id_connect_provider(
-    OpenIDConnectProviderArn=oidc_response["OpenIDConnectProviderArn"]
-    )
-
-@pytest.mark.webidentity_test
-@pytest.mark.abac_test
-@pytest.mark.token_principal_tag_role_policy_test
-@pytest.mark.fails_on_dbstore
-def test_assume_role_with_web_identity_for_all_values():
-    check_webidentity()
-    iam_client=get_iam_client()
-    sts_client=get_sts_client()
-    default_endpoint=get_config_endpoint()
-    role_session_name=get_parameter_name()
-    thumbprint=get_thumbprint()
-    user_token=get_user_token()
-    realm=get_realm_name()
-
-    oidc_response = iam_client.create_open_id_connect_provider(
-    Url='http://localhost:8080/auth/realms/{}'.format(realm),
-    ThumbprintList=[
-        thumbprint,
-    ],
-    )
-
-    policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_response["OpenIDConnectProviderArn"]+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\",\"sts:TagSession\"],\"Condition\":{\"StringEquals\":{\"aws:RequestTag/Department\":\"Engineering\"}}}]}"
-    (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
-    assert role_response['Role']['Arn'] == 'arn:aws:iam:::role/'+general_role_name+''
-
-    role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":\"arn:aws:s3:::*\",\"Condition\":{\"ForAllValues:StringEquals\":{\"aws:PrincipalTag/Department\":[\"Engineering\",\"Marketing\"]}}}}"
-    (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=user_token)
-    assert resp['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    s3_client = boto3.client('s3',
-        aws_access_key_id = resp['Credentials']['AccessKeyId'],
-        aws_secret_access_key = resp['Credentials']['SecretAccessKey'],
-        aws_session_token = resp['Credentials']['SessionToken'],
-        endpoint_url=default_endpoint,
-        region_name='',
-        )
-    bucket_name = get_new_bucket_name()
-    s3bucket = s3_client.create_bucket(Bucket=bucket_name)
-    assert s3bucket['ResponseMetadata']['HTTPStatusCode'] == 200
-    bkt = s3_client.delete_bucket(Bucket=bucket_name)
-    assert bkt['ResponseMetadata']['HTTPStatusCode'] == 204
-
-    oidc_remove=iam_client.delete_open_id_connect_provider(
-    OpenIDConnectProviderArn=oidc_response["OpenIDConnectProviderArn"]
-    )
-
-@pytest.mark.webidentity_test
-@pytest.mark.abac_test
-@pytest.mark.token_principal_tag_role_policy_test
-@pytest.mark.fails_on_dbstore
-def test_assume_role_with_web_identity_for_all_values_deny():
-    check_webidentity()
-    iam_client=get_iam_client()
-    sts_client=get_sts_client()
-    default_endpoint=get_config_endpoint()
-    role_session_name=get_parameter_name()
-    thumbprint=get_thumbprint()
-    user_token=get_user_token()
-    realm=get_realm_name()
-
-    oidc_response = iam_client.create_open_id_connect_provider(
-    Url='http://localhost:8080/auth/realms/{}'.format(realm),
-    ThumbprintList=[
-        thumbprint,
-    ],
-    )
-
-    policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_response["OpenIDConnectProviderArn"]+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\",\"sts:TagSession\"],\"Condition\":{\"StringEquals\":{\"aws:RequestTag/Department\":\"Engineering\"}}}]}"
-    (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
-    assert role_response['Role']['Arn'] == 'arn:aws:iam:::role/'+general_role_name+''
-
-    #ForAllValues: The condition returns true if every key value in the request matches at least one value in the policy
-    role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":\"arn:aws:s3:::*\",\"Condition\":{\"ForAllValues:StringEquals\":{\"aws:PrincipalTag/Department\":[\"Engineering\"]}}}}"
-    (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=user_token)
-    assert resp['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    s3_client = boto3.client('s3',
-        aws_access_key_id = resp['Credentials']['AccessKeyId'],
-        aws_secret_access_key = resp['Credentials']['SecretAccessKey'],
-        aws_session_token = resp['Credentials']['SessionToken'],
-        endpoint_url=default_endpoint,
-        region_name='',
-        )
-    bucket_name = get_new_bucket_name()
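-    # create_bucket is expected to be denied: the ForAllValues condition above is not satisfied for this session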
-    try:
-        s3bucket = s3_client.create_bucket(Bucket=bucket_name)
-    except ClientError as e:
-        s3bucket_error = e.response.get("Error", {}).get("Code")
-    assert s3bucket_error == 'AccessDenied'
-
-    oidc_remove=iam_client.delete_open_id_connect_provider(
-    OpenIDConnectProviderArn=oidc_response["OpenIDConnectProviderArn"]
-    )
-
-@pytest.mark.webidentity_test
-@pytest.mark.abac_test
-@pytest.mark.token_tag_keys_test
-@pytest.mark.fails_on_dbstore
-def test_assume_role_with_web_identity_tag_keys_trust_policy():
-    check_webidentity()
-    iam_client=get_iam_client()
-    sts_client=get_sts_client()
-    default_endpoint=get_config_endpoint()
-    role_session_name=get_parameter_name()
-    thumbprint=get_thumbprint()
-    user_token=get_user_token()
-    realm=get_realm_name()
-
-    oidc_response = iam_client.create_open_id_connect_provider(
-    Url='http://localhost:8080/auth/realms/{}'.format(realm),
-    ThumbprintList=[
-        thumbprint,
-    ],
-    )
-
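-    # trust policy: the aws:TagKeys condition restricts the tag keys passed with the session to 'Department'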
-    policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_response["OpenIDConnectProviderArn"]+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\",\"sts:TagSession\"],\"Condition\":{\"StringEquals\":{\"aws:TagKeys\":\"Department\"}}}]}"
-    (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
-    assert role_response['Role']['Arn'] == 'arn:aws:iam:::role/'+general_role_name+''
-
-    role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":\"arn:aws:s3:::*\",\"Condition\":{\"ForAnyValue:StringEquals\":{\"aws:PrincipalTag/Department\":[\"Engineering\"]}}}}"
-    (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=user_token)
-    assert resp['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    s3_client = boto3.client('s3',
-        aws_access_key_id = resp['Credentials']['AccessKeyId'],
-        aws_secret_access_key = resp['Credentials']['SecretAccessKey'],
-        aws_session_token = resp['Credentials']['SessionToken'],
-        endpoint_url=default_endpoint,
-        region_name='',
-        )
-    bucket_name = get_new_bucket_name()
-    s3bucket = s3_client.create_bucket(Bucket=bucket_name)
-    assert s3bucket['ResponseMetadata']['HTTPStatusCode'] == 200
-    bkt = s3_client.delete_bucket(Bucket=bucket_name)
-    assert bkt['ResponseMetadata']['HTTPStatusCode'] == 204
-
-    oidc_remove=iam_client.delete_open_id_connect_provider(
-    OpenIDConnectProviderArn=oidc_response["OpenIDConnectProviderArn"]
-    )
-
-@pytest.mark.webidentity_test
-@pytest.mark.abac_test
-@pytest.mark.token_tag_keys_test
-@pytest.mark.fails_on_dbstore
-def test_assume_role_with_web_identity_tag_keys_role_policy():
-    check_webidentity()
-    iam_client=get_iam_client()
-    sts_client=get_sts_client()
-    default_endpoint=get_config_endpoint()
-    role_session_name=get_parameter_name()
-    thumbprint=get_thumbprint()
-    user_token=get_user_token()
-    realm=get_realm_name()
-
-    oidc_response = iam_client.create_open_id_connect_provider(
-    Url='http://localhost:8080/auth/realms/{}'.format(realm),
-    ThumbprintList=[
-        thumbprint,
-    ],
-    )
-
-    policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_response["OpenIDConnectProviderArn"]+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\",\"sts:TagSession\"],\"Condition\":{\"StringEquals\":{\"aws:RequestTag/Department\":\"Engineering\"}}}]}"
-    (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
-    assert role_response['Role']['Arn'] == 'arn:aws:iam:::role/'+general_role_name+''
-
-    role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":\"arn:aws:s3:::*\",\"Condition\":{\"StringEquals\":{\"aws:TagKeys\":[\"Department\"]}}}}"
-    (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=user_token)
-    assert resp['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    s3_client = boto3.client('s3',
-        aws_access_key_id = resp['Credentials']['AccessKeyId'],
-        aws_secret_access_key = resp['Credentials']['SecretAccessKey'],
-        aws_session_token = resp['Credentials']['SessionToken'],
-        endpoint_url=default_endpoint,
-        region_name='',
-        )
-    bucket_name = get_new_bucket_name()
-    s3bucket = s3_client.create_bucket(Bucket=bucket_name)
-    assert s3bucket['ResponseMetadata']['HTTPStatusCode'] == 200
-    bkt = s3_client.delete_bucket(Bucket=bucket_name)
-    assert bkt['ResponseMetadata']['HTTPStatusCode'] == 204
-
-    oidc_remove=iam_client.delete_open_id_connect_provider(
-    OpenIDConnectProviderArn=oidc_response["OpenIDConnectProviderArn"]
-    )
-
-@pytest.mark.webidentity_test
-@pytest.mark.abac_test
-@pytest.mark.token_resource_tags_test
-@pytest.mark.fails_on_dbstore
-def test_assume_role_with_web_identity_resource_tag():
-    check_webidentity()
-    iam_client=get_iam_client()
-    sts_client=get_sts_client()
-    default_endpoint=get_config_endpoint()
-    role_session_name=get_parameter_name()
-    thumbprint=get_thumbprint()
-    user_token=get_user_token()
-    realm=get_realm_name()
-
-    s3_res_iam_creds = get_s3_resource_using_iam_creds()
-
-    s3_client_iam_creds = s3_res_iam_creds.meta.client
-
-    bucket_name = get_new_bucket_name()
-    s3bucket = s3_client_iam_creds.create_bucket(Bucket=bucket_name)
-    assert s3bucket['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    bucket_tagging = s3_res_iam_creds.BucketTagging(bucket_name)
-    Set_Tag = bucket_tagging.put(Tagging={'TagSet':[{'Key':'Department', 'Value': 'Engineering'},{'Key':'Department', 'Value': 'Marketing'}]})
-
-    oidc_response = iam_client.create_open_id_connect_provider(
-    Url='http://localhost:8080/auth/realms/{}'.format(realm),
-    ThumbprintList=[
-        thumbprint,
-    ],
-    )
-
-    policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_response["OpenIDConnectProviderArn"]+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\",\"sts:TagSession\"],\"Condition\":{\"StringEquals\":{\"aws:RequestTag/Department\":\"Engineering\"}}}]}"
-    (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
-    assert role_response['Role']['Arn'] == 'arn:aws:iam:::role/'+general_role_name+''
-
-    role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":\"arn:aws:s3:::*\",\"Condition\":{\"StringEquals\":{\"s3:ResourceTag/Department\":[\"Engineering\"]}}}}"
-    (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=user_token)
-    assert resp['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    s3_client = boto3.client('s3',
-        aws_access_key_id = resp['Credentials']['AccessKeyId'],
-        aws_secret_access_key = resp['Credentials']['SecretAccessKey'],
-        aws_session_token = resp['Credentials']['SessionToken'],
-        endpoint_url=default_endpoint,
-        region_name='',
-        )
-
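-    # the PUT below should succeed: the bucket was tagged Department=Engineering above, satisfying the s3:ResourceTag condition in the role policy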
-    bucket_body = 'this is a test file'
-    s3_put_obj = s3_client.put_object(Body=bucket_body, Bucket=bucket_name, Key="test-1.txt")
-    assert s3_put_obj['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    oidc_remove=iam_client.delete_open_id_connect_provider(
-    OpenIDConnectProviderArn=oidc_response["OpenIDConnectProviderArn"]
-    )
-
-@pytest.mark.webidentity_test
-@pytest.mark.abac_test
-@pytest.mark.token_resource_tags_test
-@pytest.mark.fails_on_dbstore
-def test_assume_role_with_web_identity_resource_tag_deny():
-    check_webidentity()
-    iam_client=get_iam_client()
-    sts_client=get_sts_client()
-    default_endpoint=get_config_endpoint()
-    role_session_name=get_parameter_name()
-    thumbprint=get_thumbprint()
-    user_token=get_user_token()
-    realm=get_realm_name()
-
-    s3_res_iam_creds = get_s3_resource_using_iam_creds()
-
-    s3_client_iam_creds = s3_res_iam_creds.meta.client
-
-    bucket_name = get_new_bucket_name()
-    s3bucket = s3_client_iam_creds.create_bucket(Bucket=bucket_name)
-    assert s3bucket['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    oidc_response = iam_client.create_open_id_connect_provider(
-    Url='http://localhost:8080/auth/realms/{}'.format(realm),
-    ThumbprintList=[
-        thumbprint,
-    ],
-    )
-
-    policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_response["OpenIDConnectProviderArn"]+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\",\"sts:TagSession\"],\"Condition\":{\"StringEquals\":{\"aws:RequestTag/Department\":\"Engineering\"}}}]}"
-    (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
-    assert role_response['Role']['Arn'] == 'arn:aws:iam:::role/'+general_role_name+''
-
-    role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":\"arn:aws:s3:::*\",\"Condition\":{\"StringEquals\":{\"s3:ResourceTag/Department\":[\"Engineering\"]}}}}"
-    (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=user_token)
-    assert resp['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    s3_client = boto3.client('s3',
-        aws_access_key_id = resp['Credentials']['AccessKeyId'],
-        aws_secret_access_key = resp['Credentials']['SecretAccessKey'],
-        aws_session_token = resp['Credentials']['SessionToken'],
-        endpoint_url=default_endpoint,
-        region_name='',
-        )
-
-    bucket_body = 'this is a test file'
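-    # this bucket was never tagged, so the s3:ResourceTag condition in the role policy is not met and the PUT should be denied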
-    try:
-        s3_put_obj = s3_client.put_object(Body=bucket_body, Bucket=bucket_name, Key="test-1.txt")
-    except ClientError as e:
-        s3_put_obj_error = e.response.get("Error", {}).get("Code")
-    assert s3_put_obj_error == 'AccessDenied'
-
-    oidc_remove=iam_client.delete_open_id_connect_provider(
-    OpenIDConnectProviderArn=oidc_response["OpenIDConnectProviderArn"]
-    )
-
-@pytest.mark.webidentity_test
-@pytest.mark.abac_test
-@pytest.mark.token_resource_tags_test
-@pytest.mark.fails_on_dbstore
-def test_assume_role_with_web_identity_wrong_resource_tag_deny():
-    check_webidentity()
-    iam_client=get_iam_client()
-    sts_client=get_sts_client()
-    default_endpoint=get_config_endpoint()
-    role_session_name=get_parameter_name()
-    thumbprint=get_thumbprint()
-    user_token=get_user_token()
-    realm=get_realm_name()
-
-    s3_res_iam_creds = get_s3_resource_using_iam_creds()
-
-    s3_client_iam_creds = s3_res_iam_creds.meta.client
-
-    bucket_name = get_new_bucket_name()
-    s3bucket = s3_client_iam_creds.create_bucket(Bucket=bucket_name)
-    assert s3bucket['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    bucket_tagging = s3_res_iam_creds.BucketTagging(bucket_name)
-    Set_Tag = bucket_tagging.put(Tagging={'TagSet':[{'Key':'Department', 'Value': 'WrongResourcetag'}]})
-
-    oidc_response = iam_client.create_open_id_connect_provider(
-    Url='http://localhost:8080/auth/realms/{}'.format(realm),
-    ThumbprintList=[
-        thumbprint,
-    ],
-    )
-
-    policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_response["OpenIDConnectProviderArn"]+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\",\"sts:TagSession\"],\"Condition\":{\"StringEquals\":{\"aws:RequestTag/Department\":\"Engineering\"}}}]}"
-    (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
-    assert role_response['Role']['Arn'] == 'arn:aws:iam:::role/'+general_role_name+''
-
-    role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":\"arn:aws:s3:::*\",\"Condition\":{\"StringEquals\":{\"s3:ResourceTag/Department\":[\"Engineering\"]}}}}"
-    (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=user_token)
-    assert resp['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    s3_client = boto3.client('s3',
-        aws_access_key_id = resp['Credentials']['AccessKeyId'],
-        aws_secret_access_key = resp['Credentials']['SecretAccessKey'],
-        aws_session_token = resp['Credentials']['SessionToken'],
-        endpoint_url=default_endpoint,
-        region_name='',
-        )
-
-    bucket_body = 'this is a test file'
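-    # the bucket's Department tag ('WrongResourcetag') does not match the 'Engineering' value required by the role policy, so the PUT should be denied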
-    try:
-        s3_put_obj = s3_client.put_object(Body=bucket_body, Bucket=bucket_name, Key="test-1.txt")
-    except ClientError as e:
-        s3_put_obj_error = e.response.get("Error", {}).get("Code")
-    assert s3_put_obj_error == 'AccessDenied'
-
-    oidc_remove=iam_client.delete_open_id_connect_provider(
-    OpenIDConnectProviderArn=oidc_response["OpenIDConnectProviderArn"]
-    )
-
-@pytest.mark.webidentity_test
-@pytest.mark.abac_test
-@pytest.mark.token_resource_tags_test
-@pytest.mark.fails_on_dbstore
-def test_assume_role_with_web_identity_resource_tag_princ_tag():
-    check_webidentity()
-    iam_client=get_iam_client()
-    sts_client=get_sts_client()
-    default_endpoint=get_config_endpoint()
-    role_session_name=get_parameter_name()
-    thumbprint=get_thumbprint()
-    user_token=get_user_token()
-    realm=get_realm_name()
-
-    s3_res_iam_creds = get_s3_resource_using_iam_creds()
-
-    s3_client_iam_creds = s3_res_iam_creds.meta.client
-
-    bucket_name = get_new_bucket_name()
-    s3bucket = s3_client_iam_creds.create_bucket(Bucket=bucket_name)
-    assert s3bucket['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    bucket_tagging = s3_res_iam_creds.BucketTagging(bucket_name)
-    Set_Tag = bucket_tagging.put(Tagging={'TagSet':[{'Key':'Department', 'Value': 'Engineering'}]})
-
-    oidc_response = iam_client.create_open_id_connect_provider(
-    Url='http://localhost:8080/auth/realms/{}'.format(realm),
-    ThumbprintList=[
-        thumbprint,
-    ],
-    )
-
-    policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_response["OpenIDConnectProviderArn"]+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\",\"sts:TagSession\"],\"Condition\":{\"StringEquals\":{\"aws:RequestTag/Department\":\"Engineering\"}}}]}"
-    (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
-    assert role_response['Role']['Arn'] == 'arn:aws:iam:::role/'+general_role_name+''
-
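-    # role policy: s3 access is allowed only on resources whose Department tag matches the session's own principal tag (policy variable ${aws:PrincipalTag/Department})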
-    role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":\"arn:aws:s3:::*\",\"Condition\":{\"StringEquals\":{\"s3:ResourceTag/Department\":[\"${aws:PrincipalTag/Department}\"]}}}}"
-    (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=user_token)
-    assert resp['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    s3_client = boto3.client('s3',
-        aws_access_key_id = resp['Credentials']['AccessKeyId'],
-        aws_secret_access_key = resp['Credentials']['SecretAccessKey'],
-        aws_session_token = resp['Credentials']['SessionToken'],
-        endpoint_url=default_endpoint,
-        region_name='',
-        )
-
-    bucket_body = 'this is a test file'
-    tags = 'Department=Engineering&Department=Marketing'
-    key = "test-1.txt"
-    s3_put_obj = s3_client.put_object(Body=bucket_body, Bucket=bucket_name, Key=key, Tagging=tags)
-    assert s3_put_obj['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    s3_get_obj = s3_client.get_object(Bucket=bucket_name, Key=key)
-    assert s3_get_obj['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    oidc_remove=iam_client.delete_open_id_connect_provider(
-    OpenIDConnectProviderArn=oidc_response["OpenIDConnectProviderArn"]
-    )
-
-@pytest.mark.webidentity_test
-@pytest.mark.abac_test
-@pytest.mark.token_resource_tags_test
-@pytest.mark.fails_on_dbstore
-def test_assume_role_with_web_identity_resource_tag_copy_obj():
-    check_webidentity()
-    iam_client=get_iam_client()
-    sts_client=get_sts_client()
-    default_endpoint=get_config_endpoint()
-    role_session_name=get_parameter_name()
-    thumbprint=get_thumbprint()
-    user_token=get_user_token()
-    realm=get_realm_name()
-
-    s3_res_iam_creds = get_s3_resource_using_iam_creds()
-
-    s3_client_iam_creds = s3_res_iam_creds.meta.client
-
-    # create two buckets and attach the same tag to both
-    bucket_name = get_new_bucket_name()
-    s3bucket = s3_client_iam_creds.create_bucket(Bucket=bucket_name)
-    assert s3bucket['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    bucket_tagging = s3_res_iam_creds.BucketTagging(bucket_name)
-    Set_Tag = bucket_tagging.put(Tagging={'TagSet':[{'Key':'Department', 'Value': 'Engineering'}]})
-
-    copy_bucket_name = get_new_bucket_name()
-    s3bucket = s3_client_iam_creds.create_bucket(Bucket=copy_bucket_name)
-    assert s3bucket['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    bucket_tagging = s3_res_iam_creds.BucketTagging(copy_bucket_name)
-    Set_Tag = bucket_tagging.put(Tagging={'TagSet':[{'Key':'Department', 'Value': 'Engineering'}]})
-
-    oidc_response = iam_client.create_open_id_connect_provider(
-    Url='http://localhost:8080/auth/realms/{}'.format(realm),
-    ThumbprintList=[
-        thumbprint,
-    ],
-    )
-
-    policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_response["OpenIDConnectProviderArn"]+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\",\"sts:TagSession\"],\"Condition\":{\"StringEquals\":{\"aws:RequestTag/Department\":\"Engineering\"}}}]}"
-    (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
-    assert role_response['Role']['Arn'] == 'arn:aws:iam:::role/'+general_role_name+''
-
-    role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":\"arn:aws:s3:::*\",\"Condition\":{\"StringEquals\":{\"s3:ResourceTag/Department\":[\"${aws:PrincipalTag/Department}\"]}}}}"
-    (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=user_token)
-    assert resp['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    s3_client = boto3.client('s3',
-        aws_access_key_id = resp['Credentials']['AccessKeyId'],
-        aws_secret_access_key = resp['Credentials']['SecretAccessKey'],
-        aws_session_token = resp['Credentials']['SessionToken'],
-        endpoint_url=default_endpoint,
-        region_name='',
-        )
-
-    bucket_body = 'this is a test file'
-    tags = 'Department=Engineering'
-    key = "test-1.txt"
-    s3_put_obj = s3_client.put_object(Body=bucket_body, Bucket=bucket_name, Key=key, Tagging=tags)
-    assert s3_put_obj['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    #copy to same bucket
-    copy_source = {
-    'Bucket': bucket_name,
-    'Key': 'test-1.txt'
-    }
-
-    s3_client.copy(copy_source, bucket_name, "test-2.txt")
-
-    s3_get_obj = s3_client.get_object(Bucket=bucket_name, Key="test-2.txt")
-    assert s3_get_obj['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    #copy to another bucket
-    copy_source = {
-    'Bucket': bucket_name,
-    'Key': 'test-1.txt'
-    }
-
-    s3_client.copy(copy_source, copy_bucket_name, "test-1.txt")
-
-    s3_get_obj = s3_client.get_object(Bucket=copy_bucket_name, Key="test-1.txt")
-    assert s3_get_obj['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    oidc_remove=iam_client.delete_open_id_connect_provider(
-    OpenIDConnectProviderArn=oidc_response["OpenIDConnectProviderArn"]
-    )
-
-@pytest.mark.webidentity_test
-@pytest.mark.abac_test
-@pytest.mark.token_role_tags_test
-@pytest.mark.fails_on_dbstore
-def test_assume_role_with_web_identity_role_resource_tag():
-    check_webidentity()
-    iam_client=get_iam_client()
-    sts_client=get_sts_client()
-    default_endpoint=get_config_endpoint()
-    role_session_name=get_parameter_name()
-    thumbprint=get_thumbprint()
-    user_token=get_user_token()
-    realm=get_realm_name()
-
-    s3_res_iam_creds = get_s3_resource_using_iam_creds()
-
-    s3_client_iam_creds = s3_res_iam_creds.meta.client
-
-    bucket_name = get_new_bucket_name()
-    s3bucket = s3_client_iam_creds.create_bucket(Bucket=bucket_name)
-    assert s3bucket['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    bucket_tagging = s3_res_iam_creds.BucketTagging(bucket_name)
-    Set_Tag = bucket_tagging.put(Tagging={'TagSet':[{'Key':'Department', 'Value': 'Engineering'},{'Key':'Department', 'Value': 'Marketing'}]})
-
-    oidc_response = iam_client.create_open_id_connect_provider(
-    Url='http://localhost:8080/auth/realms/{}'.format(realm),
-    ThumbprintList=[
-        thumbprint,
-    ],
-    )
-
-    # iam:ResourceTag refers to a tag attached to the role itself, so the role can only be assumed when it carries a tag that matches this condition.
-    policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_response["OpenIDConnectProviderArn"]+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\",\"sts:TagSession\"],\"Condition\":{\"StringEquals\":{\"iam:ResourceTag/Department\":\"Engineering\"}}}]}"
-    tags_list = [
-            {'Key':'Department','Value':'Engineering'},
-            {'Key':'Department','Value':'Marketing'}
-        ]
-
-    (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None,tags_list)
-    assert role_response['Role']['Arn'] == 'arn:aws:iam:::role/'+general_role_name+''
-
-    role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":\"arn:aws:s3:::*\",\"Condition\":{\"StringEquals\":{\"s3:ResourceTag/Department\":[\"Engineering\"]}}}}"
-    (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy)
-    assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=user_token)
-    assert resp['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    s3_client = boto3.client('s3',
-        aws_access_key_id = resp['Credentials']['AccessKeyId'],
-        aws_secret_access_key = resp['Credentials']['SecretAccessKey'],
-        aws_session_token = resp['Credentials']['SessionToken'],
-        endpoint_url=default_endpoint,
-        region_name='',
-        )
-
-    bucket_body = 'this is a test file'
-    s3_put_obj = s3_client.put_object(Body=bucket_body, Bucket=bucket_name, Key="test-1.txt")
-    assert s3_put_obj['ResponseMetadata']['HTTPStatusCode'] == 200
-
-    oidc_remove=iam_client.delete_open_id_connect_provider(
-    OpenIDConnectProviderArn=oidc_response["OpenIDConnectProviderArn"]
-    )
diff --git a/s3tests_boto3/functional/test_utils.py b/s3tests_boto3/functional/test_utils.py
deleted file mode 100644 (file)
index c0dd398..0000000
--- a/s3tests_boto3/functional/test_utils.py
+++ /dev/null
@@ -1,9 +0,0 @@
-from . import utils
-
-def test_generate():
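-    # lengths are checked at zero, one byte, and around the 5 MB part-size boundary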
-    FIVE_MB = 5 * 1024 * 1024
-    assert len(''.join(utils.generate_random(0))) == 0
-    assert len(''.join(utils.generate_random(1))) == 1
-    assert len(''.join(utils.generate_random(FIVE_MB - 1))) == FIVE_MB - 1
-    assert len(''.join(utils.generate_random(FIVE_MB))) == FIVE_MB
-    assert len(''.join(utils.generate_random(FIVE_MB + 1))) == FIVE_MB + 1
diff --git a/s3tests_boto3/functional/utils.py b/s3tests_boto3/functional/utils.py
deleted file mode 100644 (file)
index ab84c16..0000000
--- a/s3tests_boto3/functional/utils.py
+++ /dev/null
@@ -1,47 +0,0 @@
-import random
-import requests
-import string
-import time
-
-def assert_raises(excClass, callableObj, *args, **kwargs):
-    """
-    Like unittest.TestCase.assertRaises, but returns the exception.
-    """
-    try:
-        callableObj(*args, **kwargs)
-    except excClass as e:
-        return e
-    else:
-        if hasattr(excClass, '__name__'):
-            excName = excClass.__name__
-        else:
-            excName = str(excClass)
-        raise AssertionError("%s not raised" % excName)
-
-def generate_random(size, part_size=5*1024*1024):
-    """
-    Generate the specified amount of random data.
-    (each part is actually a repetition of its first KB)
-    """
-    chunk = 1024
-    allowed = string.ascii_letters
-    for x in range(0, size, part_size):
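-        # build one 1 KB string of random letters and repeat it to fill this part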
-        strpart = ''.join([allowed[random.randint(0, len(allowed) - 1)] for _ in range(chunk)])
-        s = ''
-        left = size - x
-        this_part_size = min(left, part_size)
-        for y in range(this_part_size // chunk):
-            s = s + strpart
-        s = s + strpart[:(this_part_size % chunk)]
-        yield s
-        if (x == size):
-            return
-
-def _get_status(response):
-    status = response['ResponseMetadata']['HTTPStatusCode']
-    return status
-
-def _get_status_and_error_code(response):
-    status = response['ResponseMetadata']['HTTPStatusCode']
-    error_code = response['Error']['Code']
-    return status, error_code